text stringlengths 1 2.12k | source dict |
|---|---|
python, python-3.x, unit-testing, event-handling
Title: Event manager based on decorators
Question: This module is a simple event manager that works with decorators. I would like feedback on:
Bugs
Any ideas to get rid of the classevent decorator, which complicates the usage, and is error-prone.
The readability of the code
The quality and thoroughness of the unit tests
event.py
"""A module that defines decorators to transform functions and methods
into events"""
from types import MethodType
from typing import Callable, TypeVar, Generic, Any, Type, Set, List
_T = TypeVar("_T")
class _Event(Generic[_T]):
def __init__(self, broadcaster: Callable[..., _T]) -> None:
self._callbacks: Set[Callable[[_T], None]] = set()
self.broadcaster = broadcaster
def subscribe(self, callback: Callable[[_T], None]) -> None:
"""Register the given callable as a subscriber.
:param callback: a function-like object that will be called
each time the event is triggered. It must be hashable
"""
self._callbacks.add(callback)
def unsubscribe(self, callback: Callable[[_T], None]) -> None:
"""Unregister the given callable.
:param callback: the subscriber to unregister
:raises KeyError: if the given callable in not registered
"""
self._callbacks.remove(callback)
def __call__(self, *args: Any, **kwargs: Any) -> _T:
try:
result = self.broadcaster(*args, **kwargs)
except TypeError as exception:
raise TypeError(
f"{exception}\nIf this is a method and `self` is missing, "
f"consider adding the `eventclass` decorator on top of the "
f"declaring class."
) from exception
for callback in self._callbacks:
callback(result)
return result | {
"domain": "codereview.stackexchange",
"id": 45476,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, unit-testing, event-handling",
"url": null
} |
python, python-3.x, unit-testing, event-handling
def _duplicate_events(obj: object) -> None:
for attr_name in dir(obj):
attr = getattr(obj, attr_name)
if not isinstance(attr, _Event):
continue
# Creating a new Event specifically for "obj" to
# avoid side effects on other instances:
event_for_instance = _Event(attr.broadcaster)
setattr(
obj,
attr_name,
MethodType(event_for_instance, obj),
)
def event(broadcaster: Callable[..., _T]) -> _Event[_T]:
"""A decorator that transforms a function into an event.
Other functions ("subscribers") can subscribe to this event,
and will be called each time this event is triggered. The
subscribers must take a single argument that corresponds
to the output of the event.
.. warning::
When decorating a method, the declaring class should be decorated
with an :func:`eventclass`
>>> calls: List[str] = []
>>>
>>> @event
... def on_greetings() -> str:
... return "Hello"
>>>
>>> def append_target(broadcaster_output: str) -> None:
... calls.append(f"{broadcaster_output} world!")
>>>
>>> on_greetings.subscribe(append_target)
>>> on_greetings()
'Hello'
>>> calls
['Hello world!']
:param broadcaster: The function to decorate, to turn it into an event
:return: An event, which other functions can subscribe to or unsubscribe from
"""
return _Event(broadcaster)
def eventclass(class_: Type[_T]) -> Type[_T]:
"""A class decorator to enable the decoration of methods with :func:`event` | {
"domain": "codereview.stackexchange",
"id": 45476,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, unit-testing, event-handling",
"url": null
} |
python, python-3.x, unit-testing, event-handling
>>> calls: List[str] = []
>>>
... @eventclass
... class GUI:
... @event
... def on_button_click(self) -> str:
... return "Clicked"
>>>
>>> def append_target(broadcaster_output: str) -> None:
... calls.append(f"{broadcaster_output} to say hello!")
>>>
>>> gui = GUI()
>>> gui.on_button_click.subscribe(append_target)
>>> gui.on_button_click()
'Clicked'
>>> calls
['Clicked to say hello!']
:param class_: The class to decorate
:return: The corresponding subclass that supports event methods
"""
class EventClass(class_): # type: ignore
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
_duplicate_events(self)
return EventClass
test_envent.py
from typing import Type, Protocol
from unittest.mock import MagicMock
import pytest
from event import event, eventclass, _Event
@pytest.fixture(name="on_say_hello")
def fixture_on_say_hello() -> _Event[str]:
@event
def on_say_hello() -> str:
return "Hello world!"
return on_say_hello
@eventclass
class Greeter(Protocol):
@event
def on_say_hello_to(self, name: str) -> str: ...
@pytest.fixture(name="greeter")
def fixture_greeter() -> Type[Greeter]:
@eventclass
class _Greeter:
@event
def on_say_hello_to(self, name: str) -> str:
return f"Hello {name}!"
return _Greeter
def test_subscribe_given_function(on_say_hello: _Event[str]) -> None:
callback = MagicMock()
on_say_hello.subscribe(callback)
assert on_say_hello() == "Hello world!"
callback.assert_called_once_with("Hello world!")
def test_unsubscribe_given_function(on_say_hello: _Event[str]) -> None:
callback = MagicMock()
on_say_hello.subscribe(callback)
on_say_hello.unsubscribe(callback)
assert on_say_hello() == "Hello world!"
callback.assert_not_called() | {
"domain": "codereview.stackexchange",
"id": 45476,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, unit-testing, event-handling",
"url": null
} |
python, python-3.x, unit-testing, event-handling
assert on_say_hello() == "Hello world!"
callback.assert_not_called()
def test_unsubscribe_given_unregistered_function_raises_key_error(
on_say_hello: _Event[str],
) -> None:
callback = MagicMock()
with pytest.raises(KeyError):
on_say_hello.unsubscribe(callback)
def test_instance_of_decorated_class_does_not_change_of_type() -> None:
@eventclass
class EventClass: ...
event_obj = EventClass()
assert isinstance(event_obj, EventClass)
def test_subscribe_given_method(greeter: Type[Greeter]) -> None:
callback = MagicMock()
event_obj = greeter()
event_obj.on_say_hello_to.subscribe(callback)
assert event_obj.on_say_hello_to("world") == "Hello world!"
callback.assert_called_once_with("Hello world!")
def test_subscribe_given_two_instances_keeps_events_isolated(
greeter: Type[Greeter],
) -> None:
a = greeter()
b = greeter()
callback_a = MagicMock()
callback_b = MagicMock()
a.on_say_hello_to.subscribe(callback_a)
b.on_say_hello_to.subscribe(callback_b)
assert a.on_say_hello_to(name="'a'") == "Hello 'a'!"
callback_a.assert_called_once_with("Hello 'a'!")
callback_b.assert_not_called()
def test_forgetting_event_class_raises_type_error(
greeter: Type[Greeter],
) -> None:
event_obj = greeter()
with pytest.raises(TypeError, match="eventclass"):
event_obj.on_say_hello_to()
Answer:
test_envent.py
nit, typo in module name.
... transforms a function into an event
quibble: Maybe "... into an event generator"
modern type annotations
from typing import ... , Set, List
That's a little weird, in the sense of being old.
Prefer set and list in recently written annotations.
LGTM, ship it! | {
"domain": "codereview.stackexchange",
"id": 45476,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, unit-testing, event-handling",
"url": null
} |
c++, algorithm, recursion, template, c++20
Title: A recursive_find_if Template Function with Unwrap Level Implementation in C++
Question: This is a follow-up question for A recursive_find_if_all Template Function Implementation in C++, A recursive_all_of Template Function Implementation in C++ and A recursive_all_of Template Function with Unwrap Level Implementation in C++. To support std::string, std::wstring, std::u8string and std::pmr::string(making recursive_find_if template function more generic), I am trying to implement recursive_find_if template function with unwrap level in this post.
The experimental implementation
recursive_find_if Template Function
/* recursive_find_if template function implementation with unwrap level
*/
template<std::size_t unwrap_level, class T, class Proj = std::identity, class UnaryPredicate>
requires(unwrap_level <= recursive_depth<T>())
constexpr auto recursive_find_if(T&& value, UnaryPredicate&& p, Proj&& proj = {}) {
if constexpr (unwrap_level > 0)
{
return std::ranges::find_if(value, [&](auto& element) {
return recursive_find_if<unwrap_level - 1>(element, p, proj);
}) != std::ranges::end(value);
}
else
{
return std::invoke(p, std::invoke(proj, value));
}
}
Full Testing Code
The full testing code:
// A recursive_find_if Template Function with Unwrap Level Implementation in C++
#include <algorithm>
#include <array>
#include <cassert>
#include <chrono>
#include <concepts>
#include <deque>
#include <execution>
#include <exception>
//#include <experimental/ranges/algorithm>
#include <experimental/array>
#include <functional>
#include <iostream>
#include <ranges>
#include <string>
#include <utility>
#include <vector>
// is_reservable concept
template<class T>
concept is_reservable = requires(T input)
{
input.reserve(1);
};
// is_sized concept, https://codereview.stackexchange.com/a/283581/231235
template<class T>
concept is_sized = requires(T x)
{
std::size(x);
}; | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
template<typename T>
concept is_summable = requires(T x) { x + x; };
// recursive_unwrap_type_t struct implementation
template<std::size_t, typename, typename...>
struct recursive_unwrap_type { };
template<class...Ts1, template<class...>class Container1, typename... Ts>
struct recursive_unwrap_type<1, Container1<Ts1...>, Ts...>
{
using type = std::ranges::range_value_t<Container1<Ts1...>>;
};
template<std::size_t unwrap_level, class...Ts1, template<class...>class Container1, typename... Ts>
requires ( std::ranges::input_range<Container1<Ts1...>> &&
requires { typename recursive_unwrap_type<
unwrap_level - 1,
std::ranges::range_value_t<Container1<Ts1...>>,
std::ranges::range_value_t<Ts>...>::type; }) // The rest arguments are ranges
struct recursive_unwrap_type<unwrap_level, Container1<Ts1...>, Ts...>
{
using type = typename recursive_unwrap_type<
unwrap_level - 1,
std::ranges::range_value_t<Container1<Ts1...>>
>::type;
};
template<std::size_t unwrap_level, typename T1, typename... Ts>
using recursive_unwrap_type_t = typename recursive_unwrap_type<unwrap_level, T1, Ts...>::type;
// recursive_variadic_invoke_result_t implementation
template<std::size_t, typename, typename, typename...>
struct recursive_variadic_invoke_result { };
template<typename F, class...Ts1, template<class...>class Container1, typename... Ts>
struct recursive_variadic_invoke_result<1, F, Container1<Ts1...>, Ts...>
{
using type = Container1<std::invoke_result_t<F,
std::ranges::range_value_t<Container1<Ts1...>>,
std::ranges::range_value_t<Ts>...>>;
}; | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
template<std::size_t unwrap_level, typename F, class...Ts1, template<class...>class Container1, typename... Ts>
requires ( std::ranges::input_range<Container1<Ts1...>> &&
requires { typename recursive_variadic_invoke_result<
unwrap_level - 1,
F,
std::ranges::range_value_t<Container1<Ts1...>>,
std::ranges::range_value_t<Ts>...>::type; }) // The rest arguments are ranges
struct recursive_variadic_invoke_result<unwrap_level, F, Container1<Ts1...>, Ts...>
{
using type = Container1<
typename recursive_variadic_invoke_result<
unwrap_level - 1,
F,
std::ranges::range_value_t<Container1<Ts1...>>,
std::ranges::range_value_t<Ts>...
>::type>;
};
template<std::size_t unwrap_level, typename F, typename T1, typename... Ts>
using recursive_variadic_invoke_result_t = typename recursive_variadic_invoke_result<unwrap_level, F, T1, Ts...>::type;
// recursive_array_invoke_result implementation
template<std::size_t, typename, typename, typename...>
struct recursive_array_invoke_result { };
template< typename F,
template<class, std::size_t> class Container,
typename T,
std::size_t N>
struct recursive_array_invoke_result<1, F, Container<T, N>>
{
using type = Container<
std::invoke_result_t<F, std::ranges::range_value_t<Container<T, N>>>,
N>;
}; | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
template< std::size_t unwrap_level,
typename F,
template<class, std::size_t> class Container,
typename T,
std::size_t N>
requires ( std::ranges::input_range<Container<T, N>> &&
requires { typename recursive_array_invoke_result<
unwrap_level - 1,
F,
std::ranges::range_value_t<Container<T, N>>>::type; }) // The rest arguments are ranges
struct recursive_array_invoke_result<unwrap_level, F, Container<T, N>>
{
using type = Container<
typename recursive_array_invoke_result<
unwrap_level - 1,
F,
std::ranges::range_value_t<Container<T, N>>
>::type, N>;
};
template< std::size_t unwrap_level,
typename F,
template<class, std::size_t> class Container,
typename T,
std::size_t N>
using recursive_array_invoke_result_t = typename recursive_array_invoke_result<unwrap_level, F, Container<T, N>>::type;
// recursive_array_unwrap_type struct implementation, https://stackoverflow.com/a/76347485/6667035
template<std::size_t, typename>
struct recursive_array_unwrap_type { };
template<template<class, std::size_t> class Container,
typename T,
std::size_t N>
struct recursive_array_unwrap_type<1, Container<T, N>>
{
using type = std::ranges::range_value_t<Container<T, N>>;
}; | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
template<std::size_t unwrap_level, template<class, std::size_t> class Container,
typename T,
std::size_t N>
requires ( std::ranges::input_range<Container<T, N>> &&
requires { typename recursive_array_unwrap_type<
unwrap_level - 1,
std::ranges::range_value_t<Container<T, N>>>::type; }) // The rest arguments are ranges
struct recursive_array_unwrap_type<unwrap_level, Container<T, N>>
{
using type = typename recursive_array_unwrap_type<
unwrap_level - 1,
std::ranges::range_value_t<Container<T, N>>
>::type;
};
template<std::size_t unwrap_level, class Container>
using recursive_array_unwrap_type_t = typename recursive_array_unwrap_type<unwrap_level, Container>::type;
// https://codereview.stackexchange.com/a/253039/231235
template<std::size_t dim, class T, template<class...> class Container = std::vector>
constexpr auto n_dim_container_generator(T input, std::size_t times)
{
if constexpr (dim == 0)
{
return input;
}
else
{
return Container(times, n_dim_container_generator<dim - 1, T, Container>(input, times));
}
}
template<std::size_t dim, std::size_t times, class T>
constexpr auto n_dim_array_generator(T input)
{
if constexpr (dim == 0)
{
return input;
}
else
{
std::array<decltype(n_dim_array_generator<dim - 1, times>(input)), times> output;
for (size_t i = 0; i < times; i++)
{
output[i] = n_dim_array_generator<dim - 1, times>(input);
}
return output;
}
}
// recursive_depth function implementation
template<typename T>
constexpr std::size_t recursive_depth()
{
return std::size_t{0};
}
template<std::ranges::input_range Range>
constexpr std::size_t recursive_depth()
{
return recursive_depth<std::ranges::range_value_t<Range>>() + std::size_t{1};
} | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
// recursive_depth function implementation with target type
template<typename T_Base, typename T>
constexpr std::size_t recursive_depth()
{
return std::size_t{0};
}
template<typename T_Base, std::ranges::input_range Range>
requires (!std::same_as<Range, T_Base>)
constexpr std::size_t recursive_depth()
{
return recursive_depth<T_Base, std::ranges::range_value_t<Range>>() + std::size_t{1};
}
/* recursive_foreach_all template function performs specific function on input container exhaustively
https://codereview.stackexchange.com/a/286525/231235
*/
namespace impl {
template<class F, class Proj = std::identity>
struct recursive_for_each_state {
F f;
Proj proj;
};
// recursive_foreach_all template function implementation
template<class T, class State>
requires (recursive_depth<T>() == 0)
constexpr void recursive_foreach_all(T& value, State& state) {
std::invoke(state.f, std::invoke(state.proj, value));
}
template<class T, class State>
requires (recursive_depth<T>() != 0)
constexpr void recursive_foreach_all(T& inputRange, State& state) {
for (auto& item: inputRange)
impl::recursive_foreach_all(item, state);
}
// recursive_reverse_foreach_all template function implementation
template<class T, class State>
requires (recursive_depth<T>() == 0)
constexpr void recursive_reverse_foreach_all(T& value, State& state) {
std::invoke(state.f, std::invoke(state.proj, value));
}
template<class T, class State>
requires (recursive_depth<T>() != 0)
constexpr void recursive_reverse_foreach_all(T& inputRange, State& state) {
for (auto& item: inputRange | std::views::reverse)
impl::recursive_reverse_foreach_all(item, state);
}
} | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
template<class T, class Proj = std::identity, class F>
constexpr auto recursive_foreach_all(T& inputRange, F f, Proj proj = {})
{
impl::recursive_for_each_state state(std::move(f), std::move(proj));
impl::recursive_foreach_all(inputRange, state);
return std::make_pair(inputRange.end(), std::move(state.f));
}
template<class T, class Proj = std::identity, class F>
constexpr auto recursive_reverse_foreach_all(T& inputRange, F f, Proj proj = {})
{
impl::recursive_for_each_state state(std::move(f), std::move(proj));
impl::recursive_reverse_foreach_all(inputRange, state);
return std::make_pair(inputRange.end(), std::move(state.f));
}
template<class T, class I, class F>
constexpr auto recursive_fold_left_all(const T& inputRange, I init, F f)
{
recursive_foreach_all(inputRange, [&](auto& value) {
init = std::invoke(f, init, value);
});
return init;
}
template<class T, class I, class F>
constexpr auto recursive_fold_right_all(const T& inputRange, I init, F f)
{
recursive_reverse_foreach_all(inputRange, [&](auto& value) {
init = std::invoke(f, value, init);
});
return init;
}
// recursive_count_if template function implementation
template<class T, std::invocable<T> Pred>
requires (recursive_depth<T>() == 0)
constexpr std::size_t recursive_count_if_all(const T& input, const Pred& predicate)
{
return predicate(input) ? std::size_t{1} : std::size_t{0};
}
template<std::ranges::input_range Range, class Pred>
requires (recursive_depth<Range>() != 0)
constexpr auto recursive_count_if_all(const Range& input, const Pred& predicate)
{
return std::transform_reduce(std::ranges::cbegin(input), std::ranges::cend(input), std::size_t{}, std::plus<std::size_t>(), [predicate](auto&& element) {
return recursive_count_if_all(element, predicate);
});
} | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
template<std::size_t unwrap_level, class T, class Pred>
requires(unwrap_level <= recursive_depth<T>())
constexpr auto recursive_count_if(const T& input, const Pred& predicate)
{
if constexpr (unwrap_level > 0)
{
return std::transform_reduce(std::ranges::cbegin(input), std::ranges::cend(input), std::size_t{}, std::plus<std::size_t>(), [predicate](auto&& element) {
return recursive_count_if<unwrap_level - 1>(element, predicate);
});
}
else
{
return predicate(input) ? 1 : 0;
}
}
/* recursive_all_of template function implementation with unwrap level
*/
template<std::size_t unwrap_level, class T, class Proj = std::identity, class UnaryPredicate>
requires(unwrap_level <= recursive_depth<T>())
constexpr auto recursive_all_of(T&& value, UnaryPredicate&& p, Proj&& proj = {}) {
if constexpr (unwrap_level > 0)
{
return std::ranges::all_of(value, [&](auto&& element) {
return recursive_all_of<unwrap_level - 1>(element, p, proj);
});
}
else
{
return std::invoke(p, std::invoke(proj, value));
}
}
/* recursive_find_if template function implementation with unwrap level
*/
template<std::size_t unwrap_level, class T, class Proj = std::identity, class UnaryPredicate>
requires(unwrap_level <= recursive_depth<T>())
constexpr auto recursive_find_if(T&& value, UnaryPredicate&& p, Proj&& proj = {}) {
if constexpr (unwrap_level > 0)
{
return std::ranges::find_if(value, [&](auto& element) {
return recursive_find_if<unwrap_level - 1>(element, p, proj);
}) != std::ranges::end(value);
}
else
{
return std::invoke(p, std::invoke(proj, value));
}
} | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
// recursive_any_of template function implementation with unwrap level
template<std::size_t unwrap_level, class T, class Proj = std::identity, class UnaryPredicate>
constexpr auto recursive_any_of(T&& value, UnaryPredicate&& p, Proj&& proj = {})
{
return recursive_find_if<unwrap_level>(value, p, proj);
}
// recursive_none_of template function implementation with unwrap level
template<std::size_t unwrap_level, class T, class Proj = std::identity, class UnaryPredicate>
constexpr auto recursive_none_of(T&& value, UnaryPredicate&& p, Proj&& proj = {})
{
return !recursive_any_of<unwrap_level>(value, p, proj);
}
template<std::ranges::input_range T>
constexpr auto recursive_print(const T& input, const int level = 0)
{
T output = input;
std::cout << std::string(level, ' ') << "Level " << level << ":" << std::endl;
std::transform(input.cbegin(), input.cend(), output.begin(),
[level](auto&& x)
{
std::cout << std::string(level, ' ') << x << std::endl;
return x;
}
);
return output;
}
template<std::ranges::input_range T>
requires (std::ranges::input_range<std::ranges::range_value_t<T>>)
constexpr T recursive_print(const T& input, const int level = 0)
{
T output = input;
std::cout << std::string(level, ' ') << "Level " << level << ":" << std::endl;
std::ranges::transform(std::ranges::cbegin(input), std::ranges::cend(input), std::ranges::begin(output),
[level](auto&& element)
{
return recursive_print(element, level + 1);
}
);
return output;
}
void recursive_find_if_tests()
{
auto test_vectors_1 = n_dim_container_generator<4, int, std::vector>(1, 3);
test_vectors_1[0][0][0][0] = 2;
assert(recursive_find_if<4>(test_vectors_1, [](auto&& i) { return i % 2 == 0; })); | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
auto test_vectors_2 = n_dim_container_generator<4, int, std::vector>(3, 3);
assert(recursive_find_if<4>(test_vectors_2, [](auto&& i) { return i % 2 == 0; }) == false);
// Tests with std::string
auto test_vector_string = n_dim_container_generator<4, std::string, std::vector>("1", 3);
assert(recursive_find_if<4>(test_vector_string, [](auto&& i) { return i == "1"; }));
assert(recursive_find_if<4>(test_vector_string, [](auto&& i) { return i == "2"; }) == false);
// Tests with std::string, projection
assert(recursive_find_if<4>(
test_vector_string,
[](auto&& i) { return i == "1"; },
[](auto&& element) {return std::to_string(std::stoi(element) + 1); }) == false);
assert(recursive_find_if<4>(
test_vector_string,
[](auto&& i) { return i == "2"; },
[](auto&& element) {return std::to_string(std::stoi(element) + 1); }));
// Tests with std::array of std::string
std::array<std::string, 3> word_array1 = {"foo", "foo", "foo"};
assert(recursive_find_if<1>(word_array1, [](auto&& i) { return i == "foo"; }));
assert(recursive_find_if<1>(word_array1, [](auto&& i) { return i == "bar"; }) == false);
// Tests with std::deque of std::string
std::deque<std::string> word_deque1 = {"foo", "foo", "foo", "bar"};
assert(recursive_find_if<1>(word_deque1, [](auto&& i) { return i == "foo"; }));
assert(recursive_find_if<1>(word_deque1, [](auto&& i) { return i == "bar"; }));
assert(recursive_find_if<1>(word_deque1, [](auto&& i) { return i == "abcd"; }) == false);
assert(recursive_find_if<2>(word_deque1, [](auto&& i) { return i == 'a'; }));
assert(recursive_find_if<2>(word_deque1, [](auto&& i) { return i == 'b'; }));
assert(recursive_find_if<2>(word_deque1, [](auto&& i) { return i == 'c'; }) == false); | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
std::vector<std::wstring> wstring_vector1{};
for(int i = 0; i < 4; ++i)
{
wstring_vector1.push_back(std::to_wstring(1));
}
assert(recursive_find_if<1>(wstring_vector1, [](auto&& i) { return i == std::to_wstring(1); }));
assert(recursive_find_if<1>(wstring_vector1, [](auto&& i) { return i == std::to_wstring(2); }) == false);
std::vector<std::u8string> u8string_vector1{};
for(int i = 0; i < 4; ++i)
{
u8string_vector1.push_back(u8"\u20AC2.00");
}
assert(recursive_find_if<1>(u8string_vector1, [](auto&& i) { return i == u8"\u20AC2.00"; }));
assert(recursive_find_if<1>(u8string_vector1, [](auto&& i) { return i == u8"\u20AC1.00"; }) == false);
std::pmr::string pmr_string1 = "123";
std::vector<std::pmr::string> pmr_string_vector1 = {pmr_string1, pmr_string1, pmr_string1};
assert(recursive_find_if<1>(pmr_string_vector1, [](auto&& i) { return i == "123"; }));
assert(recursive_find_if<1>(pmr_string_vector1, [](auto&& i) { return i == "456"; }) == false);
std::cout << "All tests passed!\n";
return;
}
void recursive_any_of_tests()
{
auto test_vectors_1 = n_dim_container_generator<4, int, std::vector>(1, 3);
test_vectors_1[0][0][0][0] = 2;
assert(recursive_any_of<4>(test_vectors_1, [](auto&& i) { return i % 2 == 0; }));
auto test_vectors_2 = n_dim_container_generator<4, int, std::vector>(3, 3);
assert(recursive_any_of<4>(test_vectors_2, [](auto&& i) { return i % 2 == 0; }) == false);
// Tests with std::string
auto test_vector_string = n_dim_container_generator<4, std::string, std::vector>("1", 3);
assert(recursive_any_of<4>(test_vector_string, [](auto&& i) { return i == "1"; }));
assert(recursive_any_of<4>(test_vector_string, [](auto&& i) { return i == "2"; }) == false); | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
// Tests with std::string, projection
assert(recursive_any_of<4>(
test_vector_string,
[](auto&& i) { return i == "1"; },
[](auto&& element) {return std::to_string(std::stoi(element) + 1); }) == false);
assert(recursive_any_of<4>(
test_vector_string,
[](auto&& i) { return i == "2"; },
[](auto&& element) {return std::to_string(std::stoi(element) + 1); }));
// Tests with std::array of std::string
std::array<std::string, 3> word_array1 = {"foo", "foo", "foo"};
assert(recursive_any_of<1>(word_array1, [](auto&& i) { return i == "foo"; }));
assert(recursive_any_of<1>(word_array1, [](auto&& i) { return i == "bar"; }) == false);
// Tests with std::deque of std::string
std::deque<std::string> word_deque1 = {"foo", "foo", "foo", "bar"};
assert(recursive_any_of<1>(word_deque1, [](auto&& i) { return i == "foo"; }));
assert(recursive_any_of<1>(word_deque1, [](auto&& i) { return i == "bar"; }));
assert(recursive_any_of<1>(word_deque1, [](auto&& i) { return i == "abcd"; }) == false);
assert(recursive_any_of<2>(word_deque1, [](auto&& i) { return i == 'a'; }));
assert(recursive_any_of<2>(word_deque1, [](auto&& i) { return i == 'b'; }));
assert(recursive_any_of<2>(word_deque1, [](auto&& i) { return i == 'c'; }) == false);
std::vector<std::wstring> wstring_vector1{};
for(int i = 0; i < 4; ++i)
{
wstring_vector1.push_back(std::to_wstring(1));
}
assert(recursive_any_of<1>(wstring_vector1, [](auto&& i) { return i == std::to_wstring(1); }));
assert(recursive_any_of<1>(wstring_vector1, [](auto&& i) { return i == std::to_wstring(2); }) == false); | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
std::vector<std::u8string> u8string_vector1{};
for(int i = 0; i < 4; ++i)
{
u8string_vector1.push_back(u8"\u20AC2.00");
}
assert(recursive_any_of<1>(u8string_vector1, [](auto&& i) { return i == u8"\u20AC2.00"; }));
assert(recursive_any_of<1>(u8string_vector1, [](auto&& i) { return i == u8"\u20AC1.00"; }) == false);
std::pmr::string pmr_string1 = "123";
std::vector<std::pmr::string> pmr_string_vector1 = {pmr_string1, pmr_string1, pmr_string1};
assert(recursive_any_of<1>(pmr_string_vector1, [](auto&& i) { return i == "123"; }));
assert(recursive_any_of<1>(pmr_string_vector1, [](auto&& i) { return i == "456"; }) == false);
std::cout << "All tests passed!\n";
return;
}
void recursive_none_of_tests()
{
auto test_vectors_1 = n_dim_container_generator<4, int, std::vector>(1, 3);
test_vectors_1[0][0][0][0] = 2;
assert(recursive_none_of<4>(test_vectors_1, [](auto&& i) { return i % 2 == 0; }) == false);
auto test_vectors_2 = n_dim_container_generator<4, int, std::vector>(3, 3);
assert(recursive_none_of<4>(test_vectors_2, [](auto&& i) { return i % 2 == 0; }));
// Tests with std::string
auto test_vector_string = n_dim_container_generator<4, std::string, std::vector>("1", 3);
assert(recursive_none_of<4>(test_vector_string, [](auto&& i) { return i == "1"; }) == false);
assert(recursive_none_of<4>(test_vector_string, [](auto&& i) { return i == "2"; })); | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
// Tests with std::string, projection
assert(recursive_none_of<4>(
test_vector_string,
[](auto&& i) { return i == "1"; },
[](auto&& element) {return std::to_string(std::stoi(element) + 1); }));
assert(recursive_none_of<4>(
test_vector_string,
[](auto&& i) { return i == "2"; },
[](auto&& element) {return std::to_string(std::stoi(element) + 1); }) == false);
// Tests with std::array of std::string
std::array<std::string, 3> word_array1 = {"foo", "foo", "foo"};
assert(recursive_none_of<1>(word_array1, [](auto&& i) { return i == "foo"; }) == false);
assert(recursive_none_of<1>(word_array1, [](auto&& i) { return i == "bar"; }));
// Tests with std::deque of std::string
std::deque<std::string> word_deque1 = {"foo", "foo", "foo", "bar"};
assert(recursive_none_of<1>(word_deque1, [](auto&& i) { return i == "foo"; }) == false);
assert(recursive_none_of<1>(word_deque1, [](auto&& i) { return i == "bar"; }) == false);
assert(recursive_none_of<1>(word_deque1, [](auto&& i) { return i == "abcd"; }));
assert(recursive_none_of<2>(word_deque1, [](auto&& i) { return i == 'a'; }) == false);
assert(recursive_none_of<2>(word_deque1, [](auto&& i) { return i == 'b'; }) == false);
assert(recursive_none_of<2>(word_deque1, [](auto&& i) { return i == 'c'; }));
std::vector<std::wstring> wstring_vector1{};
for(int i = 0; i < 4; ++i)
{
wstring_vector1.push_back(std::to_wstring(1));
}
assert(recursive_none_of<1>(wstring_vector1, [](auto&& i) { return i == std::to_wstring(1); }) == false);
assert(recursive_none_of<1>(wstring_vector1, [](auto&& i) { return i == std::to_wstring(2); })); | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
std::vector<std::u8string> u8string_vector1{};
for(int i = 0; i < 4; ++i)
{
u8string_vector1.push_back(u8"\u20AC2.00");
}
assert(recursive_none_of<1>(u8string_vector1, [](auto&& i) { return i == u8"\u20AC2.00"; }) == false);
assert(recursive_none_of<1>(u8string_vector1, [](auto&& i) { return i == u8"\u20AC1.00"; }));
std::pmr::string pmr_string1 = "123";
std::vector<std::pmr::string> pmr_string_vector1 = {pmr_string1, pmr_string1, pmr_string1};
assert(recursive_none_of<1>(pmr_string_vector1, [](auto&& i) { return i == "123"; }) == false);
assert(recursive_none_of<1>(pmr_string_vector1, [](auto&& i) { return i == "456"; }));
std::cout << "All tests passed!\n";
return;
}
int main()
{
auto start = std::chrono::system_clock::now();
recursive_find_if_tests();
recursive_any_of_tests();
recursive_none_of_tests();
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end - start;
std::time_t end_time = std::chrono::system_clock::to_time_t(end);
std::cout << "Computation finished at " << std::ctime(&end_time) << "elapsed time: " << elapsed_seconds.count() << '\n';
return EXIT_SUCCESS;
}
The output of the test code above:
All tests passed!
All tests passed!
All tests passed!
Computation finished at Mon Feb 5 06:35:28 2024
elapsed time: 0.00237049
Godbolt link is here.
All suggestions are welcome.
The summary information:
Which question it is a follow-up to?
A recursive_find_if_all Template Function Implementation in C++,
A recursive_all_of Template Function Implementation in C++ and
A recursive_all_of Template Function with Unwrap Level Implementation in C++
What changes has been made in the code since last question?
I am trying to implement recursive_find_if template function with unwrap level in this post. | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
Why a new review is being asked for?
Please review the revised recursive_find_if template function and its tests, including naming, performance, usability and any other aspects. About the issue of constraining the types Proj and UnaryPredicate, I am still trying to figure out any possible method.
Answer: This looks fine, except that you indeed did not constrain Proj and UnaryPredicate, which will result in hard to read error messages when you pass a projection and/or unary predicate of the wrong type.
Let's look at how std::ranges::find_if() constrains them:
template<ranges::input_range R, class Proj = std::identity,
std::indirect_unary_predicate<std::projected<ranges::iterator_t<R>, Proj>> Pred>
constexpr ranges::borrowed_iterator_t<R>
find_if(R&& r, Pred pred, Proj proj = {});
So it contrains the input range and the predicate. It doesn't constrain the projection directly, but any errors there will be caught by the constraint on the predicate. Let's start with the constraint on the input range. In your case, you want T to be a nested range of at least unwrap_levels deep. Unfortunately, you can't use if constexpr inside a concept definition, and you also can't declare recursive concepts. But the trick is to create a type trait using recursive structs, or even better, a recursive constexpr function:
template<std::size_t unwrap_level, typename T>
static constexpr bool is_recursive_input_range() {
if constexpr (unwrap_level == 0) {
return true;
} else if constexpr (std::ranges::input_range<T>) {
return is_recursive_input_range<unwrap_level - 1,
std::ranges::range_value_t<T>>();
} else {
return false;
}
}
And then we can create a concept based on that:
template<typename T, std::size_t unwrap_level>
concept recursive_input_range =
is_recursive_input_range<unwrap_level, T>(); | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, algorithm, recursion, template, c++20
This allows you to write:
template<std::size_t unwrap_level, recursive_input_range<unwrap_level> T, …>
constexpr auto recursive_find_if(T&& value, …) {
…
}
That should give you an idea of how to create recursive concepts. It unfortunately requires some puzzle solving skills. But now you should be able to create a constraint for Pred. At first glance, you don't even need to create a new concept, you only need to create a recursive version of std::ranges::iterator_t, so that you can write:
template<
std::size_t unwrap_level,
recursive_input_range<unwrap_level> T,
typename Proj = std::identity,
std::indirect_unary_predicate<
std::projected<recursive_iterator_t<unwrap_level, T>, Proj>
> Pred
>
constexpr auto recursive_find_if(T&& value, …) {
…
}
However, this doesn't work if you unwrap it all the way to the non-range type. Maybe there are other type traits and concepts in the standard library that would help you create a working version, but if not, create your own concept, maybe so you can write it like:
template<
std::size_t unwrap_level,
recursive_input_range<unwrap_level> T,
typename Proj = std::identity,
recursive_projected_predicate<unwrap_level, T, Proj> Pred
>
constexpr auto recursive_find_if(T&& value, …) {
…
}
I'll leave that as an exercise for the reader. | {
"domain": "codereview.stackexchange",
"id": 45477,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, recursion, template, c++20",
"url": null
} |
c++, parsing
Title: Natural language text fast tokenizer
Question: Could you please conduct code review for the code below and suggest some improvements?
Functional specification
Implement a function for fast tokenization of text in char[] buffer handling some natural language specifics below:
Consider ‘ ‘ (space) as a delimiter, keeping a way to extends the list of delimiters later.
Extract stable collocations like “i.e.”, “etc.”, “…” as a single lexem.
In case word contains characters like ‘-‘ and ‘’’ (examples: semi-column, cat’s) return the whole construct as a whole lexem.
Return sequences of numbers (integers without signs) as a single lexem. | {
"domain": "codereview.stackexchange",
"id": 45478,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
Performance is critical, since the amount of data is huge. The function must be thread-safe.
Design
Since performance is critical, the function work with raw pointers to const char. It gets the argument as the reference to the pointer to the place where from it should start parsing and updates this pointer, moving to the position past the read lexem.
Since initial characters could be delimiters, function returns the real starting position of the lexem found.
Concerns on the current implementation
Most likely, for such tasks the regex library should be used, but I am not sure if this “write-only” language (regex) (for me at least; you write once and can’t read and maintain it at all, rewriting from scratch every time) will be extendable when new requirements come. If I am wrong, I will be thankful for the maintainable version with the regex.
Another concern on the regex usage is performance. The code is expected to work with locales and this could be quite slow if underlying regex implementation somehow uses isalpha, etc.
I feel that with std::ranges this could be implemented simpler, so is case of any suggestions, please, share.
I feel that main loop could be simplified and nested loop at the end could be removed, but can't find better solution for now.
With all these static vectors and intention to make it configurable and extendable in future, I am in two minds if to make a class or namespace from this in order to being able to configure with delimiters, stable lexems, inwords lexems, etc. Would it be extendability or overengineering?
The code
The fully functional demo
#include <algorithm>
#include <iostream>
#include <vector>
#include <string.h> | {
"domain": "codereview.stackexchange",
"id": 45478,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
// Returs lexem start point or nullptr if lexem not found
// Moves the passed pointer to the position past the lexem
inline const char* get_lexem(const char*& p)
{
const static std::vector delimiters = { ' ' }; // Could be extened to many different delimiters
const static std::vector<const char*> stable_lexems = { "i.e.", "etc.", "..." }; // Planned to be externally configurable
const static std::vector<char> inword_lexems = { '-', '\'' }; // Not sure how to process this better
const char* start = p;
while (*p && p == start) {
while (delimiters.end() != std::find(delimiters.begin(), delimiters.end(), *p)) {
++p;
if (!*p)
return nullptr;
}
auto it = std::find_if(stable_lexems.begin(), stable_lexems.end(), [&](const char* lexem) {
size_t length = strlen(lexem);
return !strncmp(p, lexem, length);
});
start = p;
if (it != stable_lexems.end()) {
p += strlen(*it);
return start;
}
while (*p && (delimiters.end() == find(delimiters.begin(), delimiters.end(), *p))) {
const bool is_inword_char = inword_lexems.end() != std::find(inword_lexems.begin(), inword_lexems.end(), *p);
if (is_inword_char && p != start && isalpha(*(p - 1))) {
++p;
continue;
}
if (!isalpha(*p) && !isdigit(*p)) {
if (p == start) {
++p;
}
break;
}
++p;
}
}
return start;
}
int main()
{
const char sample[] = "Let's conisder this semi-simple sample, i.e. test data with ints: 100, etc. For ... some testing...";
const char* lexem = nullptr;
const char* lexem_end = sample;
while (true) {
lexem = get_lexem(lexem_end);
if (!(lexem && lexem != lexem_end))
break; | {
"domain": "codereview.stackexchange",
"id": 45478,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
if (!(lexem && lexem != lexem_end))
break;
std::string token(lexem, lexem_end - lexem);
std::cout << token << "\n";
}
}
Answer: Avoid using char*
C++ provides much nicer and safer ways to deal with strings and string slices than C's char*. Before C++17, I recommend you just use std::string where possible. This might cause some unnecessary copies to be made though. Luckily, since C++17 we have std::string_view; this is just a view of another string, it doesn't hold a copy itself.
Make it work like a range
Wouldn't it be nice if the code in main() could be written like this?
int main() {
std::string sample = "Let's consider…";
for (auto token: tokenize(sample)) {
std::cout << token << '\n';
}
}
You can do this by making a function tokenize() that returns a class that has two functions, begin() and end(), that both return a token iterator. Here is a possible way to do this:
class TokenRange {
std::string_view data;
public:
class Iterator {
std::string_view data;
public:
Iterator(std::string_view data = {}): data(data) {}
Iterator& operator++();
std::string_view operator*() const;
friend bool operator==(const Iterator&, const Iterator&) = default;
};
TokenRange(std::string_view data): data(data) {}
Iterator begin() {
return Iterator(data);
}
Iterator end() {
return {};
}
}; | {
"domain": "codereview.stackexchange",
"id": 45478,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
Iterator end() {
return {};
}
};
So now if you write tokenize(sample).begin(), you get a TokenRange::Iterator whose member data is a view of the whole string sample. Now if you try to dereference that iterator using *, then you expect it to return the value of the first item in the range, or in this case, the first token. That's most of what you do in get_lexem(). So get_lexem() can be turned into TokenRange::Iterator::operator*().
Of course, operator++() will probably be called soon afterwards, so you want to make sure you don't duplicate most of operator*() just to know how much you have to skip over. Find some way to only have to scan for a token once.
While std::string_view is more than just a pointer, and you might even need more member variables in Iterator, any decent compiler will inline all these things and optimize them away.
Making a standards-compliant iterator is a little bit more work, but you can inherit from std::iterator. Also see this tutorial.
Consider making use of more recent C++ features
Instead of std::find(delimiters.begin(), delimiters.end(), *p), you could write std::ranges::find(delimiters, *p) instead. Or even better:
while (std::ranges::contains(delimiters, *p)) {
++p;
}
if (!*p) {
…
}
Use std::string_view's compare() member function instead of strncmp(), and of course size() insead of strlen(). Or if you just want to check if a string begins with a given text, then use starts_with().
Going full std::ranges
It sounds like this should be a problem that could be solved with std::ranges. Something like:
auto tokens = data
| std::views::lazy_split(delimiters)
| …; | {
"domain": "codereview.stackexchange",
"id": 45478,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
However, your definition of a token is complex enough that writing it out using just views would either result in some horrible looking code, or it might be very inefficient. If I would go this route, then I would start with a function that maps each charater to some enum that describes which class it belongs to: delimiter, in-word lexem, alphanumerics and other. Then you can use other views to split on delimiters, and use something like std::views::adjacent_transform() to eliminate in-word lexems at the start of a token. Still, checking for stable lexems seems to be hard.
I would probably keep the structure of get_lexem(). However, just like you can make a tokenize() function that returns a range, you could make your own tokenize_view() so you can write:
auto tokens = data | tokenize_view();
The main advantage of that would be that you could then use that inside a larger pipeline of views. | {
"domain": "codereview.stackexchange",
"id": 45478,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, memory-management, mocks
Title: Track and trace allocations
Question: When testing or debugging allocator-aware objects, it can be useful to provide allocators that can provide insight into how they get called. The Tracing_alloc from A fixed-size dynamic array is a reasonable starting point, upon which I've built.
I provide four allocator adapters, which all modify an underlying allocator:
logged, which records each operation to the standard log stream,
checked, which ensures that operations are correctly paired,
shared_copyable, which allows a move-only allocator to be used in objects that expect to copy it, and
no_delete_exceptions, which converts exceptions in deallocate() and destruct() into messages directed to the error stream.
#ifndef ALLOCATOR_TRACING_HPP
#define ALLOCATOR_TRACING_HPP
#include <algorithm>
#include <format>
#include <iostream>
#include <map>
#include <memory>
#include <ranges>
#include <stdexcept>
#include <utility>
#include <vector>
namespace alloc {
// Tracing allocator, based on a class written by L.F.
// <URI: https://codereview.stackexchange.com/q/221719/75307 >
template<typename Base>
requires requires { typename std::allocator_traits<Base>; }
struct logged : Base
{
using traits = std::allocator_traits<Base>;
using value_type = traits::value_type;
using pointer = traits::pointer;
// Since our first (only) template argument is _not_ the same
// as value_type, we must provide rebind.
template<class T>
struct rebind { using other = logged<typename traits::rebind_alloc<T>>; };
using Base::Base; | {
"domain": "codereview.stackexchange",
"id": 45479,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, mocks",
"url": null
} |
c++, memory-management, mocks
using Base::Base;
pointer allocate(std::size_t n)
{
std::clog << "allocate " << n;
pointer p;
try {
p = traits::allocate(*this, n);
} catch (...) {
std::clog << " FAILED\n";
throw;
}
const void *const pv = p;
std::clog << " = " << pv << '\n';
return p;
}
void deallocate(pointer p, std::size_t n)
{
const void *const pv = p;
std::clog << "deallocate " << n << " @" << pv;
try {
traits::deallocate(*this, p, n);
} catch (...) {
std::clog << " FAILED\n";
throw;
}
std::clog << '\n';
}
template <typename... Args>
requires std::constructible_from<value_type, Args...>
void construct(pointer p, Args&&... args)
{
const void *const pv = p;
std::clog << "construct @" << pv;
try {
traits::construct(*this, p, std::forward<Args>(args)...);
} catch (...) {
std::clog << " FAILED\n";
throw;
}
std::clog << '\n';
}
void destroy(pointer p)
{
const void *const pv = p;
std::clog << "destroy @" << pv;
try {
traits::destroy(*this, p);
} catch (...) {
std::clog << " FAILED\n";
throw;
}
std::clog << '\n';
}
};
// Verifying allocator.
// Diagnoses these common problems:
// - mismatched construction/destruction
// - attempts to operate on memory from other allocators.
// N.B. contains no locking, as intended use in unit-test is
// expected to be single-threaded. If a thread-safe version
// really is needed, write another wrapper for that! | {
"domain": "codereview.stackexchange",
"id": 45479,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, mocks",
"url": null
} |
c++, memory-management, mocks
template<typename Base>
requires requires { typename std::allocator_traits<Base>; }
class checked : public Base
{
public:
using traits = std::allocator_traits<Base>;
using value_type = traits::value_type;
using pointer = traits::pointer;
template<class T>
struct rebind { using other = checked<typename traits::rebind_alloc<T>>; };
#if __cplusplus < 2026'01L
// prior to C++26, we could inherit incorrect type
// (LWG issue 3170; https://wg21.link/P2868R1)
using is_always_equal = std::false_type;
#endif
private:
enum class state : unsigned char { initial, alive, dead };
// states of all allocated values
std::map<pointer, std::vector<state>, std::greater<>> population = {};
public:
using Base::Base;
// Move-only class - see shared_copyable below if copying is required.
checked(const checked&) = delete;
auto& operator=(const checked&) = delete;
checked(checked&&) = default;
checked& operator=(checked&&) = default;
~checked() noexcept
{
try {
assert_empty();
} catch (std::logic_error& e) {
// We can't throw in a destructor, so print a message instead
std::cerr << e.what() << '\n';
}
}
pointer allocate(std::size_t n)
{
auto p = traits::allocate(*this, n);
population.try_emplace(p, n, state::initial);
return p;
} | {
"domain": "codereview.stackexchange",
"id": 45479,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, mocks",
"url": null
} |
c++, memory-management, mocks
void deallocate(pointer p, std::size_t n)
{
auto it = population.find(p);
if (it == population.end()) [[unlikely]] {
logic_error("deallocate without allocate");
}
if (std::ranges::contains(it->second, state::alive)) [[unlikely]] {
logic_error("deallocate live objects");
}
if (n != it->second.size()) [[unlikely]] {
logic_error(std::format("deallocate {} but {} allocated",
n, it->second.size()));
}
traits::deallocate(*this, p, n);
population.erase(it);
}
template<typename... Args>
requires std::constructible_from<value_type, Args...>
void construct(pointer p, Args&&... args)
{
auto& p_state = get_state(p);
if (p_state == state::alive) [[unlikely]] {
logic_error("construct already-constructed object");
}
traits::construct(*this, p, std::forward<Args>(args)...);
// it's alive iff the constructor returns successfully
p_state = state::alive;
}
void destroy(pointer p)
{
switch (std::exchange(get_state(p), state::dead)) {
case state::initial:
logic_error("destruct unconstructed object");
case state::dead:
logic_error("destruct already-destructed object");
[[likely]]
case state::alive:
break;
}
traits::destroy(*this, p);
} | {
"domain": "codereview.stackexchange",
"id": 45479,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, mocks",
"url": null
} |
c++, memory-management, mocks
void assert_empty() const {
if (population.empty()) [[likely]] {
return;
}
// Failed - gather more information
static auto const count_living = [](auto const& pair) {
return std::ranges::count(pair.second, state::alive);
};
auto counts = population | std::views::transform(count_living);
logic_error(std::format("destructing with {} block(s) still containing {} live object(s)",
population.size(), std::ranges::fold_left(counts, 0uz, std::plus<> {})));
}
private:
auto& get_state(pointer p) {
auto it = population.lower_bound(p);
if (it == population.end()) [[unlikely]] {
logic_error("construct/destruct unallocated object");
}
auto second = it->first + it->second.size();
if (std::greater {}(p, second)) [[unlikely]] {
logic_error("construct/destruct unallocated object");
}
return it->second[p - it->first];
}
// A single point of tracing can be useful as a debugging breakpoint
[[noreturn]] void logic_error(auto&& message) const {
throw std::logic_error(message);
}
};
// An allocator wrapper whose copies all share an instance of the
// underlying allocator. This can be needed for implementations
// that assume all allocators are copyable.
template<typename Underlying>
requires requires { typename std::allocator_traits<Underlying>; }
class shared_copyable
{
std::shared_ptr<Underlying> alloc; | {
"domain": "codereview.stackexchange",
"id": 45479,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, mocks",
"url": null
} |
c++, memory-management, mocks
public:
using traits = std::allocator_traits<Underlying>;
using value_type = traits::value_type;
using pointer = traits::pointer;
using const_pointer = traits::const_pointer;
using void_pointer = traits::void_pointer;
using const_void_pointer = traits::const_void_pointer;
using difference_type = traits::difference_type;
using size_type = traits::size_type;
using propagate_on_container_copy_assignment = traits::propagate_on_container_copy_assignment;
using propagate_on_container_move_assignment = traits::propagate_on_container_move_assignment;
using propagate_on_container_swap = traits::propagate_on_container_swap;
using is_always_equal = traits::is_always_equal;
template<class T>
struct rebind { using other = shared_copyable<typename traits::rebind_alloc<T>>; };
template<typename... Args>
explicit shared_copyable(Args... args)
: alloc {std::make_shared<Underlying>(std::forward<Args>(args)...)}
{}
pointer allocate(std::size_t n)
{
return alloc->allocate(n);
}
void deallocate(pointer p, std::size_t n)
{
alloc->deallocate(p, n);
}
template <typename... Args>
requires std::constructible_from<value_type, Args...>
void construct(pointer p, Args&&... args)
{
alloc->construct(p, args...);
}
void destroy(pointer p)
{
alloc->destroy(p);
}
}; | {
"domain": "codereview.stackexchange",
"id": 45479,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, mocks",
"url": null
} |
c++, memory-management, mocks
void destroy(pointer p)
{
alloc->destroy(p);
}
};
// This wrapper is needed for code (such as some implementations
// of standard library) which assumes that allocator traits'
// destroy() and deallocate() never throw, even though these
// functions are not required to be noexcept.
template<typename Base>
requires requires { typename std::allocator_traits<Base>; }
struct no_delete_exceptions : Base
{
using traits = std::allocator_traits<Base>;
template<class T>
struct rebind { using other = no_delete_exceptions<typename traits::rebind_alloc<T>>; };
using Base::Base;
void deallocate(traits::pointer p, std::size_t n) noexcept
{
try {
traits::deallocate(*this, p, n);
} catch (std::exception& e) {
std::cerr << "deallocate error: " << e.what() << '\n';
}
}
void destroy(traits::pointer p) noexcept
{
try {
traits::destroy(*this, p);
} catch (std::exception& e) {
std::cerr << "destroy error: " << e.what() << '\n';
}
}
};
} // namespace alloc
#endif
As well as using these objects to test the fixed-size dynamic array linked above, I also made some unit tests:
#include <gtest/gtest.h>
#include <string>
using checked = alloc::checked<std::allocator<std::string>>;
TEST(Alloc, DoubleDeallocate)
{
checked a;
auto p = a.allocate(1);
EXPECT_THROW(a.assert_empty(), std::logic_error);
EXPECT_NO_THROW(a.deallocate(p, 1));
EXPECT_THROW(a.deallocate(p, 1), std::logic_error);
EXPECT_NO_THROW(a.assert_empty());
}
TEST(Alloc, DeallocateWrongSize)
{
checked a;
auto p = a.allocate(1);
EXPECT_THROW(a.deallocate(p, 2), std::logic_error);
// clean up
EXPECT_NO_THROW(a.deallocate(p, 1));
EXPECT_NO_THROW(a.assert_empty());
} | {
"domain": "codereview.stackexchange",
"id": 45479,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, mocks",
"url": null
} |
c++, memory-management, mocks
// clean up
EXPECT_NO_THROW(a.deallocate(p, 1));
EXPECT_NO_THROW(a.assert_empty());
}
TEST(Alloc, DoubleConstruct)
{
checked a;
auto p = a.allocate(1);
EXPECT_NO_THROW(a.construct(p, ""));
EXPECT_THROW(a.construct(p, ""), std::logic_error);
// deallocate with live object
EXPECT_THROW(a.deallocate(p, 1), std::logic_error);
// clean up
EXPECT_NO_THROW(a.destroy(p));
EXPECT_NO_THROW(a.deallocate(p, 1));
EXPECT_NO_THROW(a.assert_empty());
}
TEST(Alloc, ConstructAfterDeallocate)
{
checked a;
auto p = a.allocate(1);
a.deallocate(p, 1);
EXPECT_NO_THROW(a.assert_empty());
EXPECT_THROW(a.construct(p, ""), std::logic_error);
// clean up
EXPECT_NO_THROW(a.assert_empty());
}
TEST(Alloc, DestroyWithoutConstruct)
{
checked a;
auto p = a.allocate(1);
EXPECT_THROW(a.destroy(p), std::logic_error);
// clean up
EXPECT_NO_THROW(a.deallocate(p, 1));
EXPECT_NO_THROW(a.assert_empty());
}
TEST(Alloc, DoubleDestroy)
{
checked a;
auto p = a.allocate(1);
EXPECT_NO_THROW(a.construct(p, ""));
EXPECT_NO_THROW(a.destroy(p));
EXPECT_THROW(a.destroy(p), std::logic_error);
// clean up
EXPECT_NO_THROW(a.deallocate(p, 1));
EXPECT_NO_THROW(a.assert_empty());
}
```
Answer: Missing thread-safety
The default allocator is thread-safe, and custom allocators might also be. So for your allocator adapters to not destroy the thread-safety, you need to make sure you handle that correctly.
logged: while this doesn't break anything when used from multiple threads, it might cause the log messages from being mixed together. You could add a mutex to prevent that, but I suggest just making sure you do everything with a single use of <<, as then it is likely that the underlying stream will handle it as an atomic operation. So for example:
std::clog << std::format("allocate {} = {}\n", n, pv); | {
"domain": "codereview.stackexchange",
"id": 45479,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, mocks",
"url": null
} |
c++, memory-management, mocks
Also consider that other things might output to std::clog as well, not just your allocator adapters.
checked: use a mutex to guard population.
shared_copyable: despite std::shared_ptr being somewhat thread-safe, it's not safe to have two threads to access the exact same std::shared_ptr object at the same time. So consider whether you want to allow concurrent access to the same shared_copyable object.
no_delete_exceptions: same issue as logged. | {
"domain": "codereview.stackexchange",
"id": 45479,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, mocks",
"url": null
} |
c++, memory-management, mocks
Check the return value of try_emplace()
In checked, consider checking the return value of try_emplace(). This could catch misbehaving allocators that return the same point for different allocations with overlapping lifetimes, but it could also still point to errors in the caller. Consider someone making two std::pmr::monotic_buffer_resource objects but accidentally giving them a pointer to the same buffer.
Note that this only catches errors if two allocations return exactly the same pointer. What if they have different pointers but the memory ranges overlap?
Interaction with placement-new/delete
The checked allocator also tracks construction and destruction using construct() and destroy(), but it might be legal to use bare placement-new instead of construct(), but still call destroy() afterwards. So your checker could return false positives, although for everyone's sanity of mind, it's of course much better to enforce that the same way is used to construct as to destroy.
Missing std::forward()
Not all your construct() functions use std::forward() to forward args. That brings me to:
Missing unit tests
There are lots of unit tests that need to be added. You should not only test for your allocator adapters performing their special functionality, but also that everything is passed to the underlying allocators correctly.
[[nodiscard]], constexpr and noexcept
C++20 made a lot of allocator operations constexpr, and added [[nodiscard]] to the return value of allocate().
While I don't think any of the STL's allocators have anything that is noexcept anymore, consider that someone might implement their own non-throwing allocator (for example, for use in real-time code). You could add noexcept(noexcept(…)) clauses to the members of logged and shared_copyable, but this won't work for checked as its use of a std::map means it cannot be noexcept. | {
"domain": "codereview.stackexchange",
"id": 45479,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, memory-management, mocks",
"url": null
} |
performance, c, programming-challenge
Title: Count frequency of words and print them in sorted order
Question: Objective:
Write a program to count the frequencies of unique words from standard input, then print them out with their frequencies, ordered most frequent first. For example, given this input:
The foo the foo the
defenestration the
The program should print the following:
the 4
foo 2
defenestration 1
The test input file will be the text of the King James Bible, concatenated 10 times.
Code:
I used the public-domain stb library stb_ds.h for the hash table.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <limits.h>
#include <ctype.h>
#define STB_DS_IMPLEMENTATION
#include "stb_ds.h"
/* pneumonoultramicroscopicsilicovolcanoconiosis - a lung disease caused by
* inhaling silica dust. */
#define LONGEST_WORD 45
#define CHUNK_SIZE (8 * 1024)
typedef struct count {
char *key;
size_t value;
} count;
static int cmp_func(const void *a, const void *b)
{
const count *const p = *(const count * const *) a;
const count *const q = *(const count * const *) b;
return (p->value < q->value) - (p->value > q->value);
}
static void replace_punctuation(size_t len, char s[static len])
{
// ".,;:!?\"()[]{}-"
static const char table[UCHAR_MAX + 1] = {
['.'] = '.' ^ ' ',[','] = ',' ^ ' ',[';'] = ';' ^ ' ',[':'] = ':' ^ ' ',
['!'] = '!' ^ ' ',['?'] = '?' ^ ' ',['"'] = '"' ^ ' ',['('] = '(' ^ ' ',
[')'] = ')' ^ ' ',['['] = '[' ^ ' ',[']'] = ']' ^ ' ',['{'] = '{' ^ ' ',
['}'] = '}' ^ ' ',['-'] = '-' ^ ' '
};
for (size_t i = 0; i < len; ++i) {
s[i] ^= table[((unsigned char *) s)[i]];
}
}
static count *load_ht(FILE * stream)
{
count *ht = NULL;
/* Store the string keys in an arena private to this hash table. */
sh_new_arena(ht);
char chunk[CHUNK_SIZE];
size_t offset = 0; | {
"domain": "codereview.stackexchange",
"id": 45480,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c, programming-challenge",
"url": null
} |
performance, c, programming-challenge
char chunk[CHUNK_SIZE];
size_t offset = 0;
while (true) {
const size_t nread =
fread(chunk + offset, 1, CHUNK_SIZE - offset, stream);
if (ferror(stream)) {
shfree(ht);
return NULL;
}
if (nread + offset == 0) {
break;
}
/* Search for last white-space character in chunk and process up to there. */
/* Can we replace this with a library function? */
size_t curr_chunk_end;
for (curr_chunk_end = nread + offset - 1; curr_chunk_end != SIZE_MAX;
--curr_chunk_end) {
const unsigned char c = (unsigned char) chunk[curr_chunk_end];
if (isspace(c)) {
break;
}
}
/* How can we iterate the chunk just once? */
const size_t curr_chunk_size =
curr_chunk_end != SIZE_MAX ? curr_chunk_end : nread + offset;
replace_punctuation(curr_chunk_size, &chunk[0]);
size_t i = 0;
while (true) {
/* Malformed input? Perhaps add a check and make the program slower,
* or give the user what it deserves. */
char word[LONGEST_WORD];
size_t word_len = 0;
while (isspace((unsigned char) chunk[i])) {
++i;
}
const size_t start = i;
/* Profiling showed that much of the time is spent in this loop. */
for (; i < curr_chunk_size && !isspace((unsigned char) chunk[i]);
++i) {
word[word_len++] = (char) tolower((unsigned char) chunk[i]);
}
if (i == start) {
break;
}
word[word_len] = '\0'; | {
"domain": "codereview.stackexchange",
"id": 45480,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c, programming-challenge",
"url": null
} |
performance, c, programming-challenge
word[word_len] = '\0';
/* Skip words beginning with a digit. */
if (!isdigit((unsigned char) word[0])) {
/* Strip possessive nouns. */
if (word_len >= 2 && word[word_len - 1] == 's'
&& word[word_len - 2] == '\'') {
word[word_len - 2] = '\0';
} else if (word[word_len - 1] == '\'') {
word[word_len - 1] = '\0';
}
const size_t new_count = shget(ht, word);
shput(ht, word, new_count + 1U);
}
}
/* Move down remaining partial word. */
if (curr_chunk_end != SIZE_MAX) {
offset = (nread + offset - 1) - curr_chunk_end;
memmove(chunk, chunk + curr_chunk_end + 1, offset);
} else {
offset = 0;
}
}
return ht;
}
int main(void)
{
count *ht = load_ht(stdin);
if (!ht) {
perror("fread()");
return EXIT_FAILURE;
}
size_t ht_len = shlenu(ht);
/* Profiling the code didn't show malloc()/mmap()/brk()/sbrk() to be a
* bottleneck.
*/
count **const ordered = malloc(sizeof *ordered * ht_len);
int rv = EXIT_FAILURE;
if (!ordered) {
goto cleanup;
}
for (size_t i = 0; i < ht_len; ++i) {
ordered[i] = malloc(sizeof **ordered);
if (!ordered[i]) {
while (i) {
free(ordered[i--]);
}
goto cleanup;
}
ordered[i]->key = ht[i].key;
ordered[i]->value = ht[i].value;
}
qsort(ordered, ht_len, sizeof *ordered, cmp_func);
for (size_t i = 0; i < ht_len; ++i) {
printf("%-*s\t%zu\n", LONGEST_WORD, ordered[i]->key, ordered[i]->value);
free(ordered[i]);
}
rv = EXIT_SUCCESS;
cleanup:
free(ordered);
shfree(ht);
return rv;
} | {
"domain": "codereview.stackexchange",
"id": 45480,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c, programming-challenge",
"url": null
} |
performance, c, programming-challenge
rv = EXIT_SUCCESS;
cleanup:
free(ordered);
shfree(ht);
return rv;
}
The program has 127 LOC (excluding stb_ds.h), and the final executable (stripped) sizes around 19 KB.
And this is how it performed:
» time ./wordfreq < kjvbible_10.txt 1> /dev/null
./wordfreq < kjvbible_10.txt > /dev/null 2.39s user 0.10s system 99% cpu 2.507 total
-----------------------------------------------------------------------------------
» time wc kjvbible_10.txt
998170 8211330 43325060 kjvbible_10.txt
wc kjvbible_10.txt 1.12s user 0.02s system 98% cpu 1.156 total
Review Goals:
How can the total running time be reduced? What's a better algorithm for processing the file?
Does any part of my code invokes undefined behavior? Have I missed any edge-cases?
General coding comments, style, et cetera.
Answer: Some subtle concerns.
Same but different
This is an advanced concern that involves portability.
Consider an output where multiple words have the same frequency as with input "the the the foo defenestration foo defenestration foo defenestration". A result of
the 3
foo 3
defenestration 3
would be just as valid as:
defenestration 3
foo 3
the 3 | {
"domain": "codereview.stackexchange",
"id": 45480,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c, programming-challenge",
"url": null
} |
performance, c, programming-challenge
would be just as valid as:
defenestration 3
foo 3
the 3
Recall that the specification details of qsort() do not specify the sub-order when the compare returns 0. Not all qsort() sort these equal cases in the same order.
Within a given compiler this is not a issue, yet consider that you are tasked with maintaining code across 10 platforms/compilers and the results were different, yet compliant, as above.
This situation occurred in my career and became a nightmare for our test engineer whose job was to verify equivalent functionality across the multiple platforms/compilers. It was not sufficient to simply compare for identical output, but manual analyses was done.
For OP, cmp_func() could be augmented to differentiate when a, b point to the same .value.
static int cmp_func_alt(const void *a, const void *b) {
const count *const p = *(const count * const *) a;
const count *const q = *(const count * const *) b;
// return (p->value < q->value) - (p->value > q->value);
int cmp = (p->value < q->value) - (p->value > q->value);
if (cmp == 0) {
cmp = (p < q) - (p > q); // Compare pointers
}
return cmp;
}
Similar problems occurred with stricmp() and rand().
stricmp() would convert to upper case and compare on some systems and convert to lower case then compare on others. This made _ before A on some systems and after Z on others, thus affecting the sorted order of strings. This was solved with rolling our own portable stricmp() code which outperformed some systems' stricmp() due to a little "trick".
rand() (this function varied a great deal per systems) was replaced with a superior yet consistent PRNG routine.
Pedantic: Access char data consistently
With C2X, this will not likely be a concern.
OP's code uses 2 ways to access char data. Best to use the same.
With char * s, is the following always true?
*((unsigned char *) s) == (unsigned char) *s | {
"domain": "codereview.stackexchange",
"id": 45480,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c, programming-challenge",
"url": null
} |
performance, c, programming-challenge
It differs when char is signed and negative values are not encoded with 2's compliment.
<string.h> functions access char via unsigned char * and so I recommend this model.
For all functions in this subclause, each character shall be interpreted as if it had the type unsigned char (and therefore every possible object representation is valid and has a different value). C23dr § 7.26.1 4 | {
"domain": "codereview.stackexchange",
"id": 45480,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c, programming-challenge",
"url": null
} |
python, performance, google-bigquery, apache-beam
Title: Efficiency of Apache Beam + BigQuery pipeline
Question: This is my first Apache pipeline. It takes a JSON file and saves the correctly formatted rows in one table, and the misformed rows in another.
My biggest worry is the efficiency. I have tested the code with the tiny (cr. 700 rows) file. The real input file is huge, it contains tens of millions of rows.
The error messages, PEP8 compliance etc. could have been better, but it's the least worry, for now at least.
The JSON format of the file is a bit unusual, the data is stored as attributes, thus the use of xml.etree.ElementTree.
from datetime import datetime
import os
import apache_beam as beam
import xml.etree.ElementTree as element_tree
temp_bucket = os.environ.get('google_temp_bucket', None)
wtbq_dict = {"method":"STREAMING_INSERTS"} if temp_bucket is None else {"custom_gcs_temp_location" : temp_bucket}
wtbq_dict["write_disposition"] = beam.io.BigQueryDisposition.WRITE_APPEND if temp_bucket is None else beam.io.BigQueryDisposition.WRITE_TRUNCATE
wtbq_dict["create_disposition"] = beam.io.BigQueryDisposition.CREATE_IF_NEEDED
wtbq_dict["ignore_unknown_columns"] = True
def to_datetime(s):
# input example: '2023-07-17T19:24:01.893'
result = datetime.strptime(s[:s.find('.')], '%Y-%m-%dT%H:%M:%S')
return result
types_map = { int : 'INTEGER', str : 'STRING', datetime : 'TIMESTAMP'}
results_structure = [('Id', int, 'id'), ('ViewCount', int, 'view_count'), ('CreationDate', datetime, 'creation_date') ]
results_table_schema = {'fields': [{'name' : col_name, 'type': types_map[col_type], 'mode': 'Required'} for _, col_type, col_name in results_structure] }
errors_structure = [(int, 'i'), (str, 'id'), (str, 'missing_keys'), (str, 'wrong_type_keys') ]
errors_table_schema = {'fields': [{'name' : col_name, 'type': types_map[col_type], 'mode': 'Required'} for col_type, col_name in errors_structure] }
def result_or_error(inp, _):
return 0 if 'i' not in inp else 1 | {
"domain": "codereview.stackexchange",
"id": 45481,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, google-bigquery, apache-beam",
"url": null
} |
python, performance, google-bigquery, apache-beam
def result_or_error(inp, _):
return 0 if 'i' not in inp else 1
def tree_elem_to_dict(i, el):
result = {}
missing_keys = []
wrong_type_keys = []
for tree_key, transform_type, result_key in results_structure:
if tree_key not in el.attrib:
missing_keys.append(tree_key)
continue
try:
transform_func = to_datetime if transform_type is datetime else transform_type
result[result_key] = transform_func(el.attrib[tree_key])
except:
wrong_type_keys.append([el.attrib[tree_key], transform_type, type(el.attrib[tree_key])])
if (missing_keys or wrong_type_keys):
return {'i' : i,
'id' : el.attrib['Id'] if 'Id' in el.attrib else 'unknown',
'missing_keys' : str(missing_keys),
'wrong_type_keys' : str(wrong_type_keys)}
else:
return result
def parse_into_dict(filename):
tree = element_tree.parse(filename)
results = [tree_elem_to_dict(i, el) for i, el in enumerate(tree.iterfind('row'))]
return results
def run_a_posts_pipeline(project_id, filename, wtbq_dict):
beam_options = beam.options.pipeline_options.PipelineOptions()
with beam.Pipeline(options=beam_options) as pipeline:
results, errors = ( pipeline | 'file_to_dict' >> beam.Create(parse_into_dict(filename))
| 'Partition' >> beam.Partition(result_or_error, 2))
results | 'tobq_results' >> beam.io.WriteToBigQuery(table=f'{project_id}:dummy2_dataset.dummy_posts_results',
schema=results_table_schema,
**wtbq_dict)
errors | 'tobq_errors' >> beam.io.WriteToBigQuery(table=f'{project_id}:dummy2_dataset.dummy_posts_errors',
schema=errors_table_schema,
**wtbq_dict) | {
"domain": "codereview.stackexchange",
"id": 45481,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, google-bigquery, apache-beam",
"url": null
} |
python, performance, google-bigquery, apache-beam
run_a_posts_pipeline(project_id="project_id", filename='filename.xml', wtbq_dict=wtbq_dict)
Answer: imports
It would be convenient to use from ... import ...,
so we can write WRITE_APPEND
rather than beam.io.BigQueryDisposition.WRITE_APPEND.
I don't know what "wt" in "wtbq" big query means, but that's fine.
Using two-space indent is normal for some languages,
but absolutely not in python.
It makes the source code harder to read than necessary.
Use black -S *.py
to fix up your source every now and again.
validate args
def to_datetime(s):
# input example: '2023-07-17T19:24:01.893'
result = datetime.strptime(s[:s.find('.')], '%Y-%m-%dT%H:%M:%S')
Thank you for the example in the comment; that's helpful.
Directly returning the strptime expression would have been fine --
no need to name it result.
The .find() call should be .index().
Or we should assert '.' in s.
Or we should document that "dot is optional",
and behave gracefully when it's absent.
I feel it is mostly reasonable for caller to pass in
'2023-07-17T18:24:19', despite that helpful comment.
It is entirely unreasonable that caller gets
datetime(2023, 7, 17, 18, 24, 1), which is eighteen seconds off,
with no indication that things went badly.
Either raise fatal error, or compute a more plausible result.
As things stand this is minimally a documentation defect,
and would likely be viewed as a code defect.
state things in the positive
def result_or_error(inp, _):
return 0 if 'i' not in inp else 1
This would be slightly easier for humans to read if it were phrased:
def result_or_error(inp, _):
return 1 if 'i' in inp else 0
and much easier if phrased:
def result_or_error(inp, _) -> int:
return int('i' in inp)
The name is misleading --
on my initial reading of the signature I thought we would
get back either a proper result or some failure object like None.
Eventually I read that it's responsible for two-bin partitioning.
design of public API
def tree_elem_to_dict(i, el): | {
"domain": "codereview.stackexchange",
"id": 45481,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, google-bigquery, apache-beam",
"url": null
} |
python, performance, google-bigquery, apache-beam
It's pretty clear that 2nd parameter is a tree element.
But declaring i: int would have been helpful.
Or even better, offer a """docstring""" that
mentions the meaning of i.
'id' : el.attrib['Id'] if 'Id' in el.attrib else 'unknown',
This defaulting is more clearly expressed as:
'id' : el.attrib.get('Id', 'unknown'),
These str() calls are surprising:
'missing_keys' : str(missing_keys),
'wrong_type_keys' : str(wrong_type_keys)
We can't keep them as lists? Ok, fine.
Consider using repr() instead, so quoting issues are less worrisome.
plural
This is manifestly the wrong name:
def parse_into_dict(filename):
Clearly it returns a list of dicts.
type annotation
This is clear as written:
def run_a_posts_pipeline(project_id, filename, wtbq_dict):
It would be preferable to rename 3rd parameter:
def run_a_posts_pipeline(project_id: int, filename: Path, wtbq: dict) -> None:
performance
The review context suggests that speed matters.
Yet this submission's source code contains no description of
typical JSONL line size or complexity
observed app level throughput, in lines per second
automated tests
anticipated input line error rate
profiling
Minimally I anticipated that run_a_posts_pipeline
would log a line count and elapsed time.
We don't know if most of the time was spent in this
code or in the ElementTree library.
It is possible that competing libraries would perform
better on this task, but first we'd want some measurements.
Suppose this code runs in production for a couple of months,
and then a maintenance engineer makes some edits.
How would we know if a performance regression was introduced?
This code appears to achieve its design goals.
It is written in a reasonably clear style.
Despite the fact that I don't know exactly what it does, I would
be willing to delegate or accept maintenance tasks on it. | {
"domain": "codereview.stackexchange",
"id": 45481,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, performance, google-bigquery, apache-beam",
"url": null
} |
java, object-oriented, enum
Title: Java Clean Code, use of enums in data structure
Question: I have defined a data structure in Java that allows to manage certain type of elements like a Queue, with the special feature of using 2 inner rows (windows) to attend elements:
import java.util.ArrayList;
public class TwoWindowsQueue<E> {
enum WindowState {CLOSED, OPEN}
enum WindowTurn {WINDOW1, WINDOW2}
private ArrayList<E> window1, window2;
private WindowState windowState1, windowState2;
private WindowTurn turn;
public TwoWindowsQueue(E[] elements) {
setWindowState1(WindowState.OPEN);
setWindowState2(WindowState.OPEN);
setTurn(WindowTurn.WINDOW1);
for (E elem : elements) {
insert(elem);
}
}
public WindowTurn getTurn() {
return turn;
}
private void setTurn(WindowTurn turn) {
this.turn = turn;
}
public void closeQueue() {
if (pendingElementsInQueue() > 0)
throw new IllegalStateException("Elements are pending");
setWindowState1(WindowState.CLOSED);
setWindowState2(WindowState.CLOSED);
}
private void setWindowState1(WindowState state) {
windowState1 = state;
}
private void setWindowState2(WindowState state) {
windowState2 = state;
}
public boolean isWindow1Open() {
return windowState1 == WindowState.OPEN;
}
public boolean isWindow2Open() {
return windowState2 == WindowState.OPEN;
}
public void closeWindow() {
if (pendingElementsInWindow1() <= pendingElementsInWindow2()) {
transfer(window1, window2, pendingElementsInWindow1());
setWindowState1(WindowState.CLOSED);
setTurn(WindowTurn.WINDOW2);
} else {
transfer(window2, window1, pendingElementsInWindow2());
setWindowState2(WindowState.CLOSED);
setTurn(WindowTurn.WINDOW1);
}
} | {
"domain": "codereview.stackexchange",
"id": 45482,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, object-oriented, enum",
"url": null
} |
java, object-oriented, enum
public void openWindow() {
if (windowState1 == WindowState.CLOSED) {
setWindowState1(WindowState.OPEN);
return;
}
if (windowState2 == WindowState.CLOSED)
setWindowState2(WindowState.OPEN);
}
public boolean isInQueue(E elem) {
return window1.contains(elem) || window2.contains(elem);
}
public int pendingElementsInQueue() {
return pendingElementsInWindow1() + pendingElementsInWindow2();
}
public int pendingElementsInWindow1() {
return window1.size();
}
public int pendingElementsInWindow2() {
return window2.size();
}
public void insert(E elem) {
if (elem == null) throw new IllegalArgumentException("null");
validateWindowsClosed();
if (isInQueue(elem))
throw new IllegalStateException("Duplicate element");
if (isWindow1Open() && isWindow2Open()) {
if (pendingElementsInWindow1() <= pendingElementsInWindow2()) {
window1.add(elem);
} else {
window2.add(elem);
}
return;
}
if (isWindow1Open()) window1.add(elem);
if (isWindow2Open()) window2.add(elem);
}
public boolean isQueueClosed() {
return !isWindow1Open() && !isWindow2Open();
}
private void validateQueueEmpty() {
if (pendingElementsInQueue() == 0)
throw new IllegalStateException("Queue is empty");
}
private void validateWindowsClosed() {
if (isQueueClosed())
throw new IllegalStateException("Both windows are closed");
}
public E elementToServe() {
validateQueueEmpty();
validateWindowsClosed();
if (getTurn() == WindowTurn.WINDOW1) {
return window1.get(0);
} else {
return window2.get(0);
}
} | {
"domain": "codereview.stackexchange",
"id": 45482,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, object-oriented, enum",
"url": null
} |
java, object-oriented, enum
public void serve() {
validateQueueEmpty();
validateWindowsClosed();
if (getTurn() == WindowTurn.WINDOW1) {
window1.remove(0);
if (isWindow2Open()) setTurn(WindowTurn.WINDOW2);
return;
} else {
window2.remove(0);
if (isWindow1Open()) setTurn(WindowTurn.WINDOW1);
}
}
public void balance() {
if (!isWindow1Open() || !isWindow2Open())
throw new IllegalStateException("Cannot balance");
int diff = pendingElementsInWindow1() - pendingElementsInWindow2();
if (diff > 1) {
transfer(window1, window2, diff / 2);
} else if (diff < -1) {
transfer(window2, window1, diff / 2);
}
}
private void transfer(ArrayList<E> source, ArrayList<E> destination, int amount) {
while (amount > 0) {
destination.add(source.get(source.size() - amount));
source.remove(source.size() - amount);
amount--;
}
}
}
As you can see, I have used enums in an attempt to make the code easier to understand and maintain. But my question is, for the windows state and turn, is there any other alternative to enums that is Clean Code friendly?
Also, if enums are the only option, is there any way to simplify its usage?
Answer: Let me start off by addressing this point.
As you can see, I have used enums in an attempt to make the code easier to understand and maintain. But my question is, for the windows state and turn, is there any other alternative to enums that is Clean Code friendly? | {
"domain": "codereview.stackexchange",
"id": 45482,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, object-oriented, enum",
"url": null
} |
java, object-oriented, enum
Enums are about as "Clean Code friendly" as you can get. They are explicit and clear, easy to understand, and they give you both Value Safety and Exhaustiveness Checking. They are powerful, and you should use them any chance that you get. Especially if you are coding Java, because Java enums are more powerful than any other (non-JVM) languages enumerated values.
Now, there is still some room for improvement.
For example, you have enum WindowTurn, but yet, you write private WindowState windowState1, windowState2;? This is unwise because, what happens if you add a new value to WindowTurn? There are all these places that need to be updated. So no, I think your choice to use an enum was very wise, but you are not utilizing it to its full extent.
The proper solution here would be to use a java.util.Map, where the key is WindowTurn, and the value would be some object that handles both WindowState as well as the ArrayList for each Window.
Really, I think that is the biggest flaw in your code -- all of the class will completely fall apart if you decide to add a new WindowTurn. If you were to refactor your solution to do things like I described, you could add a new WindowTurn value, and everything would work with no changes required. That is clean code.
That said, making the change would require you to refactor your API. As is now, this class has methods for 1 and 2. So, if you were to add a 3rd, you would now need a 3 variant for all of the methods. That is not clean code at all. So, instead, all the methods that have a 1/2 variant, change them to take in WindowTurn, and then you can make the logic general enough to apply to the relevant WindowTurn.
Again, the key concept here is that you should be able to take WindowTurn and either add or remove values from it, and the class should just work. Not only is that clean, it's maintainable. | {
"domain": "codereview.stackexchange",
"id": 45482,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, object-oriented, enum",
"url": null
} |
java, object-oriented, enum
Let me finish by addressing your points directly now.
We needed to understand the above concepts, but now that we do, we can answer your questions simply.
As you can see, I have used enums in an attempt to make the code easier to understand and maintain.
Indeed you have, but you are only partway there, for reasons mentioned above.
But my question is, for the windows state and turn, is there any other alternative to enums that is Clean Code friendly?
There certainly are, but I see nothing else that would be as "Clean Code friendly" as using an enum. Like I said, you are not using enums to their full capability, and for that reason, you are losing out on "cleanliness". But an enum used correctly? In my opinion, using an enum correctly is cleanliness at its MAXIMUM. At least for this code example.
Also, if enums are the only option, is there any way to simplify its usage?
Like I said, using them correctly. All mentioned above. | {
"domain": "codereview.stackexchange",
"id": 45482,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, object-oriented, enum",
"url": null
} |
c++, beginner
Title: First C++ Program: Race Car Driver Points Calculator
Question: Task:
Write a program that is given three numbers corresponding to the
number of times a race car driver has finished first, second, and
third. The program computes and displays how many points that driver
has earned given 5 points for a first, 3 points for a second, and 1
point for a third place finish.
Code:
#include <iostream>
#include <array>
#include <cstdlib>
namespace /* unnamed */ {
int compute_points(const std::array<int, 3> &counts)
{
/* [0] --> 5 points each.
* [1] --> 3 points each.
* [2] --> 1 point each.
*/
return (counts[0] * 5) + (counts[1] * 3) + counts[2]; // Overflow?
}
}
int main()
{
std::array<int, 3> counts{};
std::cout << "Enter the number of first, second and third place finishes: ";
std::cin >> counts[0] >> counts[1] >> counts[2]; // Error-checking?
std::cout << "Total points: " << compute_points(counts) << " \n";
return EXIT_SUCCESS;
}
Review Requests:
General coding comments, bad practices, style et cetera.
Answer: Firstly, let's answer the questions:
return (counts[0] * 5) + (counts[1] * 3) + counts[2]; // Overflow?
Given that int must be able to represent at least -32767 to +32767, a driver would have to participate in over 6,000 races to overflow int, so this is probably sufficient for our purposes. But we can easily double that by using unsigned int, since we don't expect any negative counts.
std::cin >> counts[0] >> counts[1] >> counts[2]; // Error-checking?
Yes, we require some error checking here. It can be as simple as
if (!std::cin) {
std::cerr << "Input failed\n";
return EXIT_FAILURE;
} | {
"domain": "codereview.stackexchange",
"id": 45483,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, beginner",
"url": null
} |
c++, beginner
When designing a program like this, it's a good idea to consider which requirements are rigid and which are arbitrary values which could change if we apply this to a different competition (perhaps next year's contest?).
In our case, the things that could be changed in a future version include:
how many scoring positions are counted, and
the specific scores for each position.
We should endeavour to make it easy for a future maintainer to be able to change either of these properties. At present, changing the number of scoring positions is particularly onerous, as it requires changing how we read inputs and perform the calculation.
If we make an array of the scores for each position, that can help us:
static constexpr auto scores =
std::to_array({5u, 3u, 1u}); // first, second, ...
Then we can derive the number of scoring places:
static constexpr auto places = scores.size();
And we can define a type to represent a driver's results summary, and use that in our function signature:
using results = std::array<unsigned, places>;
unsigned compute_points(const results &counts)
When we implement the function, it's good to have a knowledge of the standard algorithms library. In our case, we're performing a dot-product of two vectors (in mathematical terms). That corresponds to std::inner_product(), declared in <numeric>:
unsigned compute_points(const results &counts)
{
return std::inner_product(scores.begin(), scores.end(),
counts.begin(), 0u);
}
We'll want the main() function to read the correct number of results values. We can use a loop for (auto i = 0uz; i < places; ++i). But it's instructive to see how we can use the standard library to avoid hand-writing the loop.
We'll use the std::copy() algorithm with a std::istream_iterator to read the values:
#include <algorithm>
#include <iterator> | {
"domain": "codereview.stackexchange",
"id": 45483,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, beginner",
"url": null
} |
c++, beginner
std::copy_n(std::istream_iterator<unsigned>(std::cin),
places, counts.begin());
Modified program
Here's the version incorporating these improvements that make future adjustments a simple matter of changing the scores list:
#include <algorithm>
#include <array>
#include <cstdlib>
#include <iterator>
#include <numeric>
#include <iostream>
namespace /* unnamed */ {
static constexpr auto scores =
std::to_array({5u, 3u, 1u}); // first, second, ...
static constexpr auto places = scores.size();
using results = std::array<unsigned, places>;
unsigned compute_points(const results &counts)
{
return std::inner_product(scores.begin(), scores.end(),
counts.begin(), 0u);
}
}
int main()
{
std::cout << "Enter the number of first, second and third place finishes: ";
results counts{};
std::copy_n(std::istream_iterator<unsigned>(std::cin),
places, counts.begin());
if (!std::cin) {
std::cerr << "Input failed\n";
return EXIT_FAILURE;
}
std::cout << "Total points: " << compute_points(counts) << " \n";
} | {
"domain": "codereview.stackexchange",
"id": 45483,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, beginner",
"url": null
} |
c++, parsing
Title: Natural language text fast tokenizer (Rev.2)
Question: This is the second iteration of the Natural language text fast tokenizer code review. Special thanks goes to G. Sliepen and Toby Speight who conducted the first review.
Functional specification
Implement a function for fast tokenization of text in char[] buffer handling some natural language specifics below:
Consider ‘ ‘ (space) as a delimiter, keeping a way to extends the list of delimiters later.
Extract stable collocations like “i.e.”, “etc.”, “…” as a single lexem.
In case word contains characters like ‘-‘ and ‘’’ (examples: semi-column, half-, cat’s) return the whole construct as a whole lexem. (3.1) Otherwise, split word and return parts as separate lexems and each non-alphanumeric "inword" symbols as separate lexems.
Return sequences of numbers (integers without signs) as a single lexem.
Performance is critical, since the amount of data is huge. The function should be thread-safe.
Note: The defect in specification was found by Matthieu M in question for first code review when I already posted current Rev.2.: any "inword" non-alphanumeric character except described in item 3 above returns as separate lexems which contradicted original functional specification. Since I still in two minds about behaviour I want here, the item 3.1 has been added which describes how it works now. If you change the code and removing 3.1 to keep lexems with internal non-alphanumeric as a whole (like "a|b") will simplify the code, please feel free to fix on the fly. I will fix this in Rev3.
Answers/comments on the items in the first Code Review iteration | {
"domain": "codereview.stackexchange",
"id": 45484,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
I am not sure we need tokenize function since we can just use TokenRange(sample); why do we need a wrapper here? Any purpose or risks without it?
Thank you for the explanation on the full std::ranges approach. Taking into account that I will need to use the tokenized with the execution policies which are still missing in std::ranges, I will stay with current implementation so far; considering to make view just for learning ranges/views/projections.
The question about regex library usage is still on the table. Isn’t regex the tool intended exactly for these purposes? Shouldn’t the code with regex be shorter in times? The only concern is performance, which I mentioned in the first post.
I decided not to inherit Iterator to std::iterator because of KISS principle; so far I don’t see any benefits from it. When I see how this could be really used, I will rework this as a separate exercise.
Changelog
The code reworked to ranges approach with TokenRange.
To avoid offset recalculation for operator++, the mutable next field introduced.
Recent C++ features used including switching to std::isdigit and std::isalpha.
Concerns | {
"domain": "codereview.stackexchange",
"id": 45484,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
Concerns
The main issue is this Iterator::end(). In the tutorials they say that the best approach is to transparently pass the end() iterator of underlying data, but since here I can’t return data.end(), the empty range is returned and I am still in two minds if end() is the only case when empty range could occur, so this could lead to false-positive check to end() if I am correct. Please, comment.
Mutable next to pass the offset from operator* to operator++ is not the best solution here, but I don’t see anything better. Any suggestions?
Still don’t like the code in the inner while loop in the TokenRange::Iterator::operator*. It is hard to grasp and prove to be correct, but don’t see any way to simplify it.
Still not sure if class Iterator is the best place for these dilimiters, stable_lexems and inword_lexems; I want them to be configurable later, but still in two minds if they should belong to TokenRange or to Iterator, since formally they don’t define the range, but the way to iterate the range. On the other hand, any hardcoded values is evil, so I should request them in constructor and I don’t want to have them in every iterator’s ctor. Putting them into TokenRange and copy to Iterator on creation to be able to adjust seems as overengineering.
The code
Here is the updated code for the code review; could you please take a look and suggest further ways to improve?
Fully functional demo.
#include <algorithm>
#include <cstring>
#include <iostream>
#include <ranges>
#include <vector>
class TokenRange {
std::string_view data;
public:
class Iterator {
const std::string_view delimiters = " ";
const std::vector<std::string_view> stable_lexems = { "i.e.", "etc.", "..." };
const std::string_view inword_lexems = "-\'"; | {
"domain": "codereview.stackexchange",
"id": 45484,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
std::string_view data;
mutable size_t next = ((size_t)-1);
public:
Iterator(std::string_view data = {}) : data(data) { skip_delimiters(); }
std::string_view operator*() const;
Iterator& operator++();
friend bool operator==(const Iterator& it1, const Iterator& it2) { return it1.data == it2.data; }
private:
void skip_delimiters();
};
TokenRange(std::string_view data) : data(data) {}
Iterator begin() {
return Iterator(data);
}
Iterator end() {
return {};
}
};
void TokenRange::Iterator::skip_delimiters()
{
size_t skip = 0;
while (skip < data.size() && std::ranges::contains(delimiters, data[skip])) {
++skip;
}
data.remove_prefix(skip);
}
std::string_view TokenRange::Iterator::operator*() const
{
size_t i = 0;
while (!data.empty() && i == 0) {
auto it = std::ranges::find_if(stable_lexems, [&](auto stable_lexem)
{
return data.starts_with(stable_lexem);
});
if (it != stable_lexems.end()) {
next = it->size()+(data.size()-data.size());
return data.substr(0, it->size());
}
while (i < data.size() && !std::ranges::contains(delimiters, data[i])) {
const bool is_inword_char = std::ranges::contains(inword_lexems, data[i]);
if (is_inword_char && i != 0 && std::isalpha(data[i - 1])) {
++i;
continue;
}
if (!std::isalpha(data[i]) && !std::isdigit(data[i])) {
if (i == 0) {
++i;
}
break;
}
++i;
}
}
next = i + (data.size() - data.size());
return data.substr(0,i);
}
TokenRange::Iterator& TokenRange::Iterator::operator++()
{
if (next == ((size_t)-1)) {
operator*();
}
data.remove_prefix(next);
skip_delimiters(); | {
"domain": "codereview.stackexchange",
"id": 45484,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
data.remove_prefix(next);
skip_delimiters();
next = ((size_t)-1);
return *this;
}
int main()
{
std::string sample = "Let's consider, this semi-simple sample, i.e. test data with ints: 100, etc. For ... some testing...";
for (auto token : TokenRange(sample)) {
std::cout << token << " | ";
}
}
Performance
I was really surprised with the performance results I got with this new code. Tested on my local PC, the new code is about 18% faster than my original code despite the fact that std::string_view is at least twice larger (pointer and size) than const char*.
I expected the new version to be at least somewhat slower if not dramatically, but it seems that std::ranges library does its work best to provide code which compiler could optimize even better than my original code.
So, at least at this stage no ground for being afraid of expenses.
Being honest, I was used to think that low-level C-style which is closer to assembler language should compile better; the reality is different for our luck.
Here is a demo for those who interested in performance measurements.
Please, note that godbolt.org is not suited for performance measurements (at least, as I know it), so the results could differ dramatically. You could just copy the code to your local PC and check. | {
"domain": "codereview.stackexchange",
"id": 45484,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
Answer: I've make a partial review below. It's only partial, because it's become clear that the code is a bit too much work in progress. Not only the formatting, but also the correctness. Further, that also makes it pretty hard to follow. In general, it would help if you sometimes described in prose what a section of code does. I believe the size-size issue below was caused by refactoring and changing the meaning of code, too. Having those comments and proof-reading them afterwards helps avoid these issues. That said, despite these flaws, your code is not a mess and generally well-structured.
Formatting Consistency
Sometimes you put the opening { and closing } on the same line with the function header, sometimes only the opening { and sometimes neither. Use one style consistently. Don't cram too much into one line. The compiler won't generate smaller or faster code for it, but it will make the code harder to read and understand.
Cornercase Skipping Space Characters
The function skipping the initial spaces will fail if there is only spaces. I'd rewrite it like this:
while (true) {
if (input is empty) {
break;
}
if (input[0] is not a delimiter) {
break;
}
input.drop_prefix(1);
}
Putting multiple condition checks into the while condition won't generate smaller or faster code. It does make it less readable which in turn makes it more likely that you miss errors.
Use Of C-Style Casts
I'm referring to ((size_t)-1), which is bad for two reasons. Firstly, it's a magic number. Secondly, it uses C-style casts. Using a named constant (npos for example) would be much more readable.
Mutable Data
You raised the question concerning the mutable next yourself and whether it's necessary. I don't think it is. If you look at what's happening on the input sequence, you find these steps:
skip separators
find end of token to process it
skip to end of token
go to step 1 | {
"domain": "codereview.stackexchange",
"id": 45484,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
skip separators
find end of token to process it
skip to end of token
go to step 1
I've ignore the checks for EOF here for now, since it's not important here. The point is that step 2 (operator *) assumes a constant iterator. However, it must store the end of the token for step 3 (operator ++) in order to not repeat the forward scan there.
As alternative, I'd combine skipping separators and scanning for the end of the token. So, in the constructor, you do that once from the initial position and remember both the start and end of the token. operator* then just returns the subsequence you determined there. operator++ then drops the determined token from the head of the sequence and then searches for the next exactly like the constructor.
As additional benefit, multiple or zero calls to operator* will be handled much more gracefully.
Unicode Compliance
Is that even a goal for you? If your input is plain ASCII, quite a few things simplify. For example, all the locale-specific is* can much easier be implemented as ranges::contains() calls.
Skipping Stable Lexems
The code there does (data.size()-data.size()), which makes no sense to me, it's zero. Does this even work correctly? | {
"domain": "codereview.stackexchange",
"id": 45484,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
Title: Natural language text fast tokenizer (Rev.3)
Question: This is the third iteration of the Natural language text fast tokenizer code review. Special thanks goes to G. Sliepen, Toby Speight and uli who conducted previous reviews and to Matthieu M. and Adrian McCarthy who participated with important findings.
Functional specification
Implement a function for fast text tokenization handling some natural language specifics below:
Consider ‘ ‘ (space) as a delimiter, keeping a way to extend the list of delimiters later; the delimiters couldn’t be a part of other constructs.
Extract stable collocations like “i.e.”, “etc.”, “…” as a single lexem.
In case word contains “inword” characters like ‘-‘ and ‘’’ (examples: semi-column, half-, cat’s) return the whole construct as a whole lexem.
Threat all other non-alphanumeric characters as separate lexems.
Return sequences of numbers (integers without signs) as a single lexem.
Performance is critical, since the amount of data is huge. The function should be thread-safe.
Changes
The code has been reworked according to most of code review points.
The only exception is brackets { and } formatting consistency; I am still in two minds if my approach somewhere should be replaced with lengthier one.
I added the fast::isalpha (12% faster than std::isalpha) (works in assumption of 8-bit encoding) and fast::isdigit (5% faster than std::isdigit, although a little bit risky). Of course, not these percents is a point here, but the dramatic slowdown of these std operations when it comes to work with some specific locale.
Concerns
The only place I still don’t like is this calculation of offset in operator++(), but I can’t see the better way:
if (!lexem.empty()) {
std::size_t offset = lexem.data() - data.data() + lexem.size();
data.remove_prefix(offset);
}
Reservations | {
"domain": "codereview.stackexchange",
"id": 45485,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
Reservations
Methods implementation inside of the class definition done only to the sake of brevity; production code will have them implemented separately.
I have no idea why in set_locale when auto const func compiled with Clang with [&locale](unsigned char c) leads to 'std::bad_cast' while with MSVC it works just fine; the only way Clang works is with [&locale](char c), even [&locale](unsigned int c) doesn’t work. So, I have to leave this as is on godbolt.org. If you know the reason, please help me to fix to make this portable.
The code
Here is the updated code for the code review; could you please take a look and suggest further ways to improve or confirm that this is ready to go code?
Fully functional demo.
#include <algorithm>
#include <cstring>
#include <iostream>
#include <numeric>
#include <ranges>
#include <vector>
namespace fast {
class IsAlpha
{
std::array<unsigned char, std::numeric_limits<unsigned char>::max() + 1> cache = {};
public:
explicit IsAlpha(const std::locale& locale = {})
{
set_locale(locale);
}
void set_locale(const std::locale& locale)
{
auto const func = [&locale](unsigned char c) { return std::isalpha(c, locale); };
std::ranges::copy(std::views::iota(0u, cache.size())
| std::views::transform(func),
cache.begin());
}
bool operator()(unsigned char c) const { return cache[c]; }
};
IsAlpha isalpha;
bool isdigit(unsigned char c) { return c >= '0' && c <= '9'; }
}
class TokenRange {
std::string_view data;
public:
class Iterator {
const std::string_view delimiters = " ";
const std::vector<std::string_view> stable_lexems = { "i.e.", "etc.", "..." };
const std::string_view inword_symbols = "-\'"; | {
"domain": "codereview.stackexchange",
"id": 45485,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
std::string_view data;
std::string_view lexem;
public:
Iterator() {}
Iterator(std::string_view data) : data(data) { extract_lexem(); }
std::string_view operator*() const { return lexem; }
Iterator& operator++();
friend bool operator==(const Iterator& it1, const Iterator& it2) { return it1.data == it2.data; }
private:
void extract_lexem();
};
TokenRange(std::string_view data) : data(data) {}
Iterator begin() {
return Iterator(data);
}
Iterator end() {
return {};
}
};
void TokenRange::Iterator::extract_lexem()
{
while (!data.empty() && std::ranges::contains(delimiters, data.front())) {
data.remove_prefix(1);
}
auto it = std::ranges::find_if(stable_lexems, [&](auto stable_lexem)
{
return data.starts_with(stable_lexem);
});
if (it != stable_lexems.end()) {
lexem = data.substr(0, it->size());
return;
}
std::size_t index = 0;
while (index < data.size())
{
if (std::ranges::contains(delimiters, data[index])) {
break;
}
const bool is_not_alphanumeric = !fast::isalpha(static_cast<unsigned char>(data[index])) && !fast::isdigit(static_cast<unsigned char>(data[index]));
if (is_not_alphanumeric) {
if (index == 0) {
++index;
}
break;
}
const bool is_next_char_inword_symbol = (index+1) < data.size() ? std::ranges::contains(inword_symbols, data[index+1]) : false;
if (is_next_char_inword_symbol) {
++index;
}
++index;
}
lexem = data.substr(0, index);
}
TokenRange::Iterator& TokenRange::Iterator::operator++()
{
if (!lexem.empty()) {
std::size_t offset = lexem.data() - data.data() + lexem.size();
data.remove_prefix(offset);
}
extract_lexem();
return *this;
} | {
"domain": "codereview.stackexchange",
"id": 45485,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
int main()
{
{
std::string sample = "Let's consider, this semi-simple sample, i.e. test data with ints: 100 and 0x20u, etc. For ... some testing...";
for (auto token : TokenRange(sample)) {
std::cout << token << " | ";
}
}
}
Performance
For those who interested in performance evaluations, here is the code with tests.
Please, note that godbolt.org is not suited for performance measurements (at least, as I know it), so the results could differ dramatically. You could just copy the code to your local PC and check.
Answer: Answers to your questions
Concerns
The only place I still don’t like is this calculation of offset in operator++(), but I can’t see the better way:
I think it's a very minor issue. However, if you want to get rid of it in operator++(), I would make extract_lexem() update data directly. Then the only issue is that data is used in operator==(). But instead of comparing two whole string views there, consider just comparing the lexem.data() pointers to each other:
friend bool operator==(const Iterator& it1, const Iterator& it2) {
return it1.lexem.data() == it2.lexem.data();
}
Reservations
Methods implementation inside of the class definition done only to the sake of brevity; production code will have them implemented separately.
I personally don't mind if short functions are defined inside the class if it's going to be all header-only, but otherwise they should indeed be defined separately in a source file.
I have no idea why in set_locale when auto const func compiled with Clang with [&locale](unsigned char c) leads to 'std::bad_cast' […] | {
"domain": "codereview.stackexchange",
"id": 45485,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
See Toby Speight's answer.
Why is fast::isdigit() not locale-aware?
It's really weird to see you set up a cache for a locale-aware isalpha(), but then have a very simple isdigit() that just assumes digits are always like in US-ASCII. Are you really sure that's the case for all possible locales?
Move casting into fast::isalpha()/isdigit()
I would make fast::isalpha() and fast::isidigit() take a regular char as a parameter, and have those functions do the casting to unsigned char. That simplifies the code using it.
Split up extract_lexem()
Consider splitting up some of the code in extract_lexem() into separate functions. For example, the first three lines could be put into a skip_delimiters() function. This helps simplify extract_lexem() and also provides self-documentation.
About in-word lexems
Natural language is messy. I see why you want to be able to parse Let's as one token. But what if they are used as single quotes, like in 'foo' or 'hello world'? A trailing apostrophe is considered to be in-word by your tokenizer. And my examples here are where you don't want to consider that as part of a word, but in English you can have a trailing apostrophe when you have a plural of a possessive noun, like in the parents' children. And other languages might have very different rules. | {
"domain": "codereview.stackexchange",
"id": 45485,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c, tree
Title: Simple tree dictionary structure in C
Question: Good morning, everyone.
I have developed a C library whose goal is to store strings in an easily searchable tree structure (which I have dubbed tree-dictionary). I come from a math background and I have been coding in Python for a few months, but I am a still a complete beginner to C and this is my first non-trivial project in the language. My goal was to learn the basics of C syntax, non-OOP programming, and hands-on memory management.
Here comes the code:
/**
* @file treedict.h
* @author
* @brief Define a tree dictionary that stores words using a tree data structure.
*
* The dictionary will only have three basic functionalities: it will be able to store a word, to check
* if a word is in it, and, finally, to delete a word.
*
* The dictionary is defined in three steps: first, define the nodes of the tree and the related functions.
* Then, define the functions that work with the whole tree.
* Finally, define the functions that use the tree as a dictionary.
*
* Internally, each word is represented as a sequence of consecutive nodes originating at the root of the
* tree. Each node contains a letter and the last letter is marked as an end-of-word node.
* A single node of the tree may contain a letter belonging to more than one word. The words are added
* in such a way that the number of nodes is minimized.
*
* Example: a dictionary that contains the words "ant", "antilope", "animal" and "animalistic"
* can be represented as follows:
*
* a n
* i m a l E
* i s t i c E
* t E
* i l o p e E
*
* The root of a tree dictionary is supposed to be a node containing the null character '\0'.
*/
#ifndef TREEDICT_H
#define TREEDICT_H
#include <stdio.h>
#include <stdlib.h>
#include "bool.h"
/*********************************** NODE DEFINITION ****************************/ | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
c, tree
/*********************************** NODE DEFINITION ****************************/
/**
* @struct Node
* @brief Datatype for the nodes of the tree.
*/
struct Node
{
struct Node **children; /*< Array of pointers to the children of the node. */
int length_of_children_array; /*< Length of allocated children array
(NB: this is not the number of children, just the memory allocated for
their pointers.)
*/
int number_of_children; /*< Number of children of the node. */
char letter; /*< Letter represented by the node. */
bool is_end_of_word; /*< Bool marking whether this is an end-of-word node or not. */
};
typedef struct Node Node;
/*********************************** NODE FUNCTIONS ****************************/
/**
* @brief Initialize a new node instance.
*
* @param letter
* Letter of the created node.
* @return Node*
* Pointer to the created node.
*/
Node *create_node(char letter);
/**
* @brief Free a node and its children array from memory.
*
* @param node
*/
void delete_node(Node *node);
/**
* @brief Add child node to parent node.
*
* @param child
* @param parent
*/
void add_child(Node *parent, Node *child);
/**
* @brief Check if a node has children or not.
*
* @param node
* @return true
* @return false
*/
bool is_leaf(Node *node);
/**
* @brief Reallocate children array of the given node to the be as long as the number of children.
*
* @param node
*/
void resize_children_array(Node *node);
/*********************************** TREE FUNCTIONS ****************************/
/**
* @brief Free a whole tree from memory by freeing each node recursively, starting from the leaves.
*
* @param root
* Pointer to the root of the tree.
*/
void delete_tree(Node *root); | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
c, tree
/**
* @brief Print a text representation of a tree to screen.
*
* @param root
* Pointer to the root of the tree.
*/
void print_tree(Node *root);
/**
* @brief Remove a whole branch recursively from its parent and free it from memory.
*
* @param parent
* Pointer to the parent.
* @param index_of_child
* parent->children[index_of_child] is supposed to point to the root of the branch that is to be deleted.
*/
void delete_branch(Node *parent, int index_of_child);
/*********************************** TREE DICTIONARY FUNCTIONS ****************************/
/**
* @brief Add a word to a tree dictionary.
*
* @param dict
* Pointer to the root of the dictionary.
* @param word
* Pointer to string representing the word.
*/
void add_word(Node *dict, const char *word);
/**
* @brief Check if a tree dictionary contains a word.
*
* @param dict
* Pointer to the root of the dictionary.
* @param word
* Pointer to string representing the word.
* @return true
* @return false
*/
bool contains_word(Node *dict, const char *word);
/**
* @brief Delete a word from a tree dictionary.
*
* @param dict
* Pointer to the root of the dictionary.
* @param word
* Pointer to string representing the word.
*/
void delete_word(Node *dict, const char *word);
#endif
/**
* @file treedict.c
* @author
* @brief Code for treedict.h.
*
*
*
*/
#include "treedict.h"
/*********************************** NODE DEFINITION ****************************/
/**
* @brief Default length the the node.children array in a newly created node.
*/
static const int DEFAULT_LENGTH_OF_CHILDREN_ARRAY = 32;
/*********************************** NODE FUNCTIONS ****************************/
/*
Allocate memory for a new node instance and initialize its attributes before returning a pointer
to the user.
*/
Node *create_node(char letter) {
Node *node = (Node *)calloc(1,sizeof(Node)); | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
c, tree
node->children = (Node **)malloc(DEFAULT_LENGTH_OF_CHILDREN_ARRAY*sizeof(Node*));
node->length_of_children_array = DEFAULT_LENGTH_OF_CHILDREN_ARRAY;
node->letter = letter;
return node;
}
/*
Free the children array of the given node from memory and then free the node itself.
*/
void delete_node(Node *node) {
free(node->children);
free(node);
}
/*
Add a child node to a parent node. If the children array is full, reallocate it with twice its length.
*/
void add_child(Node *parent, Node *child) {
if (parent->number_of_children == parent->length_of_children_array) {
parent->children = (Node **)realloc(parent->children, 2*parent->length_of_children_array*sizeof(Node *));
parent->length_of_children_array *= 2;
}
parent->children[parent->number_of_children] = child;
++parent->number_of_children;
}
/*
Check if a child is a leaf.
*/
bool is_leaf(Node *node) {
return node->number_of_children == 0;
}
void resize_children_array(Node *node) {
node->children = (Node **)realloc(node->children, node->number_of_children*sizeof(Node *));
node->length_of_children_array = node->number_of_children;
}
/*********************************** TREE FUNCTIONS ****************************/
/*
Recursively delete a whole tree.
*/
void delete_tree(Node *root) {
int i;
for (i = 0; i < root->number_of_children; i++)
delete_tree(root->children[i]);
delete_node(root);
}
/*
If the child has any children, the branch of the tree starting with the child is
recursively deleted and its memory is freed.
*/
void delete_branch(Node *parent, int index_of_child){
int i;
delete_tree(parent->children[index_of_child]);
for(i = index_of_child; i < parent->number_of_children-1; i++)
parent->children[i] = parent->children[i+1];
parent->number_of_children--;
}
/*
Helper function that actually does the recursion for the print_tree function below. | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
c, tree
/*
Helper function that actually does the recursion for the print_tree function below.
branch is a pointer that keeps track of the node of the tree we are at.
depth tells us how deep we are in the tree.
Represent the depth by printing an equal number of dots. Then, print the current letter. If
we are at an end-of-word node, print E. Print a new-line.
Finally, call print_branch at all the children of the current node with an incremented depth.
*/
void print_branch(Node* branch, int depth) {
int i,j;
for (j = 0; j < depth; j++)
printf(".");
if (branch->is_end_of_word)
printf("%c E\n", branch->letter);
else
printf("%c\n", branch->letter);
for (i = 0; i < branch->number_of_children; i++)
print_branch(branch->children[i], depth+1);
}
void print_tree(Node *root) {
print_branch(root, 0);
}
/*********************************** TREE DICTIONARY FUNCTIONS ****************************/
void add_word(Node *dict, const char *word) {
/* i = string index, j = children index */
int i = 0, j;
bool matching_child_found;
Node *current_node = dict;
/* Travel along the branches of the dictionary to find the end of the longest
prefix of word contained in the dictionary. */
while (true) {
matching_child_found = false;
for (j = 0; j < current_node->number_of_children; j++)
if ((current_node->children[j])->letter == word[i]) {
current_node = current_node->children[j];
matching_child_found = true;
i++;
break;
}
if (!matching_child_found) break;
if (word[i] == '\0') break;
} | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
c, tree
/* If we have found a child that matches the last character of the word, it suffices to
mark it as an end-of-word character. Otherwise, if the last found character is not the last
character of the word, add one node for each of the following characters and mark the last
as an end-of-word character. */
if (matching_child_found)
current_node->is_end_of_word = true;
else {
Node *new_node;
for (; word[i] != '\0'; i++) {
new_node = create_node(word[i]);
add_child(current_node, new_node);
current_node = new_node;
}
current_node->is_end_of_word = true;
}
}
/*
Travel along the branches of the dictionary comparing the letter of each child of
current node to word[i]. If no matching child is found, return false. If a matching child
is found for the last letter of word, check whether it is an end-of-word node.
If it is, return true. If it isn't, return false.
*/
bool contains_word(Node *dict, const char *word){
/* i = string index, j = children index */
int i = 0, j;
bool matching_child_found;
Node *current_node = dict;
/* Travel along the branches of the dictionary comparing the letter of each child of
current node to word[i]. */
while (true) {
matching_child_found = false;
for (j = 0; j < current_node->number_of_children; j++)
if ((current_node->children[j])->letter == word[i]) {
current_node = current_node->children[j];
matching_child_found = true;
i++;
break;
}
/* If no mathing child is found word is no in the dictionary. */
if (!matching_child_found) return false;
/* If a node matching the last character of word is found, the word is in the dictionary
if and only if the node is an end-of-word node. */
if (word[i] == '\0') return current_node->is_end_of_word;
}
} | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
c, tree
/*
If no matching child is found, the word is not in the dictionary
and thus we can return.
If we find the node containing the last letter of word, if such node is marked as an end-of-word
node, and if such node has no children, we will deleted the entire branch with root node
"parent_of_branch_to_delete->children[index_of_branch_to_delte]",
as these nodes would become redundant.
On the other hand, we find the node containing the last letter of word but the node has children,
no node needs to be deleted and it suffices to set current_node->is_end_of_word = false.
*/
void delete_word(Node *dict, const char *word) {
/* i = string index, j = children index */
int i = 0, j, index_of_branch_to_delete;
Node *current_node = dict, *parent_of_branch_to_delete = NULL;
bool matching_child_found;
/* Travel along the branches of the dictionary comparing the letter of each child of
current node to word[i]. */
while (true) {
matching_child_found = false;
for (j = 0; j < current_node->number_of_children; j++)
if ((current_node->children[j])->letter == word[i]) {
/* Keep track of the last node encountered with more than 1 child and keep track of the index
that the subsequent branch we travel along has in its children array (parent_of_branch_to_delete
and index_of_branch_to_delete) respectively. */
if (current_node->number_of_children > 1) {
parent_of_branch_to_delete = current_node;
index_of_branch_to_delete = j;
}
matching_child_found = true;
current_node = current_node->children[j];
i++;
break;
}
/* If no matching child is found, we can return because the word is not
in the dictionary in the first place. */
if (!matching_child_found) return; | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
c, tree
/* If a node containing the last character of the string is found, but it is not marked as
an end_of_word node, the word is not in the dictionary and we can thus return. */
if (word[i] == '\0' && !current_node->is_end_of_word) return;
/* If a node containing the last character of the string is found and such node is marked as
an end-of-word node, the word is in the dictionary and we can procced
break out of the loop and remove it. */
if (word[i] == '\0' && current_node->is_end_of_word) break;
}
/* If current_node has children, it suffices to unmark it as an end-of-word node but no node
needs to be deleted as every node we have iterated along is part of other words and is not redundant.
If current_node has no children, the entire branch is sits on needs
to be remove at its last bifurcation.
*/
if (current_node->number_of_children > 0)
current_node->is_end_of_word = false;
else
delete_branch(parent_of_branch_to_delete, index_of_branch_to_delete);
}
I have done some simple unit testing with words loaded from a .txt file and everything seems to be working fairly well, but I would appreciate it very much if someone more experienced than me could take a look at my code and confirm that I'm on the right path.
My main concerns are: | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
c, tree
Although C is not geared towards OOP programming, I find it very hard not to think in terms of objects, methods and attributes. I know that OOP is not a language paradigm but a programming paradigm... still, I'm not sure whether this is the best approach to C programming. Does my code look like native C to you, or does it look like I am translating from Python to C in my head?
Is my code memory safe? Am I leaking memory somewhere? I have written deleter functions both for trees and nodes and I have tried to have them handle garbage collection. Is this recommended? Have I used
I have used a Doxygen extension for vscode to generate docstrings. Is this appropriate? Is my code readable? Is it good practice to put API documentation in .h files and implementation documentation in .c files?
Does the way I have divided the code between treedict.h and treedict.c make sense? Am I putting header files and the preprocessor to good use?
I'm following the C89 standard. I know that C99 is a bit nicer (one-line comments and all...), but my school still uses C89 and I will be required to write assignments in C89 in the next few months, so I've decided to practice C89.
Thank you very munch in advance, and have a good day.
PS: bool.h just defines bool as an enum.
/**
* @file bool.h
* @author
* @brief Custom definition for Boolean datatype.
*
*/
#ifndef BOOL_H
#define BOOL_H
typedef enum { false, true } bool;
#endif
Answer: Hide the implementation:
Node should be an opaque data type defined in the source file. The header file would then only contain the forward declaration, and the prototypes for the APIs:
#ifndef TREEDICT_H
#define TREEDICT_H
#include <stdio.h>
#include <stdlib.h>
#include "bool.h"
/*********************************** NODE DEFINITION ****************************/
struct Node;
/*********************************** NODE FUNCTIONS ****************************/
...
Node *create_node(char letter);
... | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
c, tree
and in the source file:
struct Node {
struct Node **children; /*< Array of pointers to the children of the node. */
int length_of_children_array; /*< Length of allocated children array
(NB: this is not the number of children, just the memory allocated for
their pointers.)
*/
int number_of_children; /*< Number of children of the node. */
char letter; /*< Letter represented by the node. */
bool is_end_of_word; /*< Bool marking whether this is an end-of-word node or not. */
};
See: What defines an opaque type in C, and when are they necessary and/or useful?
Do not cast the return of malloc() and family:
Starting with C89, malloc() and family returns a generic void * (as compared to the char * it originally used to) that is implicitly converted to any other pointer type (the cast is redundant, no need to clutter the codebase).
#if 0
Node *node = (Node *)calloc(1,sizeof(Node));
#else
// Take the size from the variable rather than its type to reduce the chance of a mismatch.
Node *const node = calloc(1, sizeof *node);
#endif
Side-note: Is there any specific reason you used calloc() for node but malloc() for node->children?
Check the return value of library functions:
If a function be advertised to return an error code in the event of
difficulties, thou shalt check for that code, yea, even though the
checks triple the size of thy code and produce aches in thy typing
fingers, for if thou thinkest ''it cannot happen to me'', the gods
shall surely punish thee for thy arrogance. - The Ten Commandments for C Programmers
According to the C17 standard:
The malloc function returns either a null pointer or a pointer to the allocated space.
Failing to check for it risks invoking undefined behavior by a subsequent null pointer dereference.
#if 0
Node * node = (Node *)calloc(1,sizeof(Node)); | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
c, tree
node->children = (Node **)malloc(DEFAULT_LENGTH_OF_CHILDREN_ARRAY*sizeof(Node*));
#else
Node *const node = calloc(1, sizeof *node);
if (!node) {
return NULL;
}
node->children = malloc(DEFAULT_LENGTH_OF_CHILDREN_ARRAY * sizeof node->children[0]);
if (!node->children) {
free(node);
return NULL;
}
...
}
#endif
Use braces around if/while/for statement et cetera:
I suggest always using:
if (current_node->number_of_children > 0) {
current_node->is_end_of_word = false;
} else {
delete_branch(parent_of_branch_to_delete, index_of_branch_to_delete);
}
to
if (current_node->number_of_children > 0)
current_node->is_end_of_word = false;
else
delete_branch(parent_of_branch_to_delete, index_of_branch_to_delete);
The problem with the second version is that if you go back and add a second statement to the if or else clause without adding the curly braces, your code will break. See: Apple's SSL/TLS bug.
Use size_t for sizes, cardinalities, and ordinal numbers:
int length_of_children_array; /*< Length of allocated children array
(NB: this is not the number of children, just the memory allocated for
their pointers.)
*/
int number_of_children; /*< Number of children of the node. */
The length_of_children_array and number_of_children can not be negative. Consider using size_t or some other unsigned type.
Minor:
/*
Free the children array of the given node from memory and then free the node itself.
*/
void delete_node(Node *node) {
free(node->children);
free(node);
}
I don't think this comment adds any value to the code. It is perfectly clear what delete_node() does. Consider removing it. | {
"domain": "codereview.stackexchange",
"id": 45486,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, tree",
"url": null
} |
python, python-3.x, parsing
Title: Simple Python text file Parser and Converter
Question: I wrote a simple text parser and a converter using Python 3.12.1.
The purpose is to read a text file that contains 20000 words one per line, and create an output file that contains all the queries.
Here is a snippet of the input file with fake values:
aaaaaaaa
bbbbbbbb
cccccccc
dddddddd
I got this file from an external source and I need to save those words in a database. For that reason, I need to read the input file and create the SQL queries. I can directly run the queries on the DB, but for simplicity, here I published a version with a text file for output.
I tried to implement the separation of concerns and the single responsability concept.
Here's my code:
#!/usr/bin/env python3
from abc import ABC, abstractmethod
class ParserInterface(ABC):
@abstractmethod
def parse(self) -> None:
pass
class ConverterInterface(ABC):
@abstractmethod
def convert(self, tablename: str = '', columnname: str = '') -> None:
pass
@abstractmethod
def writeOnFile(self, filename: str = '') -> None:
pass
class Parser(ParserInterface):
def __init__(self, filename: str = '') -> None:
if not filename:
raise Exception('Filename cannot be empty!')
self.filename = filename
self.tokens = set()
def parse(self) -> None:
with open(self.filename, 'r') as file:
while line := file.readline():
self.tokens.add(line.rstrip())
class TextToMySQLConverter(ConverterInterface):
def __init__(self) -> None:
self.tokens = set()
self.queries = set()
def setTokens(self, tokens: set = set()) -> None:
self.tokens = tokens | {
"domain": "codereview.stackexchange",
"id": 45487,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, parsing",
"url": null
} |
python, python-3.x, parsing
def setTokens(self, tokens: set = set()) -> None:
self.tokens = tokens
def convert(self, tablename: str = '', columnname: str = '') -> None:
if not tablename:
raise Exception('Tablename cannot be empty!')
if not columnname:
raise Exception('Columnname cannot be empty!')
for token in self.tokens:
query = 'INSERT INTO {0} ({1}) VALUES ("{2}");'.format(tablename, columnname, token)
self.queries.add(query)
def writeOnFile(self, filename: str = '') -> None:
if not filename:
raise Exception('Filename cannot be empty!')
with open(filename,'w') as file:
for query in self.queries:
file.write('{0}\n'.format(query))
class Manager:
def __init__(self, parser: ParserInterface = None, converter: ConverterInterface = None) -> None:
if not parser:
raise Exception('Parser cannot be None!')
if not converter:
raise Exception('Converter cannot be None!')
self.parser = parser
self.converter = converter
self.outputFilename = ''
self.tableName = ''
self.columnName = ''
def setOutputFilename(self, filename: str = '') -> None:
if not filename:
raise Exception('Filename cannot be empty!')
self.outputFilename = filename
def setTableName(self, tableName: str = '') -> None:
if not tableName:
raise Exception('Filename cannot be empty!')
self.tableName = tableName
def setColumnName(self, columnName: str = '') -> None:
if not columnName:
raise Exception('Filename cannot be empty!')
self.columnName = columnName
def process(self):
self.parser.parse()
tokens = self.parser.tokens
converter.setTokens(tokens)
converter.convert(self.tableName, self.columnName)
converter.writeOnFile(self.outputFilename) | {
"domain": "codereview.stackexchange",
"id": 45487,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, parsing",
"url": null
} |
python, python-3.x, parsing
converter.writeOnFile(self.outputFilename)
if __name__ == "__main__":
parser = Parser('original.txt')
converter = TextToMySQLConverter() # We can implement a strategy here
manager = Manager(parser, converter)
manager.setOutputFilename('queries.txt')
manager.setTableName("tablename")
manager.setColumnName("columname")
manager.process()
Answer: Naming
Method/function names, parameters, and variable names should be snake_case
Parser.parse
This doesn't have to be a while loop, you can just use set.update on an iterable:
class Parser(ParserInterface):
~snip~
def parse(self) -> None:
# no need for the 'r' mode, as it's the default
with open(self.filename) as fh:
self.tokens.update((line.rstrip() for line in fh))
TextToMySQLConverter.write_on_file
To write the set to the file, simply use str.join:
def write_on_file(self, filename: str = '') -> None:
if not filename:
raise Exception('Filename cannot be empty!')
with open(filename, 'w') as file:
file.write('\n'.join(self.queries))
Bug: converter
I think you meant to have self.converter in Manager.process, otherwise it's relying on a global:
def process(self):
self.parser.parse()
tokens = self.parser.tokens
# Here
converter.setTokens(tokens)
converter.convert(self.tableName, self.columnName)
converter.writeOnFile(self.outputFilename) | {
"domain": "codereview.stackexchange",
"id": 45487,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, parsing",
"url": null
} |
python, python-3.x, parsing
converter.writeOnFile(self.outputFilename)
Getters and Setters
The Manager class should be able to take output_filename, table_name, and column_name as parameters, since you seem to know these ahead of time. This way you don't need the getter and setter methods:
class Manager:
def __init__(
self,
output_filename: str,
table_name: str,
column_name: str,
parser: ParserInterface, # Make these required
converter: ConverterInterface
) -> None:
self.parser = parser
self.converter = converter
self.output_filename = output_filename
self.table_name = table_name
self.column_name = column_name
def process(self):
self.parser.parse()
self.converter.tokens = self.parser.tokens
self.converter.convert(self.table_name, self.column_name)
self.converter.write_file(self.output_filename)
However, now we have a class with two methods, one of which is __init__. This means that we should be able to safely convert this into a standalone function:
def parse_and_write(
parser: ParserInterface,
converter: ConverterInterface,
output_filename: str,
table_name: str,
column_name: str
):
parser.parse()
converter.tokens = parser.tokens
converter.convert(table_name, column_name)
converter.write_file(output_filename)
converter.tokens = parser.tokens
IMO, parser.tokens should just be an argument to converter.convert:
class TextToMySQLConverter(ConverterInterface):
def __init__(self):
self.tokens = set()
self.queries = set()
def convert(self, tokens: set[str], tablename: str, columnname: str) -> None:
for token in tokens:
query = 'INSERT INTO {0} ({1}) VALUES ("{2}");'.format(tablename, columnname, token)
self.queries.add(query) | {
"domain": "codereview.stackexchange",
"id": 45487,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, parsing",
"url": null
} |
python, python-3.x, parsing
Now, this can also be refactored in the same way. Instead of returning None, return queries:
def convert_tokens(self, tokens: set[str], tablename: str, columnname: str) -> set[str]:
queries = set()
for token in tokens:
query = 'INSERT INTO {0} ({1}) VALUES ("{2}");'.format(tablename, columnname, token)
queries.add(query)
return queries
What about write_to_file? That is a function as well, we aren't carrying around state, and it's just as easy to provide queries and output_filename as arguments:
def write_queries(file: str, queries: set[str]) -> None:
with open(file, 'w') as fh:
fh.write('\n'.join(queries))
Parser
As above, so below. Parser is also just a single function:
def parse(filename: str) -> set[str]:
with open(filename) as file:
return set((line.rstrip() for line in file))
main function
Now, your process function is really just a main:
def parse(filename: str) -> set[str]:
with open(filename, 'r') as file:
return set((line.rstrip() for line in file))
def convert_tokens(tokens: set[str], tablename: str, columnname: str) -> set[str]:
queries = set()
for token in tokens:
query = 'INSERT INTO {0} ({1}) VALUES ("{2}");'.format(tablename, columnname, token)
queries.add(query)
return queries
def write_queries(file: str, queries: set[str]) -> None:
with open(file, 'w') as fh:
fh.write('\n'.join(queries))
def main(input_file: str, output_file: str, table_name: str, column_name: str):
tokens = parse(input_file)
queries = convert_tokens(tokens)
write_queries(queries=queries, file=output_file)
return
if __name__ == "__main__":
main(
input_file='original.txt',
output_file='queries.txt',
table_name='tablename',
column_name='columnname'
)
``` | {
"domain": "codereview.stackexchange",
"id": 45487,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-3.x, parsing",
"url": null
} |
python, database, git, sqlalchemy
Title: Python SQLAlchemy database model for Version Control system
Question: This is my first time creating a non-trivial database and I was wondering what I could do better. As the title says this will be used a in toy version control system. Most of my choices feel "random" because I do not know what is and is not important. I also can not decide on what the tables should know about themselves. For example: Should objects belong to a commit? Is it ok to keep every object unlabeled and only have commits refer to them. And things like that. Any review would be welcome.
class Object(Base):
__tablename__ = "objects"
oid = Column(String, primary_key= True)
name = Column(String)
blob = Column(BLOB)
class Commit(Base):
__tablename__ = "commits"
oid = Column(String, primary_key= True)
commit_message = Column(String)
parent_oid = Column(String, nullable= True)
objects = relationship("Object", secondary="commit_object_association")
repository_id = Column(Integer, ForeignKey("repositories.id"))
repository = relationship("Repository", back_populates="commits")
# Many to Many
class CommitObjectAssociation(Base):
__tablename__ = 'commit_object_association'
commit_oid = Column(String, ForeignKey('commits.oid'), primary_key=True)
object_oid = Column(String, ForeignKey('objects.oid'), primary_key=True)
class Branch(Base):
__tablename__ = "branches"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String)
head_commit_oid = Column(String, ForeignKey("commits.oid"))
repository_id = Column(Integer, ForeignKey("repositories.id"))
repository = relationship("Repository", back_populates="branches", foreign_keys=[repository_id])
__table_args__ = (
UniqueConstraint('name', 'repository_id', name='unique_branch_per_repo'),
) | {
"domain": "codereview.stackexchange",
"id": 45488,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, database, git, sqlalchemy",
"url": null
} |
python, database, git, sqlalchemy
class Repository(Base):
__tablename__ = "repositories"
id = Column(Integer, primary_key=True, autoincrement= True)
name = Column(String)
head_oid = Column(String, nullable= True, default= None)
current_branch_id = Column(Integer, ForeignKey("branches.id"), nullable=True)
current_branch = relationship("Branch", uselist=False, foreign_keys=[current_branch_id])
creator_id = Column(Integer, ForeignKey('users.id'), nullable= False)
creator = relationship("User", back_populates="repositories")
commits = relationship("Commit", back_populates="repository")
branches = relationship("Branch", back_populates="repository", foreign_keys="Branch.repository_id")
__table_args__ = (
UniqueConstraint('name', 'creator_id', name='unique_repo_per_user'),
)
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, index=True)
name = Column(String, unique= True)
password = Column(String)
repositories = relationship("Repository", back_populates="creator")
Answer: object()
class Object(Base):
This identifier does not clash with or shadow the object() builtin.
But still, you might give a moment's thought to giving this class
a more descriptive name, something more project-specific,
maybe vc_object or vc_obj.
I do not look forward to hallway discussions or telephone calls
where we diagnose the behavior of an Object which is an object.
Also the {oid, name, blob} columns are generic enough
that this class really needs a docstring
explaining the concept it models.
As it stands, I don't understand why we'd want to represent
this pair of rows:
(1, "bob", "lorem ipsum")
(2, "bob", "lorem ipsum")
That is, what does it mean to have distinct rows with same name?
Or perhaps we wanted a UNIQUE index there?
singular
__tablename__ = "objects"
...
__tablename__ = "commits" | {
"domain": "codereview.stackexchange",
"id": 45488,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, database, git, sqlalchemy",
"url": null
} |
python, database, git, sqlalchemy
Reasonable people will disagree on this topic.
I assert it makes more sense to talk about "the commit table".
Given that it's an RDBMS table,
it is clear that we intend to store more than a single row in it.
lint
oid = Column(String, primary_key= True) ...
parent_oid = Column(String, nullable= True)
Every now and again run your code through
black *.py, please.
Or run a
linter
and heed its advice.
plural
class User(Base):
__tablename__ = "users"
It appears that some projects wind up adopting the
"tables are plural" rule because
they have a user table, and
sql-92 makes USER a reserved keyword, present in several commands.
Consider calling this the vc_user table instead.
credentials
password = Column(String) | {
"domain": "codereview.stackexchange",
"id": 45488,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, database, git, sqlalchemy",
"url": null
} |
python, database, git, sqlalchemy
Consider calling this the vc_user table instead.
credentials
password = Column(String)
That seems bad.
Do not store plaintext passwords.
Prefer to store an
argon2id
hash.
next steps
Run it, and examine some SHOW CREATE TABLE output
to verify you're happy with the defaults and with the choices
that you've made.
Pay careful attention to whether it really does make sense
for a given column to hold a NULL value.
When in doubt, add a NOT NULL constraint,
as humans can reason about queries (including JOIN queries)
on such columns more easily.
Verify that tables and indexes use a character collation
order you're happy with, typically UTF8 rather than Latin1.
Get it right from the start, so you don't have
conflicting character sets in "old" and "new" tables
which the query planner has trouble combining.
Consider using an automated tool to produce a graphical
ERD.
Give a little thought to how the system will evolve.
You will likely need a "test" DB environment,
which routinely gets nuked with DELETE commands
and/or DROP TABLE ... CASCADE commands.
The order in which you do things may matter,
with one DB object holding a reference on another,
thereby preventing a DROP.
You may want to script the details, for reproducibility.
Your "prod" DB environment will eventually hold
rows that you'd be sad if you lost them.
Write a brief script that dumps rows to .csv or .dump file.
Verify that it works,
for example by restoring "prod" into newly created "test" tables.
I don't know what your favorite "prod" database vendor would be,
possibly postgres.
Take advantage of the flexibility offered by the sqlalchemy connect string.
Test your code against more than one backend,
such as both postgres and sqlite.
It has its limitations, but sqlite database files
are a terrific match for rapid edit-debug cycles.
A simple cp command can quickly re-establish
known state from a backup DB file.
Write automated
tests
that interact with your tables. | {
"domain": "codereview.stackexchange",
"id": 45488,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, database, git, sqlalchemy",
"url": null
} |
c++, parsing
Title: Natural language text fast tokenizer (Rev.4)
Question: This is the forth iteration of the Natural language text fast tokenizer code review. Special thanks goes to G. Sliepen, Toby Speight and uli who conducted previous reviews and to Matthieu M. and Adrian McCarthy who participated with important findings.
Functional specification
Implement a class for fast text tokenization handling some natural language specifics below:
Consider ‘ ‘ (space) as a delimiter, keeping a way to extend the list of delimiters later; the delimiters couldn’t be a part of other constructs.
Extract stable collocations like “i.e.”, “etc.”, “…” as a single lexem.
In case word contains “inword” characters like ‘-‘ and ‘’’ (examples: semi-column, half-, cat’s) return the whole construct as a whole lexem.
Threat all other non-alphanumeric characters as separate lexems.
Return sequences of numbers (integers without signs) as a single lexem.
Consider out of scope paired quotes and other lexical parsing level issues.
Performance is critical, since the amount of data is huge. The function should be thread-safe.
Changes
The code has been reworked according to most of code review points.
The only exception is brackets { and } formatting consistency; I am still in two minds if my approach somewhere should be replaced with lengthier one.
Reservations
Methods implementation inside of the class definition done only to the sake of brevity; production code will have them implemented separately.
The code
Here is the updated code for the code review; could you please take a look and suggest further ways to improve or confirm that this is ready to go code?
Fully functional demo.
#include <algorithm>
#include <iostream>
#include <limits>
#include <locale>
#include <numeric>
#include <ranges>
#include <vector>
namespace fast {
template <typename Fn>
class IsSomething
{
std::array<char, std::numeric_limits<char>::max() + 1> cache = {}; | {
"domain": "codereview.stackexchange",
"id": 45489,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
public:
explicit IsSomething(Fn fn, const std::locale& locale = {})
{
set_locale(fn, locale);
}
void set_locale(Fn fn, const std::locale& locale)
{
auto const func = [&locale, &fn](char c) { return fn(c, locale); };
std::ranges::copy(std::views::iota(0u, cache.size())
| std::views::transform(func),
cache.begin());
}
bool operator()(char c) const { return cache[c]; }
};
IsSomething isalpha(std::isalpha<char>);
IsSomething isdigit(std::isdigit<char>);
bool is_not_alphanumeric(char c) {
return !fast::isalpha(c)
&& !fast::isdigit(c);
}
}
class TokenRange {
std::string_view data;
public:
class Iterator {
const std::string_view delimiters = " ";
const std::vector<std::string_view> stable_lexems = { "i.e.", "etc.", "..." };
const std::string_view inword_symbols = "-\'";
std::string_view data;
std::string_view lexem;
public:
Iterator() {}
Iterator(std::string_view data) : data(data) { extract_lexem(); }
std::string_view operator*() const { return lexem; }
Iterator& operator++();
friend bool operator==(const Iterator& it1, const Iterator& it2) { return it1.data == it2.data; }
private:
void extract_lexem();
void skip_delimiters();
bool check_for_stable_lexems();
};
TokenRange(std::string_view data) : data(data) {}
Iterator begin() {
return Iterator(data);
}
Iterator end() {
return {};
}
};
void TokenRange::Iterator::skip_delimiters()
{
while (!data.empty() && std::ranges::contains(delimiters, data.front())) {
data.remove_prefix(1);
}
} | {
"domain": "codereview.stackexchange",
"id": 45489,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
bool TokenRange::Iterator::check_for_stable_lexems()
{
auto it = std::ranges::find_if(stable_lexems,
[&](auto stable_lexem) {
return data.starts_with(stable_lexem);
});
if (it != stable_lexems.end()) {
lexem = data.substr(0, it->size());
data = data.substr(it->size());
return true;
}
return false;
}
void TokenRange::Iterator::extract_lexem()
{
skip_delimiters();
if (check_for_stable_lexems()) {
return;
}
std::size_t index = 0;
while (index < data.size())
{
if (std::ranges::contains(delimiters, data[index])) {
break;
}
if (fast::is_not_alphanumeric(data[index])) {
if (index == 0) {
++index;
}
break;
}
const bool is_next_char_inword_symbol = (index+1) < data.size() ? std::ranges::contains(inword_symbols, data[index+1]) : false;
if (is_next_char_inword_symbol) {
++index;
}
++index;
}
lexem = data.substr(0, index);
data = data.substr(index);
}
TokenRange::Iterator& TokenRange::Iterator::operator++()
{
extract_lexem();
return *this;
}
int main()
{
std::string sample = "Let's consider, this cats' semi-simple sample, i.e. test data with ints: 100 and 0x20u, etc. For ... some testing...";
for (auto token : TokenRange(sample)) {
std::cout << token << " | ";
}
}
Answer: It doesn't output the last token
Your own demo shows that it is not printing the last token ....
Missing test suite
You should create a test suite that tests your class TokenRange. It could look like:
int main() {
struct {
std::string input;
std::vector<std::string> expected;
} samples = {
{"Hello, world!", {"Hello", ",", "world", "!"}},
…
};
for (auto& sample: samples) {
assert(std::ranges::equal(TokenRange(sample.input), sample.expected));
}
} | {
"domain": "codereview.stackexchange",
"id": 45489,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
c++, parsing
Make sure you test all the features of your tokenizer, like the stable lexems and in-word lexems, different locales, and corner cases like an empty input, input with only delimiters, and so on.
But the above code also doesn't compile, which brings me to:
It's not a valid std::ranges::input_iterator
While iterating over a TokenRange works, algorithms like std::ranges:equal() don't because you are not fulfilling all of the requirements of std::ranges::input_range. In particular, it is because your TokenRange::Iterator is not a valid std::ranges::input_iterator.
Because of this, your TokenRange will fail to work with many standard algorithms and views. Make sure you fulfill all the requirements. In particular, your iterator needs a post-increment operator and the member type difference_type to be defined.
Some nitpicking
IsSomething doesn't sound like a good name. Everything is something, so it's meaningless. A better name would be CharacterClass.
Avoid negative names. So instead of is_not_alphanumeric(), prefer is_alphanumeric(), and the caller can then just use ! to invert the meaning.
An even better name is isalnum(), to match std::isalnum(). | {
"domain": "codereview.stackexchange",
"id": 45489,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, parsing",
"url": null
} |
python, multithreading
Title: Static media gallery generator with python 3.12
Question: The main concern is the hard requirement for python3.12 since I couldn't find a clean replacement for pathlib.Path.relative_to(root, walk_up=True) in earlier pythons. I need it in order to generate relative URLs like ../../../.thumbs/some/dir/image.jpg
Also not sure if the multithreading is sane for parallel thumbnail generation as concurrency has never been my forte.
Would also be good to know if there is anything buggy, specifically ugly or inefficient that can be improved.
This is a stripped down version of the gist here — the only difference is in the amount of the js/css code which I cut down here in order to make it shorter. The gist version adds JS keyboard navigation and opens the assets in an overlay div rather than a new tab.
#!/usr/bin/env python3.12
# ^^^^^^^^^^^^^^^^^^^^^^^
# DEPENDENCIES:
# imagemagick:
# ubunbtu/debian: sudo apt install imagemagick
# macos: brew install imagemagick
# !!!MUST USE python3.12 of later!!!
# install python 3.12 on ubuntu:
# sudo add-apt-repository ppa:deadsnakes/ppa
# sudo apt install python3.12
#
# install python 3.12 on macos:
# brew install python@3.12
import os.path
import subprocess
from datetime import datetime
from pathlib import Path
from sys import argv
from urllib.parse import quote
from concurrent.futures import ThreadPoolExecutor
THUMBS_DIR = '~/.thumbs'
OUTPUT_FILE_NAME = "index.html"
IMAGE_SVG = ".svg"
IMAGE_HEIC = ".heic"
IMAGE_EXTENSIONS = (".jpg", ".jpeg", ".png", ".bmp", ".webp", ".gif", IMAGE_HEIC)
VIDEO_EXTENSIONS = (".mp4", ".mkv", ".webm", ".3gp", ".mov", ".mkv", ".ogv", ".mpg", ".mpeg")
AUDIO_EXTENSIONS = (".mp3", ".ogg", ".wav")
ASSET_EXTENSIONS = IMAGE_EXTENSIONS + VIDEO_EXTENSIONS + AUDIO_EXTENSIONS | {
"domain": "codereview.stackexchange",
"id": 45490,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, multithreading",
"url": null
} |
python, multithreading
ASSET_EXTENSIONS = IMAGE_EXTENSIONS + VIDEO_EXTENSIONS + AUDIO_EXTENSIONS
class Asset:
def __init__(self, type, path, created_date, html, thumbnail_path=None):
self.type = type
self.path = path
self.created_date = created_date
self.html = html
self.thumbnail = thumbnail_path
def get_creation_date(file_stat):
try:
return file_stat.st_birthtime
except AttributeError:
# We are probably on Linux. No way to get the creation date, only the last modification date.
return file_stat.st_mtime
UNITS_MAPPING = [
(1024 ** 5, 'P'),
(1024 ** 4, 'T'),
(1024 ** 3, 'G'),
(1024 ** 2, 'M'),
(1024 ** 1, 'K'),
(1024 ** 0, (' byte', ' bytes')),
]
def pretty_size(bytes, units=UNITS_MAPPING):
"""Human-readable file sizes.
ripped from https://pypi.python.org/pypi/hurry.filesize/
"""
for factor, suffix in units:
if bytes >= factor:
break
amount = int(bytes / factor)
if isinstance(suffix, tuple):
singular, multiple = suffix
if amount == 1:
suffix = singular
else:
suffix = multiple
return str(amount) + suffix
def collect_media_assets(root: Path, thumbsdir: Path):
homedir = os.path.expanduser('~')
assets: list[Asset] = []
for asset_path in root.rglob("*"):
if asset_path.is_file() and asset_path.suffix.lower() in ASSET_EXTENSIONS:
if asset_path.name.startswith('._'): # skip macos junk
continue
size_pretty = pretty_size(asset_path.stat().st_size)
created_date = datetime.fromtimestamp(get_creation_date(asset_path.stat()))
created_date_formatted = created_date.strftime("%Y-%m-%d %H:%M:%S")
relative_path = str(asset_path.relative_to(root))
escaped_path = quote(relative_path) | {
"domain": "codereview.stackexchange",
"id": 45490,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, multithreading",
"url": null
} |
python, multithreading
if asset_path.suffix.lower() == IMAGE_SVG:
assets.append(Asset('svg',
relative_path,
created_date, f"""<a class="item" href="{escaped_path}" target="_blank" title="{relative_path} size: {size_pretty}; created: {created_date_formatted}">
<img src="{relative_path}" loading="lazy">
</a>"""))
elif asset_path.suffix.lower() in IMAGE_EXTENSIONS:
segment = str(root.absolute()).replace(homedir, '').lstrip('/')
thumbnail_path: Path = thumbsdir / segment / relative_path
if asset_path.suffix.lower() == IMAGE_HEIC:
thumbnail_path = thumbnail_path.with_suffix(".jpg")
thumbnail_path_relative = thumbnail_path.relative_to(root, walk_up=True) ## IMPORTANT: REQUIRED python 3.12 minimum!
thumbnail_path_relative_escaped = quote(str(thumbnail_path_relative))
assets.append(Asset('image',
relative_path,
created_date, f"""<a class="item" href="{escaped_path}" target="_blank" title="{relative_path} size: {size_pretty}; created: {created_date_formatted}">
<img src="{thumbnail_path_relative_escaped}" loading="lazy">
</a>""", thumbnail_path))
elif asset_path.suffix.lower() in VIDEO_EXTENSIONS:
assets.append(Asset('video', relative_path, created_date, f"""<a class="item" href="{escaped_path}" target="_blank" title="{relative_path} size: {size_pretty}; created: {created_date_formatted}">
<video preload="none" controls><source src="{escaped_path}"></video>{asset_path.name}
</a>""")) | {
"domain": "codereview.stackexchange",
"id": 45490,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, multithreading",
"url": null
} |
python, multithreading
elif asset_path.suffix.lower() in AUDIO_EXTENSIONS:
assets.append(Asset('audio', relative_path, created_date, f"""<a class="item" href="{escaped_path}" target="_blank" title="{relative_path} size: {size_pretty} KB; created: {created_date_formatted}">
<audio controls src="{escaped_path}"></audio>{asset_path.name}
</a>"""))
print(f'{asset_path}')
return assets
def generate_gallery_html(html_data, output_file_path: Path):
with output_file_path.open(mode="w", encoding="utf-8") as fout:
fout.write('''<html>
<head>
<style>
* {
font-family: system-ui, sans-serif;
}
body>div {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(321px, 1fr));
gap: 5px;
}
.item {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
}
img, video {max-width: 321px;
height: auto;
display: table-cell;
}
a {
text-decoration: none;
font-size: 14px;
}
a:hover {
color: #0095e4;
}
a:visited {
color: #800080;
}
a:visited:hover {
color: #b900b9;
}
</style>
</head>
<body>
<div>'''
+ html_data
+ '''</div>
</body>
</html>''')
print(f"Gallery HTML file generated: {output_file_path.absolute()}")
def generate_thumbnails(assets: list[Asset], basedir: Path):
with ThreadPoolExecutor(max_workers=42) as executor:
asset: Asset
for asset in assets:
if asset.type == 'image':
executor.submit(make_thumbnail, asset, basedir) | {
"domain": "codereview.stackexchange",
"id": 45490,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, multithreading",
"url": null
} |
python, multithreading
def make_thumbnail(asset: Asset, basedir: Path):
thumbnail_dir: Path = asset.thumbnail.parent
thumbnail_dir.mkdir(parents=True, exist_ok=True)
if asset.thumbnail.exists():
print(f'⚡️\tSKIP existing thumbnail: {asset.thumbnail.absolute()}')
return
#### TODO: FIXME: validate path to convert: can be e.g. /usr/bin/convert, /usr/local/bin/convert...
args = ["/opt/homebrew/bin/convert",
str(basedir / asset.path),
"-strip",
"-quality", "60",
"-thumbnail", "300x"]
if Path(asset.path).suffix == '.heic':
args += ['-format', 'jpg']
args.append(str(asset.thumbnail.absolute()))
proc = subprocess.run(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = proc.stdout.decode()
print('✅', asset.path, str(asset.thumbnail.absolute()), result)
err = proc.stderr.decode()
if err:
print("", err)
if __name__ == "__main__":
thumbnails_dir: Path = Path(os.path.expanduser(THUMBS_DIR))
thumbnails_dir.mkdir(exist_ok=True)
root_dir = len(argv) > 1 and argv[1] or '.' # Use 1st arg or current directory as root
base_dir: Path = Path(root_dir).absolute()
asset_list: list[Asset] = collect_media_assets(base_dir, thumbnails_dir)
if not asset_list:
print("No media files found.")
else:
asset_list.sort(key=lambda a: a.created_date, reverse=True)
html_content: str = '\n'.join(asset.html for asset in asset_list)
generate_thumbnails(asset_list, base_dir)
generate_gallery_html(html_content, base_dir / OUTPUT_FILE_NAME)
if asset_list and len(asset_list):
print(f'''
Total assets: {len(asset_list)}
Images: {len([a for a in asset_list if a.type == 'image'])}
Audios: {len([a for a in asset_list if a.type == 'audio'])}
Videos: {len([a for a in asset_list if a.type == 'video'])}
''') | {
"domain": "codereview.stackexchange",
"id": 45490,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, multithreading",
"url": null
} |
python, multithreading
Answer: It's clear the author took great care when writing this code,
DRYing things up as the opportunity arose.
modern interpreter
#!/usr/bin/env python3.12
# ^^^^^^^^^^^^^^^^^^^^^^^
# ...
# !!!MUST USE python3.12 [or] later!!!
Gosh, that sure is a lot of shouting.
main concern is the hard requirement for python3.12
I'm afraid I don't share your concern.
It's not an onerous requirement,
and it will only get easier over time.
You helpfully include install instructions
covering two popular platforms; I thank you for that.
The python3.12 shebang works today, but will need updating soon.
I use apt and brew all the time, but I'm afraid I
seldom use them to affect interpreter details.
I would expect a conda environment.yml
or pip requirements.txt to choose the interpreter being used
in a particular venv.
Writing readme's and comments is nice, don't stop doing it.
However the machine pays attention to details much better than
humans do. Strive to produce an informative diagnostic.
What you really care about is something like this:
assert sys.version_info[:2] >= (3. 12) # we need Path.relative_to()
Or perhaps raise with a helpful message for downrev interpreter.
With that in hand, a simple python (or python3) shebang
should suffice, working fine next year plus the year after that.
source control
... version of the gist
You are clearly familiar with creating multiple git repos.
Git is a much better way of tracking version details than gist,
both for the code's author and its consumers.
'Nuff said.
modern libraries
import os.path
...
from pathlib import Path
We have established that pathlib will definitely be available.
Consider changing your os.path.expanduser() calls
to be Path( ... ).expanduser(), just for consistency.
Habits like that will tend to improve the design of your Public APIs,
with str parameters being replaced by Path parameters.
main guard
if __name__ == "__main__":
thumbnails_dir: Path = Path( ...
base_dir: Path = Path( ... | {
"domain": "codereview.stackexchange",
"id": 45490,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, multithreading",
"url": null
} |
python, multithreading
I really like the guard, thank you.
It's good that a unittest can safely import this without side effects.
Sorry, but I'm not quite getting the whole javaesque Bond james = Bond()
thing. It's no mystery to mypy, nor to a human reading this code,
what type those directory variables will be.
This code stanza isn't exactly too long.
But it's starting to get that way,
which suggests banishing it to within a def main(): function.
cracking argv
root_dir = len(argv) > 1 and argv[1] or '.' # Use 1st arg or current directory as root
This is mostly clear.
But not as clear as could be.
You're relying on the (unambiguous!) precedence of and vs or.
Consider helping out humans by adding redundant ( ) parens,
so there's no need for that comment.
Better, you might import typer and punt the whole thing to a library,
getting --usage help for free.
Kernighan & Richie would always crack argv by hand, but you don't need to.
It's unclear how base_dir and root_dir are distinct concepts.
Prefer to .absolute() the input before assigning it to a name.
annotations
I don't understand this line:
asset_list: list[Asset] = collect_media_assets(base_dir, thumbnails_dir)
Oohhh, wait, now I get it.
Look at the signature of the callee.
def collect_media_assets(root: Path, thumbsdir: Path): | {
"domain": "codereview.stackexchange",
"id": 45490,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, multithreading",
"url": null
} |
python, multithreading
It doesn't describe the return type.
(Based on an annotated assignment + return it's easy to work out.
But still.)
Describe the return type, please.
In general, if you feel the need to annotate some variable assignment,
take a step back and wonder if mypy is really asking you
to annotate a function signature.
Fixing up the signature would be far more valuable,
at this and at other call sites.
Granted, there are some situations that call for
annotation in the middle of a chunk of code,
usually when creating an empty container.
While dict() is beautiful,
it is alas also very flexible.
So we might need to distinguish
d: dict[str, int] = {} from
d: dict[int, str] = {}, and so on.
attrgetter
Clearly this works:
asset_list.sort(key=lambda a: a.created_date, reverse=True)
Prefer
attrgetter,
so we have key=attrgetter('created_date').
truthy
I don't understand this.
if asset_list and len(asset_list):
Your annotation clearly explained that None shall
not be coming back from collect_media_assets.
To say nothing of the fact that we just called .sort()
on it, a function which None lacks.
No need for comparing length to zero again,
as the first conjunct already did that.
filter
Images: {len([a for a in asset_list if a.type == 'image'])}
We construct a list, then throw it away.
But len() doesn't need a container -- len(generator) works just fine.
These would work well as filter() expressions.
aliasing builtins
class Asset:
def __init__(self, type, path, ... ):
self.type = type | {
"domain": "codereview.stackexchange",
"id": 45490,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, multithreading",
"url": null
} |
python, multithreading
This didn't cause any confusion with the type() function.
But still, consider using the conventional approach,
which would name it type_.
Similarly for bytes_.
It looks like the Asset class could be a
dataclass.
tricky assumptions
I get what you're trying to do here with the segment assignment.
elif asset_path.suffix.lower() in IMAGE_EXTENSIONS:
segment = str(root.absolute()).replace(homedir, '').lstrip('/')
thumbnail_path: Path = thumbsdir / segment / relative_path
But it depends on some assumptions
about public variable relationships which are
valid for default values, but not in general.
We could assert thumbnail_path.exists(),
but without confidence that it shall always be True.
Consider having more strictly defined relationships among your variables.
This might come in the form of insisting on certain names under $HOME.
Many packages choose to claim a chunk of FS namespace,
insisting that files reside under a package-owned root.
separated
fout.write('''<html> ...
</html>''')
Uggh, I hate that.
Pascal has ; semicolon separated statements,
unlike PL/I's semicolon terminated statements.
C and others follow one or the other convention.
I assert that a text file (such as *.html)
consists of newline terminated lines.
Not newline separated lines as we see here.
Last character before EOF should be \n newline.
redundant annotation
def generate_thumbnails(assets: list[Asset], basedir: Path):
...
asset: Asset
for asset in assets:
What a lovely signature!
Consider ending it with ... ) -> None:
I can't imagine why man or machine would need the asset hint,
given that first parameter offered a very clear introduction.
execlvp
#### TODO: FIXME: validate path to convert: can be e.g. /usr/bin/convert, /usr/local/bin/convert...
args = ["/opt/homebrew/bin/convert", | {
"domain": "codereview.stackexchange",
"id": 45490,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, multithreading",
"url": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.