text stringlengths 1 2.12k | source dict |
|---|---|
react.js, memoization
useEffect(() => {
if (!cookieConsentSet) {
openModal();
}
}, [cookieConsentSet]);
if (cookieConsentSet) return null;
return (
<Modal
style={NO_MODAL_OVERLAY}
isOpen={modalIsOpen}
className={styles.cookiePopup}
contentLabel='Cookie Notification'
portalClassName={styles.modalOverlayWrap} // cannot use overlayClassName
>
<div className={`${styles.content} ${showMoreInfo ? styles.expanded : ''}`}>
{showMoreInfo && <h3>Cookie Preferences</h3>}
<p>
{descriptiveText} <Link to='/cookie-policy'>Learn more</Link>
</p>
<div
className={`${styles.buttonsContainer} ${styles.spaced} ${styles.largeButtons} ${styles.cookieBtns} ${styles.specificity}`}
>
<button onClick={acceptCookies} className={styles.btnPrimary}>
Allow All Cookies
</button>
<button onClick={manageCookies} className={styles.btnGrey}>
{btnText}
</button>
</div>
</div>
</Modal>
);
};
I have a couple of questions in particular:
Am I correct in memoising the btnText and descriptiveText variables?
Should these functions also be memoised (with useCallback): openModal, closeModal, acceptCookies, declineCookies & manageCookies?
Presumably both should be memoised, otherwise they will be recreated on every page, even when the modal doesn't show up (if the user has already selected an option) because the component will still run.
Any advice would be appreciated, thank you. | {
"domain": "codereview.stackexchange",
"id": 44359,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "react.js, memoization",
"url": null
} |
react.js, memoization
Answer: When cookieConsentSet is truthy you return null; and none of the other code matters. You already know this by the second line of your code. Of course you cannot just return null; on line 3 because that would violate the rules of hooks by making the remaining hook calls conditional. But you can conditionally return a component.
My primary strategy here is to create an outer layer that checks cookieConsentSet. That way you can avoid executing anything else if you don't need to. If you were doing this sort of thing without using React contexts, then you would pass the setCookieConsent function from the outer component to the inner component via props. In this case you don't need to pass any props because both components can access the context.
Separated components, without any other changes:
const InnerCookiePopup = () => {
const { cookieConsentSet, setCookieConsent } = useContext<CookieContextType>(CookieContext);
// note: cookieConsentSet just gets a consent cookie value (so is null or string)
const [modalIsOpen, setIsOpen] = useState<boolean>(false);
const [showMoreInfo, setShowMoreInfo] = useState<boolean>(false);
const btnText = useMemo<string>(() => `${showMoreInfo ? 'Allow Essential' : 'Manage'} Cookies`, [showMoreInfo]);
const descriptiveText = useMemo<string>(
() => (showMoreInfo ? COOKIE_BANNER_MSG_DETAILED : COOKIE_BANNER_MSG_BRIEF),
[showMoreInfo]
);
const openModal = () => {
setIsOpen(true);
};
const closeModal = () => {
setIsOpen(false);
setShowMoreInfo(false);
};
const acceptCookies = () => {
const expiryDate: Date = new Date(new Date().setFullYear(new Date().getFullYear() + 1)); // 1 year from now
document.cookie = `cookieConsent=all; expires=${expiryDate.toUTCString()};`;
setCookieConsent('all');
closeModal();
}; | {
"domain": "codereview.stackexchange",
"id": 44359,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "react.js, memoization",
"url": null
} |
react.js, memoization
const declineCookies = () => {
const expiryDate: Date = new Date(new Date().setDate(new Date().getDate() + 1)); // expires in 1 day
document.cookie = `cookieConsent=essential; expires=${expiryDate.toUTCString()};`;
setCookieConsent('essential');
closeModal();
};
const manageCookies = () => {
if (!showMoreInfo) {
setShowMoreInfo(true);
return;
}
declineCookies();
};
useEffect(() => {
if (!cookieConsentSet) {
openModal();
}
}, [cookieConsentSet]);
return (
<Modal
style={NO_MODAL_OVERLAY}
isOpen={modalIsOpen}
className={styles.cookiePopup}
contentLabel='Cookie Notification'
portalClassName={styles.modalOverlayWrap} // cannot use overlayClassName
>
<div className={`${styles.content} ${showMoreInfo ? styles.expanded : ''}`}>
{showMoreInfo && <h3>Cookie Preferences</h3>}
<p>
{descriptiveText} <Link to='/cookie-policy'>Learn more</Link>
</p>
<div
className={`${styles.buttonsContainer} ${styles.spaced} ${styles.largeButtons} ${styles.cookieBtns} ${styles.specificity}`}
>
<button onClick={acceptCookies} className={styles.btnPrimary}>
Allow All Cookies
</button>
<button onClick={manageCookies} className={styles.btnGrey}>
{btnText}
</button>
</div>
</div>
</Modal>
);
}
export const CookiePopup = () => {
const { cookieConsentSet } = useContext<CookieContextType>(CookieContext);
if (cookieConsentSet) return null;
return (
<InnerCookiePopup/>
);
}; | {
"domain": "codereview.stackexchange",
"id": 44359,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "react.js, memoization",
"url": null
} |
react.js, memoization
if (cookieConsentSet) return null;
return (
<InnerCookiePopup/>
);
};
As far as memoization, there's not a lot of state changes or expensive computations happening here. Filling a string template is trivial and the ternary in descriptiveText is even more trivial. I would use React.memo to memoize the inner component as a whole. Yes your function will still get re-initialized when the component state changes and yes you could use useCallback, but it's probably overkill.
It's more important to make sure that the setCookieConsent function in your CookieContext is memoized so that you don't have unnecessary re-renders in the components that use this context. | {
"domain": "codereview.stackexchange",
"id": 44359,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "react.js, memoization",
"url": null
} |
react.js, memoization
I'm not understanding the need for the useEffect. It seems like you could just have the initial state of the modal be open. But I could be missing something. Maybe it's needed for a UI effect of opening the modal.
I'm also not loving that you set the document.cookie = property from inside your modal component. It's only executed inside callback functions so it's not a huge problem, but you might consider handling that through some other hook or component related to your CookieContext.
This modal covers both the "Manage Cookies" and the "Accept Cookies" situations. It's very confusing to me that you still display a <button onClick={manageCookies}> even when we are in "Manage Cookies" mode. It seems like the button acts differently? So then I don't think it should be co-mingled in this way. Maybe have separate buttons that are conditionally rendered? Maybe have a separate ModalUI component and pass down buttonLabel and onClickButton props? There's definitely more that can be cleaned up here.
This replaces your btnText and manageCookies and, to me, feels more readable and understandable.
{showMoreInfo ? (
<button onClick={declineCookies} className={styles.btnGrey}>
Allow Essential Cookies
</button>
) : (
<button onClick={() => setShowMoreInfo(true)} className={styles.btnGrey}>
Manage Cookies
</button>
)} | {
"domain": "codereview.stackexchange",
"id": 44359,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "react.js, memoization",
"url": null
} |
game, react.js, jsx, redux
Title: Simple memo game project
Question: I created my first React app with Redux and this is a simple memo game with the possibility to select a difficulty level. I am new to the React world, so it would be great if you could review my project :)
Project structure:
src
โ App.js
โ App.scss
โ App.test.js
โ cards-data.js
โ index.js
โ index.scss
โ reportWebVitals.js
โ setupTests.js
โ
โโโโcomponents
โ โโโโbutton
โ โ button.component.jsx
โ โ button.styles.scss
โ โ
โ โโโโcards-directory
โ โ cards-directory.component.jsx
โ โ cards-directory.styles.scss
โ โ
โ โโโโcounter
โ โ counter.component.jsx
โ โ counter.styles.scss
โ โ
โ โโโโsingle-card
โ single-card.component.jsx
โ single-card.styles.scss
โ
โโโโroutes
โ โโโโgame-area
โ โ game-area.component.jsx
โ โ game-area.component.styles.scss
โ โ
โ โโโโmenu
โ menu.component.jsx
โ menu.styles.scss
โ
โโโโstore
โ reducer.utils.js
โ root-reducer.js
โ store.js
โ
โโโโcards
โ cards.action.js
โ cards.reducer.js
โ cards.selector.js
โ cards.types.js
โ
โโโโcounter
โ counter.action.js
โ counter.reducer.js
โ counter.selector.js
โ counter.types.js
โ
โโโโgame-difficulty
game-difficulty.action.js
game-difficulty.reducer.js
game-difficulty.selector.js
game-difficulty.types.js
App.js
import Menu from './routes/menu/menu.component';
import GameArea from './routes/game-area/game-area.component';
import { Routes, Route } from 'react-router-dom';
import './App.scss'; | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
import { Routes, Route } from 'react-router-dom';
import './App.scss';
function App() {
return (
<div className="app-container">
<Routes>
<Route index element={<Menu />}></Route>
<Route path="/game-area" element={<GameArea />}></Route>
</Routes>
</div>
);
}
export default App;
App.scss
.app-container {
text-align: center;
display: flex;
justify-content: center;
margin: 12vh 0;
}
cards-data.js
export const CARDS_DATA = [
{
id: 0,
name: 'empty_card',
imagePath: 'https://i.ibb.co/Jsznnmg/empty-card.png'
},
{
id: 1,
name: 'buffalo_card',
imagePath: 'https://i.ibb.co/4Zyt8yR/buffalo-card.png'
},
{
id: 2,
name: 'desert_fennec_fox_card',
imagePath: 'https://i.ibb.co/G7bPZKD/desert-fennec-fox-card.png'
},
{
id: 3,
name: 'duck_card',
imagePath: 'https://i.ibb.co/Y05XH1K/duck-card.png'
},
{
id: 4,
name: 'kangaroo_card',
imagePath: 'https://i.ibb.co/KGPXL5W/kangaroo-card.png'
},
{
id: 5,
name: 'leopard_card',
imagePath: 'https://i.ibb.co/jGYcQ1v/leopard-card.png'
},
{
id: 6,
name: 'lion_card',
imagePath: 'https://i.ibb.co/x1Vqykv/lion-card.png'
},
{
id: 7,
name: 'raccoon_card',
imagePath: 'https://i.ibb.co/7v0dKRK/raccoon-card.png'
},
{
id: 8,
name: 'red_bear_card',
imagePath: 'https://i.ibb.co/qgjMjNJ/red-bear-card.png'
},
{
id: 9,
name: 'tiger_card',
imagePath: 'https://i.ibb.co/NTnjGSn/tiger-card.png'
},
{
id: 10,
name: 'zebra_card',
imagePath: 'https://i.ibb.co/jk1GDHg/zebra-card.png'
}
];
index.js | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
index.js
import React from 'react';
import ReactDOM from 'react-dom/client';
import './index.scss';
import App from './App';
import reportWebVitals from './reportWebVitals';
import { BrowserRouter } from 'react-router-dom';
import { Provider } from 'react-redux';
import { store, persistor } from './store/store';
import { PersistGate } from 'redux-persist/integration/react';
const root = ReactDOM.createRoot(document.getElementById('root'));
root.render(
<React.StrictMode>
<Provider store={store}>
<PersistGate loading={null} persistor={persistor}>
<BrowserRouter>
<App />
</BrowserRouter>
</PersistGate>
</Provider>
</React.StrictMode>
);
// If you want to start measuring performance in your app, pass a function
// to log results (for example: reportWebVitals(console.log))
// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
reportWebVitals();
index.scss
body {
margin: 0;
padding: 0;
width: 100%;
height: 100%;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
background-color: #F0D7D1;
}
code {
font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
monospace;
}
button.component.jsx
import './button.styles.scss';
export const BUTTON_TYPE_CLASSES = {
start: 'start',
restart: 'restart',
difficultyEasy: 'difficulty-easy',
difficultyMedium: 'difficulty-medium',
difficultyHard: 'difficulty-hard',
};
const Button = ({ children, buttonType, ...otherProps }) => {
return (
<button className={`button-container ${buttonType}`} {...otherProps}>
{children}
</button>
);
};
export default Button;
button.styles.scss
.button-container {
height: 40px;
width: 180px;
font-size: 20px;
color: #4a586e;
} | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
.button-container {
height: 40px;
width: 180px;
font-size: 20px;
color: #4a586e;
}
.start {
background-color: #feac92;
border: 3px dashed #4a586e;
margin: 20px 0;
}
.difficulty-easy,
.difficulty-medium,
.difficulty-hard {
margin-bottom: 5px;
width: 130px;
}
.difficulty-easy {
background-color: #d4f794;
border: 3px dashed #8ea663;
}
.difficulty-medium {
background-color: #f3f59a;
border: 3px dashed #979957;
}
.difficulty-hard {
background-color: #f5bd9a;
border: 3px dashed #995f3a;
}
.difficulty-easy:focus {
background-color: #b3d07e;
}
.difficulty-medium:focus {
background-color: #c1c27a;
}
.difficulty-hard:focus {
background-color: #ca9c80;
}
.restart {
background-color: #fef0ef;
border: 2px solid #ae5f5f;
border-radius: 20px 20px;
color: #ae5f5f;
margin-top: 10px;
}
cards-directory.component.jsx
import { useEffect } from 'react';
import { useSelector, useDispatch } from 'react-redux';
import {
selectCards,
selectEmptyImagePath,
} from '../../store/cards/cards.selector';
import { selectCounterValue } from '../../store/counter/counter.selector';
import {
setCurrentImagePath,
setIsCardDisabled,
setIsRotationDisabled,
} from '../../store/cards/cards.action';
import { setCounterValue } from '../../store/counter/counter.action';
import SingleCard from '../single-card/single-card.component';
import './cards-directory.styles.scss';
const CardsDirectory = ({ cardType }) => {
const dispatch = useDispatch();
const cards = useSelector(selectCards);
const counterValue = useSelector(selectCounterValue);
const emptyImagePath = useSelector(selectEmptyImagePath);
const cardsToCompare = cards.filter(
(card) => card.currentImagePath === card.imagePath && !card.isCardDisabled
);
const isRotationDisabled = true; | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
const toggleEmptyImagePath = (cardsToSetEmptyImagePath) =>
setTimeout(() => {
dispatch(
setCurrentImagePath(cards, cardsToSetEmptyImagePath, emptyImagePath)
);
}, 1000);
const toggleIsCardDisabled = (cardsToDisable) =>
dispatch(setIsCardDisabled(cards, cardsToDisable));
const toggleCounterValue = () => {
const increasedCounterValue = counterValue + 1;
setTimeout(() => {
dispatch(setCounterValue(increasedCounterValue));
}, 1000);
};
const toggleIsRotationDisabled = () =>
dispatch(setIsRotationDisabled(isRotationDisabled));
const toggleIsRotationEnabled = () => setTimeout(() => {
dispatch(setIsRotationDisabled(!isRotationDisabled));
}, 1000);
const compareCards = () => {
cardsToCompare.reduce((prevCard, currentCard) =>
prevCard.imagePath !== currentCard.imagePath
? toggleEmptyImagePath(cardsToCompare)
: toggleIsCardDisabled(cardsToCompare)
);
};
useEffect(() => {
if (cardsToCompare.length < 2) return;
toggleIsRotationDisabled();
compareCards();
toggleIsRotationEnabled();
toggleCounterValue();
});
return (
<div className={`cards-directory-container ${cardType}`}>
{cards.map((card) => (
<SingleCard key={card.id} card={card} />
))}
</div>
);
};
export default CardsDirectory;
cards-directory.styles.scss
.cards-directory-container {
background-color: #fef0ef;
border: 5px solid #fef0ef;
border-radius: 4%;
display: flex;
flex-wrap: wrap;
justify-content: center;
align-content: center;
gap: 5px;
}
.easy {
width: 500px;
height: 380px;
}
.medium {
width: 500px;
height: 500px;
}
.hard {
width: 620px;
height: 500px;
}
counter.component.jsx | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
.hard {
width: 620px;
height: 500px;
}
counter.component.jsx
import { useSelector } from 'react-redux';
import { selectCounterValue } from '../../store/counter/counter.selector';
import './counter.styles.scss';
const Counter = () => {
const currentCounterValue = useSelector(selectCounterValue);
return (
<div className='counter-container'>
<span className='counter-value'>{currentCounterValue}</span>
</div>
);
};
export default Counter;
counter.styles.scss
.counter-container {
height: 34px;
width: 180px;
background-color: #fef0ef;
border: 2px solid #ae5f5f;
margin-top: 10px;
}
.counter-value {
font-size: 20px;
color: #ae5f5f;
line-height: 34px;
}
single-card.component.jsx
import { Fragment } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import {
selectCards,
selectIsCardRotationDisabled,
} from '../../store/cards/cards.selector';
import { setCurrentImagePath } from '../../store/cards/cards.action';
import './single-card.styles.scss';
export const CARD_TYPE_CLASSES = {
easy: 'easy',
medium: 'medium',
hard: 'hard',
};
const SingleCard = ({ card }) => {
const { name, imagePath, currentImagePath } = card;
const dispatch = useDispatch();
const cards = useSelector(selectCards);
const isCardRotationDisabled = useSelector(selectIsCardRotationDisabled);
const toggleImagePath = () =>
dispatch(setCurrentImagePath(cards, [card], imagePath));
const currentImagePathHandler = () => {
if (!isCardRotationDisabled) toggleImagePath();
};
return (
<Fragment>
<img
className={`single-card-container ${
currentImagePath === imagePath ? 'unhidden' : 'hidden'
}`}
src={currentImagePath}
alt={`${name}`}
onClick={currentImagePathHandler}
/>
</Fragment>
);
};
export default SingleCard;
single-card.styles.scss | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
export default SingleCard;
single-card.styles.scss
.single-card-container {
width: 115px;
height: 115px;
transition: transform 0.4s;
}
.hidden {
transform: rotateY(180deg);
}
.unhidden {
transform: rotateY(0deg);
}
game-area.component.jsx
import { useEffect } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import { setCards, setCurrentImagePath } from '../../store/cards/cards.action';
import { selectEmptyImagePath } from '../../store/cards/cards.selector';
import { selectGameDifficulty } from '../../store/game-difficulty/game-difficulty.selector';
import { setCounterValue } from '../../store/counter/counter.action';
import CardsDirectory from '../../components/cards-directory/cards-directory.component';
import Counter from '../../components/counter/counter.component';
import { CARD_TYPE_CLASSES } from '../../components/single-card/single-card.component';
import { CARDS_DATA } from '../../cards-data';
import Button, {
BUTTON_TYPE_CLASSES,
} from '../../components/button/button.component';
import './game-area.component.styles.scss';
const gameDifficulties = {
easy: 6,
medium: 8,
hard: 10,
};
const GameArea = () => {
const dispatch = useDispatch();
const emptyImagePath = useSelector(selectEmptyImagePath);
const gameDifficulty = useSelector(selectGameDifficulty); | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
const getRandomCards = (cardsCount) =>
CARDS_DATA.sort(() => 0.5 - Math.random())
.filter((card) => card.id !== 0)
.slice(0, cardsCount);
const getCopiedCards = (cardsToCopy) =>
cardsToCopy.map((card) => ({
...card,
id: card.id + 10,
}));
const getShuffledCards = (cards) =>
cards.sort(() => 0.5 - Math.random());
const getCardsCount = (gameDifficulty) =>
gameDifficulties[gameDifficulty];
const startNewGame = () => {
const cardsCount = getCardsCount(gameDifficulty);
const randomCards = getRandomCards(cardsCount);
const copiedCards = getCopiedCards(randomCards);
const cards = [...randomCards.concat(copiedCards)];
const shuffledCards = getShuffledCards(cards);
const initialCounterValue = 0;
dispatch(setCards(shuffledCards));
dispatch(setCurrentImagePath(cards, shuffledCards, emptyImagePath));
dispatch(setCounterValue(initialCounterValue));
};
useEffect(() => {
startNewGame();
}, []);
return (
<div className='game-area-container'>
<CardsDirectory cardType={CARD_TYPE_CLASSES[gameDifficulty]} />
<div className='bottom-panel'>
<Button
buttonType={BUTTON_TYPE_CLASSES.restart}
type='button'
onClick={startNewGame}
>
Restart Game
</Button>
<Counter />
</div>
</div>
);
};
export default GameArea;
game-area.component.styles.scss
.bottom-panel {
display: grid;
grid-template-columns: 1fr 1fr;
justify-items: center;
}
menu.component.jsx
import { useNavigate } from 'react-router-dom';
import { useDispatch } from 'react-redux';
import { setGameDifficulty } from '../../store/game-difficulty/game-difficulty.action';
import Button, {
BUTTON_TYPE_CLASSES,
} from '../../components/button/button.component';
import './menu.styles.scss';
const Menu = () => {
const navigate = useNavigate();
const dispatch = useDispatch(); | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
const Menu = () => {
const navigate = useNavigate();
const dispatch = useDispatch();
const goToGameAreaHandler = () => {
navigate('/game-area');
};
const difficultyLevelHandler = (event) => {
event.preventDefault();
const gameDifficulty = event.target.innerText.toLowerCase();
dispatch(setGameDifficulty(gameDifficulty));
};
return (
<div className='menu-container'>
<h1>Memo Game</h1>
<div className='difficulty-container'>
<h2>Select difficulty:</h2>
<Button
buttonType={BUTTON_TYPE_CLASSES.difficultyEasy}
type='button'
onClick={difficultyLevelHandler}
>
Easy
</Button>
<Button
buttonType={BUTTON_TYPE_CLASSES.difficultyMedium}
type='button'
onClick={difficultyLevelHandler}
>
Medium
</Button>
<Button
buttonType={BUTTON_TYPE_CLASSES.difficultyHard}
type='button'
onClick={difficultyLevelHandler}
>
Hard
</Button>
</div>
<Button
buttonType={BUTTON_TYPE_CLASSES.start}
type='button'
onClick={goToGameAreaHandler}
>
Start Game
</Button>
</div>
);
};
export default Menu;
menu.styles.scss
.menu-container {
height: 500px;
width: 500px;
display: flex;
flex-direction: column;
align-items: center;
border-radius: 4%;
border: 5px solid #dcabae;
background-color: #ffdfdc;
color: #4a586e;
h1 {
font-size: 50px;
}
}
.difficulty-container {
display: flex;
flex-direction: column;
margin-bottom: 15px;
align-items: center;
}
reducer.utils.js
export const createAction = (type, payload) => ({ type, payload });
root-reducer.js | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
export const createAction = (type, payload) => ({ type, payload });
root-reducer.js
import { combineReducers } from 'redux';
import { cardsReducer } from './cards/cards.reducer';
import { gameDifficultyReducer } from './game-difficulty/game-difficulty.reducer';
import { counterReducer } from './counter/counter.reducer';
export const rootReducer = combineReducers({
cards: cardsReducer,
gameDifficulty: gameDifficultyReducer,
counter: counterReducer
});
store.js
import { compose, applyMiddleware } from 'redux';
import { configureStore } from '@reduxjs/toolkit';
import { rootReducer } from './root-reducer';
import logger from 'redux-logger';
import storage from 'redux-persist/lib/storage';
import { persistStore, persistReducer } from 'redux-persist';
const persistConfig = {
key: 'root',
storage
};
const persistedReducer = persistReducer(persistConfig, rootReducer);
const middleWares = [process.env.NODE_ENV === 'development' && logger].filter(
Boolean
);
const composedEnhancers = compose(applyMiddleware(...middleWares));
export const store = configureStore({
reducer: persistedReducer,
composedEnhancers
});
export const persistor = persistStore(store);
cards.action.js
import CARDS_ACTION_TYPES from './cards.types';
import { createAction } from '../reducer.utils';
const updateCurrentImagePath = (cards, cardsToUpdate, currentImagePath) => {
return cards.map((card) => {
const existingCard = cardsToUpdate.find(
(cardToUpdate) => cardToUpdate.id == card.id
);
return existingCard !== undefined
? { ...card, currentImagePath: currentImagePath }
: card;
});
};
const updateIsCardDisabled = (cards, cardsToUpdate) => {
return cards.map((card) => {
const existingCard = cardsToUpdate.find(
(cardToUpdate) => cardToUpdate.id == card.id
); | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
return existingCard !== undefined
? { ...card, isCardDisabled: true }
: card;
});
};
export const setCards = (cards) =>
createAction(CARDS_ACTION_TYPES.SET_CARDS, cards);
export const setCurrentImagePath = (cards, cardsToUpdate, currentImagePath) => {
const newCards = updateCurrentImagePath(
cards,
cardsToUpdate,
currentImagePath
);
return createAction(CARDS_ACTION_TYPES.SET_CARDS, newCards);
};
export const setIsCardDisabled = (cards, cardsToUpdate) => {
const newCards = updateIsCardDisabled(cards, cardsToUpdate);
return createAction(CARDS_ACTION_TYPES.SET_CARDS, newCards);
};
export const setIsRotationDisabled = (isRotationDisabled) =>
createAction(
CARDS_ACTION_TYPES.SET_IS_CARD_ROTATION_DISABLED,
isRotationDisabled
);
cards.reducer.js
import { CARDS_DATA } from '../../cards-data';
import CARDS_ACTION_TYPES from './cards.types';
const INITIAL_STATE = {
cards: [],
emptyImagePath: CARDS_DATA[0].imagePath,
isCardRotationDisabled: false,
};
export const cardsReducer = (state = INITIAL_STATE, action = {}) => {
const { type, payload } = action;
switch (type) {
case CARDS_ACTION_TYPES.SET_CARDS:
return {
...state,
cards: payload,
};
case CARDS_ACTION_TYPES.SET_IS_CARD_ROTATION_DISABLED:
return {
...state,
isCardRotationDisabled: payload,
};
default:
return state;
}
};
cards.selector.js
export const selectCards = (state) =>
state.cards.cards;
export const selectEmptyImagePath = (state) =>
state.cards.emptyImagePath;
export const selectIsCardRotationDisabled = (state) =>
state.cards.isCardRotationDisabled;
cards.types.js
const CARDS_ACTION_TYPES = {
SET_CARDS: 'SET_CARDS',
SET_IS_CARD_ROTATION_DISABLED: 'SET_IS_CARD_ROTATION_DISABLED'
}; | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
export default CARDS_ACTION_TYPES;
counter.action.js
import { createAction } from "../reducer.utils"
import COUNTER_ACTION_TYPES from "./counter.types";
export const setCounterValue = (counterValue) =>
createAction(COUNTER_ACTION_TYPES.SET_COUNTER_VALUE, counterValue);
counter.reducer.js
import COUNTER_ACTION_TYPES from './counter.types';
const INITIAL_STATE = {
counterValue: 0
};
export const counterReducer = (state = INITIAL_STATE, action = {}) => {
const { type, payload } = action;
switch(type) {
case COUNTER_ACTION_TYPES.SET_COUNTER_VALUE:
return {
...state,
counterValue: payload
};
default:
return state;
}
};
counter.selector.js
export const selectCounterValue = (state) =>
state.counter.counterValue;
counter.types.js
const COUNTER_ACTION_TYPES = {
SET_COUNTER_VALUE: 'SET_COUNTER_VALUE'
};
export default COUNTER_ACTION_TYPES;
game-difficulty.action.js
import GAME_DIFFICULTY_ACTION_TYPES from './game-difficulty.types';
import { createAction } from '../reducer.utils';
export const setGameDifficulty = (gameDifficulty) =>
createAction(GAME_DIFFICULTY_ACTION_TYPES.SET_CARD_COUNT, gameDifficulty);
game-difficulty.reducer.js
import GAME_DIFFICULTY_ACTION_TYPES from './game-difficulty.types';
const INITIAL_GAME_DIFFICULTY_STATE = {
difficulty: 'easy',
};
export const gameDifficultyReducer = (
state = INITIAL_GAME_DIFFICULTY_STATE,
action = {}
) => {
const { type, payload } = action;
switch (type) {
case GAME_DIFFICULTY_ACTION_TYPES.SET_CARD_COUNT:
return {
...state,
difficulty: payload,
};
default:
return state;
}
};
game-difficulty.selector.js
export const selectGameDifficulty = (state) =>
state.gameDifficulty.difficulty;
game-difficulty.types.js | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
game, react.js, jsx, redux
game-difficulty.types.js
const GAME_DIFFICULTY_ACTION_TYPES = {
SET_CARD_COUNT: 'SET_CARD_COUNT'
};
export default GAME_DIFFICULTY_ACTION_TYPES;
Answer: You're doing great and this code is very clean and well-organized. Keep up the good work!
The biggest area for improvement that I see is in the cards.action.js file. You have a lot of action creators which are doing work that belongs in the reducer. The action should describe what happened, which is that card 123 was disabled. It's the job of the reducer to determine what to do about that, which is to update the array of cards so that the card you disabled has isCardDisabled: true. This is discussed in more detail in the Model Actions as Events, Not Setters section of the Redux Style Guide.
As a result of putting all the logic in your action creators, you're also having to pass way too much data to your actions. An action to disable one or more individual cards should not need to know about all of the cards in the game. You should only pass enough information to describe the event that happened. That is: which card ids are effected, and whether you are enabling or disabling them.
These are examples of good actions:
dispatch(disableCard(123));
dispatch(disableCards([123, 124]));
dispatch(toggleCardsDisabled({ ids: [123, 124], isDisabled: false }));
I always recommend that people use Redux Toolkit because it's such a great tool. You're taking steps to make your Redux code clean and maintainable with your reducer.utils.js file and createAction utility. If you follow down that path far enough then you'll eventually recreate Redux Toolkit. But it's already been created and you can just use it! | {
"domain": "codereview.stackexchange",
"id": 44360,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "game, react.js, jsx, redux",
"url": null
} |
sql, mysql, join
Title: Find offers that can be matched with bids
Question: I am working on a sql query for the following scenario:
Get all available offers for registered demand (offers from profiles
should not be from profile which had registered demand). After that
get all available offers from this initial profile which match any
demand from any other profile.
I ended up with this query:
SELECT *
FROM
(
SELECT *
FROM test_db_swap.Service
WHERE offer = 1
AND profileId != 3622
AND title LIKE('%Software%')
) AS offers
LEFT OUTER JOIN LATERAL
(
SELECT q1.id as Q1Id, q2.id as Q2Id, q1.title as Q1Title, q2.title as Q2Title, q1.profileId as Q1ProfileId, q2.profileId as Q2ProfileId
FROM
(
SELECT *
FROM test_db_swap.Service
WHERE
(
offer = 1
AND profileId = 3622
)
) as q1
INNER JOIN LATERAL
(
SELECT *
FROM test_db_swap.Service
WHERE
(
offer = 0
AND profileId != 3622
AND title LIKE(q1.title)
)
) q2
ON q1.title = q2.title
) AS giveBack
ON offers.profileId = giveBack.Q2ProfileId | {
"domain": "codereview.stackexchange",
"id": 44361,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "sql, mysql, join",
"url": null
} |
sql, mysql, join
I will need a pagination for this query, but I think I will do it with LIMIT 20,40; clause. My main concern is performance overhead due multiple subqueries and multiple joins. It should not be a big issue when I will add a pagination, but anyway.
Also engineering perfection - how it can be done better, - is my concern too as DBA is not my specialisation.
Thank you in advance.
Table schema:
id | title | expirationDate | offer | index | profileId
'12206', 'Software Development', '1673965266401', '1', NULL, '3621'
'12208', 'Consulting', '1673965266401', '1', NULL, '3621'
'12209', 'Product management', '1673965266401', '1', NULL, '3621'
'12210', 'Product management', '1673965266401', '1', NULL, '3622'
'12211', 'Product management', '1673965266401', '0', NULL, '3621'
'12212', 'Consulting', '1673965266401', '1', NULL, '3622'
'12213', 'Software Development', '1673965266401', '0', NULL, '3622'
EXPLAIN SELECT:
id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra
'1', 'SIMPLE', 'Service', NULL, 'ALL', 'fk_id', NULL, NULL, NULL, '7', '14.29', 'Using where; Using temporary; Using filesort'
'1', 'SIMPLE', 'Service', NULL, 'ALL', 'fk_id', NULL, NULL, NULL, '7', '100.00', 'Using where; Using join buffer (hash join)'
'1', 'SIMPLE', 'Service', NULL, 'ALL', 'fk_id', NULL, NULL, NULL, '7', '100.00', 'Using where; Using join buffer (hash join)'
Answer: It's hard to see how this could be a performant query.
(
SELECT *
FROM test_db_swap.Service
WHERE offer = 1
AND profileId != 3622
AND title LIKE('%Software%')
) AS offers
LEFT OUTER JOIN ... | {
"domain": "codereview.stackexchange",
"id": 44361,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "sql, mysql, join",
"url": null
} |
sql, mysql, join
I am assuming that cardinality of offer is low,
cardinality of profileId is high,
and 3622 is just an example which will
change from query to query.
Given a million distinct profile IDs,
we might have roughly a three million row table.
Put another way, a WHERE profileId = NNN query will enjoy very good
selectivity.
I assume we have an index available on profileId,
and another on title.
I am sad that you did not include any COUNT(*)
or EXPLAIN PLAN output in the question.
Let's look at those three conjuncts.
We're filtering on offer, fine, it cannot be usefully indexed,
as it takes on just a handful of distinct values.
We're table scanning for all mismatched profile IDs,
filtering out the handful of matches.
And we're similarly table scanning / filtering
for title, since the leading % wildcard
prevents MariaDB / Mysql from exploiting a title index.
Sounds bad.
Every paginated query is going to have to perform
a full table scan, every single time.
Even if all rows fit in the cache, you're
going to burn CPU cycles examining all rows
every single time.
This is independent of examining what the
rest of the query does.
Further down we see this:
AND title LIKE(q1.title)
That is definitely sub-optimal.
Prefer an equality check, rather than LIKE.
So there's some potential for the query
planner to exploit a title index.
A conjunct of AND title LIKE X,
where the planner can see the literal text of X,
may or may not use an index depending on wildcard
characters in X seen by the planner.
When X comes from query rows, then all such bets are off.
AND title LIKE('%Software%') | {
"domain": "codereview.stackexchange",
"id": 44361,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "sql, mysql, join",
"url": null
} |
sql, mysql, join
Consider using mysql
full text
indexing, with the MATCH keyword.
Postgres also has
excellent
support for such queries.
Better still, do some analysis up-front to simplify the query.
Table scan once to obtain all DISTINCT titles,
storing the result in a table or in memory.
Filter those titles to obtain the software-related titles,
maybe one or a half dozen such titles.
Pose the conjunct as: AND title IN ('this', 'that', 'other');
That way the planner will request just a handful of index probes,
rather than a full scan.
Given the apparent low cardinality of title,
this probably isn't a very interesting aspect to optimize.
For predictable output,
you probably want to include an ORDER BY.
Especially if you plan to paginate.
Let's say that table scanning is "too expensive"
for interactive production queries.
The query in its current form can't really be saved.
It requires table scan, every time. (Or at least a full
index scan given a
covering
index, which would be the moral equivalent.)
Assuming that UPDATE / INSERTs which materially affect query
results will happen "infrequently",
you will need to store results indexed by profile ID
in a MATERIALIZED VIEW. Then do some (cheap) filtering
on title at query time. | {
"domain": "codereview.stackexchange",
"id": 44361,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "sql, mysql, join",
"url": null
} |
c++, performance, graph
Title: Prune unreachable sections of a directed graph
Question: I have a graph represented with an Adjacency set similar to:
struct Vertex {
int x;
bool operator==(const Vertex& b) {
return x==b.x;
}
};
template<> struct std::hash<Vertex> {
std::size_t operator()(Vertex const& v) const noexcept {
return std::hash<int>()(v.x);
}
};
struct Edge {
std::shared_ptr<Vertex> fr;
std::shared_ptr<Vertex> to;
double weight;
Edge(std::shared_ptr<Vertex> fr_in, std::shared_ptr<Vertex> to_in) : fr(fr_in), to(to_in) {};
};
class Graph{
public:
std::shared_ptr<Vertex> addVertex() {
auto new_vertex = std::make_shared<Vertex>();
mAdjacencySet[new_vertex] = {};
return new_vertex;
}
std::shared_ptr<Edge> addEdge(std::shared_ptr<Vertex> fr, std::shared_ptr<Vertex> to) {
auto edge = std::make_shared<Edge>(fr, to);
mAdjacencySet[fr][to] = edge;
return edge;
}
void deleteVertex(std::shared_ptr<Vertex> v) {
mAdjacencySet.erase(v);
for (auto& [key, val] : mAdjacencySet) {
val.erase(v);
}
};
private:
std::unordered_map<
std::shared_ptr<Vertex>,
std::unordered_map<
std::shared_ptr<Vertex>,
std::shared_ptr<Edge>,
Deref::Hash,
Deref::Compare
>,
Deref::Hash,
Deref::Compare
> mAdjacencySet;
};
After I build my graph, I need to prune as many edges as possible because they are expensive to calculate.
One of the strategies to do so, is to delete any vertices without any outward edges (out degree of zero). This is very slow to do, relative to the rest of my program.
I wrote a script to time the relative parts of the complexity of each part:
int main() {
Timer wholeProgram;
wholeProgram.start();
Graph g;
auto v1 = g.addVertex();
auto v2 = g.addVertex();
auto e = g.addEdge(v1, v2); | {
"domain": "codereview.stackexchange",
"id": 44362,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance, graph",
"url": null
} |
c++, performance, graph
Timer makingVertices;
makingVertices.start();
size_t n = 1e3;
std::vector<std::shared_ptr<Vertex>> vertices(n);
for (size_t i=0; i<n; ++i) {
vertices[i] = g.addVertex();
vertices[i]->x = i;
}
makingVertices.stop();
Timer makingEdges;
makingEdges.start();
for (auto v1 : vertices) {
for (auto v2: vertices) {
if (v1!=v2) {
g.addEdge(v1, v2);
}
}
}
makingEdges.stop();
Timer deletingVertices;
deletingVertices.start();
for (auto vert : vertices) {
g.deleteVertex(vert);
}
deletingVertices.stop();
wholeProgram.stop();
std::cout << "Making Verts: " << makingVertices.elapsedMilliseconds() << std::endl;
std::cout << "Making edges: " << makingEdges.elapsedMilliseconds() << std::endl;
std::cout << "Deleting verts: " << deletingVertices.elapsedMilliseconds() << std::endl;
std::cout << "Whole program: " << wholeProgram.elapsedMilliseconds() << std::endl;
return 0;
}
And the timings (with '-O3') are:
Making Verts: 0
Making edges: 270
Deleting verts: 188
Whole program: 458
(In my actual code base, the deleting of the vertices is actually around 90% of the time to create the graph).
How can I optimize this code to reduce the time to delete vertices (And also I guess optimize the creation of edges, as this is also slow)?
The full code to run this example is:
#include <functional>
#include <memory>
#include <chrono>
#include <iostream> | {
"domain": "codereview.stackexchange",
"id": 44362,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance, graph",
"url": null
} |
c++, performance, graph
class Timer
{
public:
void start()
{
m_StartTime = std::chrono::system_clock::now();
m_bRunning = true;
}
void stop()
{
m_EndTime = std::chrono::system_clock::now();
m_bRunning = false;
}
double elapsedMilliseconds()
{
std::chrono::time_point<std::chrono::system_clock> endTime;
if(m_bRunning)
{
endTime = std::chrono::system_clock::now();
}
else
{
endTime = m_EndTime;
}
return std::chrono::duration_cast<std::chrono::milliseconds>(endTime - m_StartTime).count();
}
double elapsedSeconds()
{
return elapsedMilliseconds() / 1000.0;
}
private:
std::chrono::time_point<std::chrono::system_clock> m_StartTime;
std::chrono::time_point<std::chrono::system_clock> m_EndTime;
bool m_bRunning = false;
};
struct Deref {
/**
* @brief Function to dereference the pointer when hashing elements in a hashmap of shared pointers
*
*/
struct Hash {
template <typename T> std::size_t operator()(std::shared_ptr<T> const& p) const
{
return std::hash<T>()(*p);
}
template <typename T> std::size_t operator()(T const & p) const
{
return std::hash<T>(p);
}
};
/**
* @brief Function to dereference the pointer when comparing elements in a hashmap of shared pointers
*
*/
struct Compare {
template <typename T> bool operator()(std::shared_ptr<T> const& a, std::shared_ptr<T> const& b) const
{
return *a == *b;
}
template <typename T> bool operator()(T const& a, T const& b) const
{
return a == b;
}
};
};
struct Vertex {
int x;
bool operator==(const Vertex& b) {
return x==b.x;
}
}; | {
"domain": "codereview.stackexchange",
"id": 44362,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance, graph",
"url": null
} |
c++, performance, graph
struct Vertex {
int x;
bool operator==(const Vertex& b) {
return x==b.x;
}
};
template<> struct std::hash<Vertex> {
std::size_t operator()(Vertex const& v) const noexcept {
return std::hash<int>()(v.x);
}
};
struct Edge {
std::shared_ptr<Vertex> fr;
std::shared_ptr<Vertex> to;
double weight;
Edge(std::shared_ptr<Vertex> fr_in, std::shared_ptr<Vertex> to_in) : fr(fr_in), to(to_in) {};
};
class Graph{
public:
std::shared_ptr<Vertex> addVertex() {
auto new_vertex = std::make_shared<Vertex>();
mAdjacencyList[new_vertex] = {};
return new_vertex;
}
std::shared_ptr<Edge> addEdge(std::shared_ptr<Vertex> fr, std::shared_ptr<Vertex> to) {
auto edge = std::make_shared<Edge>(fr, to);
mAdjacencyList[fr][to] = edge;
return edge;
}
void deleteVertex(std::shared_ptr<Vertex> v) {
mAdjacencyList.erase(v);
for (auto& [key, val] : mAdjacencyList) {
val.erase(v);
}
};
private:
std::unordered_map<
std::shared_ptr<Vertex>,
std::unordered_map<
std::shared_ptr<Vertex>,
std::shared_ptr<Edge>,
Deref::Hash,
Deref::Compare
>,
Deref::Hash,
Deref::Compare
> mAdjacencyList;
};
int main() {
Timer wholeProgram;
wholeProgram.start();
Graph g;
auto v1 = g.addVertex();
auto v2 = g.addVertex();
auto e = g.addEdge(v1, v2);
Timer makingVertices;
makingVertices.start();
size_t n = 1e3;
std::vector<std::shared_ptr<Vertex>> vertices(n);
for (size_t i=0; i<n; ++i) {
vertices[i] = g.addVertex();
vertices[i]->x = i;
}
makingVertices.stop();
Timer makingEdges;
makingEdges.start();
for (auto v1 : vertices) {
for (auto v2: vertices) {
if (v1!=v2) {
g.addEdge(v1, v2);
}
}
}
makingEdges.stop(); | {
"domain": "codereview.stackexchange",
"id": 44362,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance, graph",
"url": null
} |
c++, performance, graph
Timer deletingVertices;
deletingVertices.start();
for (auto vert : vertices) {
g.deleteVertex(vert);
}
deletingVertices.stop();
wholeProgram.stop();
std::cout << "Making Verts: " << makingVertices.elapsedMilliseconds() << std::endl;
std::cout << "Making edges: " << makingEdges.elapsedMilliseconds() << std::endl;
std::cout << "Deleting verts: " << deletingVertices.elapsedMilliseconds() << std::endl;
std::cout << "Whole program: " << wholeProgram.elapsedMilliseconds() << std::endl;
return 0;
}
And to run it, you can view it online
Answer: Avoid having to loop over all vertices
In deleteVertex(), you loop over all of mAdjacencyList. However, in sparse graphs that means you unnecessarily visit a lot of vertices that weren't a neighbor of the one you deleted.
You could consider always storing edge information going both ways, but indicating in some way which directions were actually added to the graph and which not. That way, you can always efficiently look up which other vertices have an edge to a given vertex. For example, make two adjacency sets: one for incoming edges, one for outgoing ones:
class Graph{
public:
std::shared_ptr<Vertex> addVertex() {
return std::make_shared<Vertex>();
}
std::shared_ptr<Edge> addEdge(std::shared_ptr<Vertex> fr, std::shared_ptr<Vertex> to) {
auto edge = std::make_shared<Edge>(fr, to);
mOutEdges[fr][to] = edge;
mInEdges[to][fr] = edge;
return edge;
}
void deleteVertex(std::shared_ptr<Vertex> v) {
// Find neighbors using v's incoming edge list,
// and delete those neighbors' in/out edges from/to v.
for (auto& [neighbor, inEdges]: mInEdges[v]) {
inEdges.erase(v);
mOutEdges[neighbor].erase(v);
}
// Somewhat bad naming:
// this doesn't delete edges, but rather the vertex v.
mInEdges.erase(v);
mOutEdges.erase(v)
}; | {
"domain": "codereview.stackexchange",
"id": 44362,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance, graph",
"url": null
} |
c++, performance, graph
private:
using AdjacencySet = std::unordered_map<
std::shared_ptr<Vertex>,
std::unordered_map<โฆ>,
Deref::Hash,
Deref::Compare
>;
AdjacencySet mInEdges;
AdjacencySet mOutEdges;
};
Handle bulk updates
One reason deleting the vertices is slow in your main() is because you delete them all one by one. If you know you are going to delete all of them, you could just do mAdjacencySets.clear().
If you delete only some of the vertices, it might still be interesting to see if you can optimize this. Consider that if you delete two vertices that are neighbors of each other, you don't have to erase from each other's adjacency list.
Rethink the way you store the graph
The problem with your code is that the way you store your graph is very unoptimal. While operations on std::unordered_map are \$O(1)\$ (amortized!), that doesn't mean these operations are cheap and fast. std::shared_ptr in particular has some costs you have to consider: every time it is copied it needs to do atomic reference counting. Furthermore, it will allocate objects on the heap that might not be layed out optimally in memory. The std::unordered_maps will also allocate memory for each element they store, again not guaranteed to be nicely consecutively in memory.
Ideally, the vertices and their adjacency lists are stored in compact arrays, and vertices are labelled by their position in the array they are stored in:
struct Graph {
using VertexID = std::size_t;
struct Vertex {
โฆ
std::vector<VertexID> neighbors;
};
std::vector<Vertex> vertices;
VertexID addVertex() {
vertices.emplace_back();
return vertices.size() - 1;
}
void addEdge(VertexID from, VertexID to) {
vertices[from].neighbors.push_back(to);
}
}; | {
"domain": "codereview.stackexchange",
"id": 44362,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance, graph",
"url": null
} |
c++, performance, graph
Of course, the above is nice when only adding vertices and edges, removing them is problematic. There are ways to fix that; you could make vertices store std::optional<Vertex>es, so you can "delete" one without moving the other vertices, which would otherwise invalidate their indices. Scanning linearly through neighbors might sound slow but given the much better memory locality it can be more efficient if the graph is not very dense. You can make it logarithmic by keeping the neighbors sorted (in C++23 you could do that by using std::flat_set). For dense graphs you might thing about using std::unordered_set<VertexID>, or you could consider storing adjacency information in a std::vector<bool>, where every bit represents whether another vertex is a neighbor or not. The latter is \$O(1)\$ again.
Another issue with your code is that you store a lot of redundant information. An Edge does not need to have fr and to members; that information is already encoded in the keys of the std::unordered_maps.
Use of smart pointers
As already mentioned, std::shared_ptr has some overhead. You should only use it if you really need shared ownership of some object stored on the heap. For a graph however you should be able to have one container own the vertices it stores, and everything else just has a reference or pointer to those vertices. At most you need a std::unique_ptr, but since std::unordered_map already allocates memory for the objects it stores, even that shouldn't be necessary. So you could write:
std::unordered_map<Vertex, std::unordered_map<Vertex*, Edge>> mAdjacencyList;
Make vertices store their own neighbor lists
The declaration of mAdjacencyList looks quite complex. Consider storing the list of neighbors of a vertex in Vertex itself; this would simplify the code a lot:
struct Edge {
double weight;
};
struct Vertex {
int x;
std::unordered_map<Vertex*, Edge> neighbors;
};
std::unordered_set<Vertex> vertices; | {
"domain": "codereview.stackexchange",
"id": 44362,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance, graph",
"url": null
} |
c++, performance, graph
std::unordered_set<Vertex> vertices;
Of course, you still need to make sure the objects can be hashed if they are going to be stored in these containers. | {
"domain": "codereview.stackexchange",
"id": 44362,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance, graph",
"url": null
} |
vba, excel
Title: Optimising Read/Write Speed of Excel VBA Copy/Paste Macro
Question: I have an Excel sheet that connects to third party software which populates Sheet1 with data. It does this multiple times per second and overwrites previous data.
I have written the macro below to copy and paste the data to a sheet (called Data) each time there is a change to Sheet1.
The previous version of the macro looped down a range copying one row at a time which took a (relatively) long time as there can be 50000+ rows in the Data seet.
The current version uses a variant array but still seems to be very resource-hungry.
Are there any other ways it can be optimised to make it more efficient?
Thanks
Private Sub Worksheet_Change(ByVal Target As Range) | {
"domain": "codereview.stackexchange",
"id": 44363,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
If Target.Columns.Count <> 16 Then Exit Sub
'Count the cells to copy
Dim a As Integer
With ThisWorkbook.Worksheets("Sheet1")
lastRow = .Cells(Rows.Count, "A").End(xlUp).Row
inarr = .Range(.Cells(1, 1), .Cells(lastRow + 5, 26)) ' load all of sheet 1 data in a variant array
End With
a = lastRow
'Count the last cell where to start copying
Dim b As Long
With ThisWorkbook.Worksheets("Data")
b = .Cells(Rows.Count, "A").End(xlUp).Row
Dim c As Integer
c = 5
'Perform the copy paste process
Dim outarr() As Variant
ReDim outarr(1 To a, 1 To 22)
Application.EnableEvents = False
For i = 1 To a - 1
If ThisWorkbook.Worksheets("Sheet1").Range("E2") <> "" And ThisWorkbook.Worksheets("Sheet1").Range("F2") = "" And ThisWorkbook.Worksheets("Sheet1").Range("AB5") = "35" Then
outarr(i, 1) = inarr(3, 14)
outarr(i, 2) = inarr(2, 2)
outarr(i, 3) = inarr(1, 1)
outarr(i, 4) = inarr(2, 5)
outarr(i, 5) = inarr(c, 26)
outarr(i, 6) = inarr(c, 1)
outarr(i, 7) = inarr(c, 6)
outarr(i, 8) = inarr(c, 8)
outarr(i, 9) = inarr(c, 15)
outarr(i, 10) = inarr(c, 16)
outarr(i, 11) = inarr(3, 2)
outarr(i, 12) = inarr(c, 7)
outarr(i, 13) = inarr(c, 2)
outarr(i, 14) = inarr(c, 3)
outarr(i, 15) = inarr(c, 4)
outarr(i, 16) = inarr(c, 5)
outarr(i, 17) = inarr(c, 9)
outarr(i, 18) = inarr(c, 12)
outarr(i, 19) = inarr(c, 13)
outarr(i, 20) = inarr(c, 10)
outarr(i, 21) = inarr(c, 11)
outarr(i, 22) = inarr(c, 25)
c = c + 1
End If
Next i | {
"domain": "codereview.stackexchange",
"id": 44363,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
c = c + 1
End If
Next i
ThisWorkbook.Worksheets("Data").Range(.Cells(b + 1, 1), .Cells(b + a - 4, 22)) = outarr
End With
Application.EnableEvents = True | {
"domain": "codereview.stackexchange",
"id": 44363,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
'End If
Dim lastcell As Range
Dim wsStore As Worksheet
Set wsStore = ThisWorkbook.Worksheets("Store")
Set lastcell = wsStore.Cells(wsStore.Rows.Count, 1).End(xlUp)
With ThisWorkbook.Worksheets("Sheet1").Range("F2")
're-set F2 when last cell of the Store sheet is no longer the same as the value in N3
If .Value = "Closed" And Val(.ID) <> xlOff Then
.ID = xlOff
Call CopyToStore
Call ClearData
ElseIf .Offset(1, 8).Value <> lastcell.Value Then
.ID = xlOn
End If
End With
End Sub
Answer: Option Explicit
(Best Practice) Always declare Option Explicit at the top of your modules. This allows the compiler to flag the use of variables that have not been explicitly declared. This helps to reveal hard to find bugs - especially those due to typos. Make it automatic: in the VBIDE, check the 'Tools -> Options... -> (Editor tab) 'Require Variable Declaration' option. FWIW: Declaring it for the posted code found 3 undeclared variables.
Optimization
When optimizing a loop, the first step is to remove everything that does not absolutely have to execute within the loop.
As written, the statement...
If ThisWorkbook.Worksheets("Sheet1").Range("E2") <> "" And ThisWorkbook.Worksheets("Sheet1").Range("F2") = "" And ThisWorkbook.Worksheets("Sheet1").Range("AB5") = "35" Then
is located such that it is executed for every value of i...but the outcome of the expression does not depend on i. So, this statement, which currently de-references ThisWorkbook.Worksheets("Data") 3 (x 50000+) times, can be moved outside of the loop and executed only once. Organizing the code as follows, removes the inefficiency:
Dim copyArrayElements As Boolean
With ThisWorkbook.Worksheets("Sheet1")
copyArrayElements = .Range("E2") <> "" And .Range("F2") = "" And .Range("AB5") = "35"
End With
If copyArrayElements Then | {
"domain": "codereview.stackexchange",
"id": 44363,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
If copyArrayElements Then
For i = 1 To a - 1
outarr(i, 1) = inarr(3, 14)
'...the rest of the assignments
outarr(i, 22) = inarr(c, 25)
c = c + 1
Next i
End If
Application flags
Within the subroutine, Application.EnableEvents is set to False and then reset to True. It is important that this flag is reliably reset. However, it is possible for an error to occur before it is reset. The simplest change to ensure this flag is always reset is to add error handling to the subroutine. Like:
Private Sub Worksheet_Change(ByVal Target As Range)
If Target.Columns.Count <> 16 Then Exit Sub
On Error GoTo ErrorExit
'... the rest of the subroutine...
ErrorExit:
Application.EnableEvents = True
'Any other code that HAS to execute in the event of an error
End Sub
Even though 'Application.EnableEvents' is set to True before the end of the function (when there are no error(s)), there is no harm in setting it again in the error handler.
Big 'With' blocks
With statements can make code easier to read, and in some cases execute a little faster. However, if there are a lot of lines of code between the With and End With, it is easy to lose track of what object is used within the With statement. That appears to have happened here:
Dim b As Long
With ThisWorkbook.Worksheets("Data")
b = .Cells(Rows.Count, "A").End(xlUp).Row
'... the rest of code within the With block
ThisWorkbook.Worksheets("Data").Range(.Cells(b + 1, 1), .Cells(b + a - 4, 22)) = outarr
End With | {
"domain": "codereview.stackexchange",
"id": 44363,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
vba, excel
The last statement in the With block should be .Range(.Cells(b + 1, 1), .Cells(b + a - 4, 22)) = outarr. There are so many lines of content between the start and end of the With block, that the purpose of the With block is easily lost. And, as far as I can tell, there is only one other use of the '.' operator(b = .Cells(Rows.Count, "A").End(xlUp).Row). In this case, the code will not run perceptibly slower if this With block is removed. But, there is definitely a reduction in the cognitive effort to understand the code.
Validations
Private Sub Worksheet_Change(ByVal Target As Range)
If Target.Columns.Count <> 16 Then Exit Sub
The above statement validates the Target parameter. Anything other than a target Range of exactly 16 columns does not warrant executing the subsequent code. And, it looks like there may be other criteria as well. The code example above that declared and set the variable copyArrayElements looks like a candidate to execute at the top of the subroutine. If copyArrayElements is False, does it make sense to execute any of the remaining code?
So, perhaps...
Private Sub Worksheet_Change(ByVal Target As Range)
If Target.Columns.Count <> 16 Then Exit Sub
Dim copyArrayElements As Boolean
With ThisWorkbook.Worksheets("Sheet1")
copyArrayElements = .Range("E2") <> "" And .Range("F2") = "" And .Range("AB5") = "35"
End With
If Not copyArrayElements Then Exit Sub
would be an appropriate context validation criteria as well | {
"domain": "codereview.stackexchange",
"id": 44363,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "vba, excel",
"url": null
} |
java, event-handling, concurrency, stack
Title: Delayed, concurrent event stack in Java - follow-up
Question: I have slightly refactored the Delayed, concurrent event stack in Java. Now it looks like this:
DelayedEventStack.java
package com.github.coderodde.cconcurrent.eventstack;
import java.util.Deque;
import java.util.Objects;
import java.util.concurrent.ConcurrentLinkedDeque;
/**
* This class implements an delayed event stack.
*
* @author Rodion "rodde" Efremov
* @version 1.61 (Jan 14, 2023)
* @since 1.6 (Jan 12, 2023)
*/
public final class DelayedEventStack implements AutoCloseable {
/**
* This static inner class implements the event stack entry.
*/
private static final class DelayedEventStackEntry {
private final Runnable onExpired;
private final long expirationMillis;
DelayedEventStackEntry(Runnable onExpired, long expirationMillis) {
this.onExpired =
Objects.requireNonNull(onExpired, "onExpired is null.");
this.expirationMillis = expirationMillis;
}
}
/**
* The event loop flag.
*/
private volatile boolean doRunFlag = true;
/**
* The actual event stack.
*/
private final Deque<DelayedEventStackEntry> delayedEventStack =
new ConcurrentLinkedDeque<>();
/**
* This flag specifies whether the stack runs the leftover events in the
* stack upon closing the stack.
*/
private final boolean dischargeRemainingEventsOnClose;
/**
* The worker thread.
*/
private final Thread workerThread = new Thread() {
@Override
public void run() {
while (doRunFlag) {
// Ask delayedEventStack for a topmost event entry. Gets null if
// the stack is empty, in which case we sleep a millisecond and
// ask one more time.
DelayedEventStackEntry topmostEventStackEntry =
delayedEventStack.peekLast(); | {
"domain": "codereview.stackexchange",
"id": 44364,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, event-handling, concurrency, stack",
"url": null
} |
java, event-handling, concurrency, stack
if (topmostEventStackEntry == null
|| System.currentTimeMillis() <
topmostEventStackEntry.expirationMillis) {
// Once here, nothing to do.
Utils.sleep(1L);
} else {
delayedEventStack.removeLast().onExpired.run();
}
}
}
};
public DelayedEventStack() {
this(true);
}
public DelayedEventStack(boolean dischargeRemainingEventsOnClose) {
this.dischargeRemainingEventsOnClose = dischargeRemainingEventsOnClose;
workerThread.start();
}
public void add(Runnable onAdd, Runnable onExpired, long durationMillis) {
delayedEventStack.addLast(
new DelayedEventStackEntry(
onExpired,
System.currentTimeMillis() + durationMillis));
onAdd.run();
}
@Override
public void close() {
doRunFlag = false;
if (dischargeRemainingEventsOnClose) {
while (!delayedEventStack.isEmpty()) {
delayedEventStack.removeLast().onExpired.run();
}
}
}
}
Utils.java
package com.github.coderodde.cconcurrent.eventstack;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* This class is supposed to hold miscellaneous utility methods.
*
* @author Rodion "rodde" Efremov
* @version 1.6 (Jan 14, 2023)
* @since 1.6 (Jan 14, 2023)
*/
public final class Utils {
private static final Logger LOGGER =
Logger.getLogger(Utils.class.getName());
private Utils() {
}
public static void sleep(long millis) {
try {
Thread.sleep(millis);
} catch (InterruptedException ex) {
LOGGER.log(Level.WARNING, "Interrupted while sleeping.");
}
}
}
Demo.java
package com.github.coderodde.cconcurrent.eventstack; | {
"domain": "codereview.stackexchange",
"id": 44364,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, event-handling, concurrency, stack",
"url": null
} |
java, event-handling, concurrency, stack
Demo.java
package com.github.coderodde.cconcurrent.eventstack;
import static com.github.coderodde.cconcurrent.eventstack.Utils.sleep;
import java.util.logging.Level;
import java.util.logging.Logger;
public final class Demo {
public static void main(String[] args) {
System.out.println("Simulation started.");
DelayedEventStack eventStack = new DelayedEventStack();
eventStack.add(() -> { System.out.println("Event 1 start"); },
() -> { System.out.println("Event 1 end");},
10_000L);
sleep(2_000L);
eventStack.add(() -> { System.out.println("Event 2 start"); },
() -> { System.out.println("Event 2 end");},
3000L);
sleep(7_000L);
eventStack.add(() -> { System.out.println("Leftover event start"); },
() -> { System.out.println("Leftover event end"); },
10_000L);
CloseThread closeThread = new CloseThread(eventStack);
closeThread.start();
}
}
/**
* This class is responsible for closing the event stack.
*/
class CloseThread extends Thread {
private static final Logger logger =
Logger.getLogger(CloseThread.class.getName());
private final DelayedEventStack delayedEventStack;
CloseThread(DelayedEventStack delayedEventStack) {
this.delayedEventStack = delayedEventStack;
}
@Override
public void run() {
Utils.sleep(10_000L);
try {
delayedEventStack.close();
} catch (Exception ex) {
logger.log(
Level.SEVERE,
"An exception was thrown upon closing the event stack: {0}",
ex.getMessage());
}
}
}
Critique request
Since I am not good at writing concurrent code, I need your help, guys, to make it mature.
"domain": "codereview.stackexchange",
"id": 44364,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, event-handling, concurrency, stack",
"url": null
} |
java, event-handling, concurrency, stack
Answer: Biggest problem I see is busy waiting.
Waiting even 1ms in a loop is not a good practice, because you are wasting CPU anyways. Better option would be to wait indefinitely if stack is empty or wait for the time remaining until "top" event expiration.
If we add new event, we should add it to the stack first and then notify waiting thread, which will re-check the top of the stack and act accordingly.
close() method is a bit trickier and depends what you want to do here. One good approach is to forbid adding new events and finish when stack is empty, i.e. don't wait if no elements, but just return. Anyways we should account that working thread might be waiting at the time we call close(), so we might need to notify it.
Code tips:
use push(), pop() and peek() instead of ..Last() for better readability
rename class methods to push() and pop() (class is called ...Stack afterall)
give worker thread a name
public final class DelayedEventStack implements AutoCloseable {
private volatile boolean doRunFlag = true;
// no need for synchronized collection, since we operate it under the lock (max 1 thread is modifying/reading it at the time)
private final Deque<DelayedEventStackEntry> delayedEventStack = new ArrayDeque<>();
private final boolean dischargeRemainingEventsOnClose;
private final ReentrantLock lock;
private final Condition available;
private final Thread workerThread = new Thread(() -> {
while (doRunFlag) {
try {
// note: here we don't hold a lock while running
pop().onExpired.run();
} catch (InterruptedException e) {
// process exception
}
}
}, "DelayedEventStackThread"); | {
"domain": "codereview.stackexchange",
"id": 44364,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, event-handling, concurrency, stack",
"url": null
} |
java, event-handling, concurrency, stack
public DelayedEventStack(boolean dischargeRemainingEventsOnClose) {
this.dischargeRemainingEventsOnClose = dischargeRemainingEventsOnClose;
lock = new ReentrantLock();
available = lock.newCondition();
workerThread.start();
}
private DelayedEventStackEntry pop() throws InterruptedException {
// we should be holding the lock while calling `await()`
lock.lock();
try {
// need a loop here to re-check the top
while (true) {
DelayedEventStackEntry top = delayedEventStack.peek();
if (top == null) {
if (!doRunFlag) return new DelayedEventStackEntry(() -> {}, 0);
// if empty - wait indefinitely i.e. until we got signalled
available.await();
} else {
long delay = top.expirationMillis - System.currentTimeMillis();
if (delay <= 0) {
// event is ready to be executed
return delayedEventStack.pop();
}
// wait until event start time
available.awaitNanos(delay * 1000000);
}
}
} finally {
lock.unlock();
}
}
public void push(Runnable onExpired, long durationMillis) {
if (!doRunFlag)
throw new UnsupportedOperationException("Event stack is closed; adding new events is impossible");
lock.lock();
try {
delayedEventStack.push(new DelayedEventStackEntry(onExpired, System.currentTimeMillis() + durationMillis));
// signal waiting thread to re-check
available.signal();
} finally {
lock.unlock();
}
} | {
"domain": "codereview.stackexchange",
"id": 44364,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, event-handling, concurrency, stack",
"url": null
} |
java, event-handling, concurrency, stack
@Override
public void close() {
doRunFlag = false;
lock.lock();
try {
// if we don't want to run reminaing events - just remove them from the stack
if (!dischargeRemainingEventsOnClose) {
delayedEventStack.clear();
}
// wake up waiting thread to re-check top
available.notify();
} finally {
lock.unlock();
}
}
}
NB: code is not tested | {
"domain": "codereview.stackexchange",
"id": 44364,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "java, event-handling, concurrency, stack",
"url": null
} |
c++, performance, algorithm, mathematics
Title: Euler's brick generator in C++
Question: I stumbled upon this unsolved problem of math named Euler's brick. I wrote a program in C++ to generate solutions for Euler's brick problem.
It searches from 1 to 10,000 in about 10 seconds.
It took 10.977153 minutes to search from 1 to 50,000.
How can I increase the performance further so I can search for larger limits? Are there any important optimizations that I am missing?
Here is my current code:
#include <iostream>
#include <vector>
#include <array>
#include <math.h>
#include <chrono>
inline bool is_whole(double x) {
return x - int(x) == 0;
}
int main() {
auto start = std::chrono::high_resolution_clock::now();
std::vector<std::array<int, 3>> solutions = {};
unsigned long int i = 1;
unsigned long int limit = 10000;
while (i < limit) {
int a = i;
for (int b = i; b < limit; b++) {
if (is_whole(sqrt(double((a*a + b*b))))) {
for (int c = b; c < limit; c++) {
if (
is_whole(sqrt(double((a*a + c*c)))) &&
is_whole(sqrt(double((b*b + c*c))))
) {
std::array<int, 3> sol = {a, b, c};
solutions.push_back(sol);
}
}
}
}
++i;
}
auto stop = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
std::cout << solutions.size() << " solutions found in " << duration.count() << " microseconds..." << std::endl;
for (std::array<int, 3> sol : solutions) {
if (is_whole(sqrt(sol[0] * sol[0] + sol[1] * sol[1] + sol[2]))) {
std::cout << "\n\nPerfect cuboid found!!" << "[" << sol[0] << ", " << sol[1] << ", " << sol[2] << "]\n" << std::endl;
}
std::cout << "[" << sol[0] << ", " << sol[1] << ", " << sol[2] << "], ";
}
return 0;
} | {
"domain": "codereview.stackexchange",
"id": 44365,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance, algorithm, mathematics",
"url": null
} |
c++, performance, algorithm, mathematics
Answer: I recommend including <cmath> rather than the deprecated <math.h>, which should only be used in bilingual (C/C++) contexts. The C++ version ensures that identifiers are properly namespaced (e.g. std::sqrt), helping make your code clearer.
I think it would be clearer to define a simple Box type instead of std::array<int,3>, preferably with unsigned dimensions rather than signed.
There's lots of conversion between signed and unsigned types (including a which is always equal to static_cast<int>(i)) that can be eliminated. Remove i and make a, b and c share a single type (name it, in case you need to change it later).
Overuse of std::endl - I don't think there's any need to flush the output stream anywhere before it's closed (after main() returns), so just replace all those with plain newlines ("\n"). And make sure we don't finish the program with a partially-written line (at best, that's annoying to users).
is_whole() appears subject to floating-point rounding errors. I'd replace it with an is_perfect_square() that accepts an integer type and can be exact.
This test looks incorrect:
if (is_whole(sqrt(sol[0] * sol[0] + sol[1] * sol[1] + sol[2]))) {
I'm guessing you meant sol[2] * sol[2] as the last addend.
To make the search faster, we'll need to abandon brute-force search, and use one of the algorithms that generates Pythagorean triples, at least for the first two dimensions.
The Wikipedia entry for Perfect cuboid contains a lot of constraints that could prune the search. | {
"domain": "codereview.stackexchange",
"id": 44365,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, performance, algorithm, mathematics",
"url": null
} |
rust, pointers
Title: Rust: Splitting a mutable slice into disjoint, but non-contiguous subslices
Question: For some context, this is inspired by my attempt to solve this SO question.
I have a mutably borrowed slice representing a 2D array, and I want to split the borrow such that I can access all the rows or all the columns at the same time. Splitting into rows is easy, I just have to call split_at_mut repeatedly to obtain a mutable subslice for each row.
But columns are harder because the elements in a column are not contiguous in the slice. My idea is to create a pseudo-slice (Column) that implements Index such that it accesses the elements in a column inside the original slice. The Column struct has a period that is the width of a row (i.e. the number of columns), and an offset that is the index of the column. Columns that have the same period but different offsets are guaranteed to be disjoint, so sharing the same mutable borrow should be safe.
The way to create them is to construct a ColumnIterMut iterator, which will yield all the columns for a specified period.
To construct multiple mutable columns sharing the same slice simultaneously, I had to use raw pointers. I've been writing Safe Rust for a fair amount of time now, but I'm still just learning Unsafe Rust. So, most importantly, I would like you to check if my implementation is actually sound. I'm mainly concerned by aliasing rules and conforming to the Stacked Borrows model. Also, is it correct for me to implement Send and Sync here? The code works and even Miri doesn't complain, but I'm still not 100% sure that it won't cause UB under some circumstances.
A less important aspect is, if my code turned out to be correct, what additional traits/methods should I implement on Column to make it more user-friendly?
Here is the code:
use core::ops::{Index, IndexMut};
use std::marker::PhantomData;
pub struct ColumnIterMut<'a, T>{
data: &'a mut [T],
period: usize,
offset: usize
} | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
rust, pointers
pub struct ColumnIterMut<'a, T>{
data: &'a mut [T],
period: usize,
offset: usize
}
impl<'a, T> ColumnIterMut<'a, T> {
pub fn new(data: &'a mut [T], period: usize) -> Self {
assert!(period > 0);
Self { data, period, offset: 0 }
}
}
impl<'a, T> Iterator for ColumnIterMut<'a, T> {
type Item = Column<'a, T>;
fn next(&mut self) -> Option<Self::Item> {
if self.offset < self.period {
let col = Column{
ptr: self.data as *mut [T],
_lifetime: PhantomData,
offset: self.offset,
period: self.period
};
self.offset += 1;
Some(col)
} else {
None
}
}
}
//INVARIANT: period > 0
//INVARIANT: offset < period
//INVARIANT: ptr is always non-null, well-aligned and points to a valid instance of [T]
//INVARIANT: all Column structs sharing the same slice of data simultaneously
// must have equal `period`s and distinct `offset`s
pub struct Column<'a, T>{
ptr: *mut [T],
_lifetime: PhantomData<&'a mut [T]>,
period: usize,
offset: usize,
}
impl<'a, T> Column<'a, T> {
pub fn len(&self) -> usize {
unsafe{
((*self.ptr).len() + self.period - self.offset - 1) / self.period
}
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
fn map_index(&self, index: usize) -> usize {
index * self.period + self.offset
}
}
impl<'a, T> Index<usize> for Column<'a, T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
unsafe{
//SAFETY: if the invariants are maintained, the indices returned by
// `Self::map_index()` will be exclusive to this instance of the struct
&(*self.ptr)[self.map_index(index)]
}
}
} | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
rust, pointers
impl<'a, T> IndexMut<usize> for Column<'a, T> {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
unsafe{
//SAFETY: if the invariants are maintained, the indices returned by
// `Self::map_index()` will be exclusive to this instance of the struct
&mut (*self.ptr)[self.map_index(index)]
}
}
}
unsafe impl<'a, T> Send for Column<'a, T> where [T]: Send {}
unsafe impl<'a, T> Sync for Column<'a, T> where [T]: Sync {}
GitHub
Answer: Love the idea!
Disclaimer: I am sketchy on the requirements and effects ofPhantomData, so I'll abstain from comments on that.
Differentiate Column and ColumnMut.
I'd advise calling this struct ColumnMut, and reserve Column for the non-mutable version.
Behold NonNull.
Rather than specifying as an invariant that ptr is non-null, you should use the NonNull type.
Apart from a better API and a better guarantee, this will also create a niche within the ColumnMut so that Option<ColumnMut> will have the same size as ColumnMut. Pretty neat.
pub struct ColumnMut<'a, T> {
ptr: NonNull<[T]>,
...
} | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
rust, pointers
Consider zero-sized structs.
Slices of zero-sized structs are particular, as they reference a zero-sized array, and may have nigh arbitrary length.
You could either disable support for them outright -- panicking in the constructor for example -- or else you'll need to double check your implementation.
I'd advise creating a test with the largest Vec<()> you can get, getting a slice from that, and then testing operations on the two edge cases that are single row and single column (min/maxing length and period).
Consider implementing Default for ColumnMut.
You can use a dangling pointer and a length of 0.
(Performance) Consider a different representation.
The problem of the current representation is that len() uses / self.period. A division by a (large) integer is the slowest arithmetic operations possible: between 30 and 90 CPU cycles, when an addition/subtraction is 1 cycle and a multiplication 3 cycles. This is because it's implemented by trial division, primary school style.
It's all the more problematic here that you use len repeatedly, it's the basis of pretty much all other implementations. The cost is going to add up quickly.
Rather than storing the length of the slice (as part of [T]) you could instead directly store the length of the column (number of rows).
Similarly, you store the offset, requiring you to add it at every turn. Addition is fast (1 cycle) but that's an extra 8 bytes (+33%). Instead, you could just offset the pointer once and for all when creating the ColumnMut, and you'd be good to go.
pub struct ColumnMut<'a, T> {
ptr: NonNull<T>,
length: usize,
period: usize,
_lifetime: PhantomData<&'a mut [T]>,
} | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
rust, pointers
impl ColumnMut<'a, T> {
/// Creates an instance of the given length, based on the given number
/// of rows and the column index.
///
/// # Safety
///
/// - The slice must be alive and must NOT be aliased for `'a`.
/// - All instances of `ColumnMut` referencing this slice MUST be defined
/// with the same number of rows and a distinct column index.
/// - `number_rows * number_columns` must be equal to `slice.len()`.
/// - `column_index` must be strictly less than `number_columns`.
pub unsafe fn new(
slice: NonNull<[T]>,
number_rows: usize,
number_columns: usize,
column_index: usize,
) -> ColumnMut<'a T> {
// Any safety invariant which CAN be verified SHOULD be verified in
// Debug mode.
debug_assert_eq!(number_rows * number_columns, slice.len());
debug_assert!(column_index < number_columns);
let length = number_rows;
let period = number_columns;
let _lifetime = PhantomData;
if length == 0 {
let ptr = slice.as_non_null_ptr();
return Self { ptr, length, period, _lifetime, };
}
// SAFETY:
// - `column_index` is within bounds of the original slice, since
// * `column_index < number_columns`,
// * `number_rows * number_columns == slice.len()`,
// * and `number_rows > 0`.
// - `column_index` does not overflow an `isize`, since it's within
// bounds of the slice and a slice size does not overflow an `isize`.
let ptr = unsafe { slice.as_non_null_ptr().add(column_index) };
Self { ptr, length, period, _lifetime, }
} | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
rust, pointers
Self { ptr, length, period, _lifetime, }
}
You may even consider using u32, instead of usize, as having more than 4 billions of columns, or 4 billions of rows, is fairly unlikely in the first place -- it requires 4 GB of memory for the smallest 1 byte struct.
(Safety) Use the unsafe_ops_in_unsafe_fn lint.
It requires using unsafe for unsafe operations even in unsafe functions, making them stand out, and ensuring you don't forget the matching SAFETY comment.
// In lib.rs or main.rs
#![deny(unsafe_ops_in_unsafe_fn)] | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
rust, pointers
(Safety) Don't be greedy.
It's tempting to pack as much as possible in a single unsafe statement, but the problem is that it makes it hard to properly document why each individual operation is valid.
Instead, prefer splitting each operation in a separate block, unless the same justification can made for multiple operations at once.
(Safety) Don't repeat yourself.
It's not an advice specific to safety, but it's all the more important when safety's at stake.
Part of the reason for being "greedy" (packing statements, skimping on the SAFETY comment, ...) is that you're repeating yourself. And it'd be worse if you had the appropriate get and get_mut fallible implementation, as you'd have 4 implementations of getting an element.
Instead, do it ONCE and do it WELL.
(Safety) Avoid materializing the reference to the underlying slice.
I am surprised that MIRI isn't complaining -- maybe a gap in the tests?
When materializing &mut ... from raw pointers, I recommend never materializing intermediate references. There's too much of a chance of accidentally materializing an overlap: even if afterwards you only take one field or one element, if there's a moment in time where overlap exists, who knows what the compiler/optimizer might do.
Thus, I'd recommend sticking with pointer arithmetic until you have a pointer to a guaranteed not-to-overlap piece of memory, in your case, a T.
(Safety) Putting all those remarks together.
Focusing on the new stuff:
impl<'a, T> ColumnMut<'a, T> {
/// Returns a reference to the element at the specified `index`, if within bounds.
pub fn get(&self, index: usize) -> Option<&T> {
let pointer = self.get_pointer(index)?; | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
rust, pointers
// SAFETY:
// - `pointer` is within bounds, as per `self.get_pointer`.
// - `pointer` is correctly aligned, and points to readable memory.
// - `self` has exclusive access to `*pointer`, guaranteeing the
// absence of mutable borrow from outside `self` for the lifetime
// of `&*pointer`.
// - `&*pointer` borrows `&self`, guaranteeing the absence of mutable
// borrow from `self` for its lifetime.
Some(unsafe { &*pointer })
}
/// Returns a reference to the element at the specified `index`, if within bounds.
pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
let pointer = self.get_pointer(index)?;
// SAFETY:
// - `pointer` is within bounds, as per `self.get_pointer`.
// - `pointer` is correctly aligned, and points to readable memory.
// - `self` has exclusive access to `&mut *pointer`, guaranteeing the
// absence of borrow from outside `self` for the lifetime of
// `&mut *pointer`.
// - `&mut *pointer` borrows `&self` mutably, guaranteeing the absence
// of borrow from `self` for its lifetime.
Some(unsafe { &mut *pointer })
}
// Returns a pointer to the element at the specified `index`, if within bounds.
fn get_pointer(&self, index: usize) -> Option<NonNull<T>> {
if index >= self.length {
return None;
}
if mem::size_of::<T>() == 0 {
return Some(self.ptr);
}
// SAFETY:
// - `index * self.period` is within bounds of the original slice.
// - `index * self.period` does not overflow an `isize`, since it's
// within bounds of the original slice and a slice size does not
// overflow an `isize`.
let pointer = unsafe { self.ptr.as_ptr().add(index * self.period) };
NonNull::new(pointer)
} | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
rust, pointers
NonNull::new(pointer)
}
Having written the fallible versions, the infallible ones are dead easy:
impl<'a, T> Index<usize> for ColumnMut<'a, T> {
type Output = T;
fn index(&self, index: usize) -> &Self::Output {
let Some(element) = self.get(index) else {
panic!("{index} is out of bounds (>= {})", self.length);
};
element
}
}
impl<'a, T> IndexMut<usize> for ColumnMut<'a, T> {
fn index_mut(&self, index: usize) -> &mut Self::Output {
let Some(element) = self.get_mut(index) else {
panic!("{index} is out of bounds (>= {})", self.length);
};
element
}
}
Use a better named argument for ColumnIterMut::new
That period is fairly confusing. This is the number of columns, just name it so.
(Safety) What about ColumnIterMut?
I am uneasy, once again, due to the presence of the mutable slice.
After obtaining a ColumnMut from the iterator, you have both:
A mutable slice within ColumnIterMut.
A ColumnMut instance allowing to materialize a mutable reference to elements of this mutable slice.
This is a recipe for disaster, in my view, and I'd feel better if ColumnIterMut instead embedded a NonNull<[T]> so that if anyone wants elements out of that, they'll have to first do a safety assessment.
Put together:
pub struct ColumnMutIter<'a, T> {
slice: NonNull<[T]>,
length: usize,
period: usize,
offset: usize,
_lifetime: PhantomData<&'a mut [T]>,
}
impl<'a, T> ColumnMutIter<'a, T> {
pub fn new(slice: &'a mut [T], number_columns: usize) -> Self {
assert!(number_columns > 0);
// Consider this case, silent failure seems odd.
debug_assert!(number_columns <= slice.len());
let slice = NonNull::from(slice);
let length = slice.len() / number_columns;
let period = number_columns;
let offset = 0;
let _lifetime = PhantomData;
Self { slice, period, offset, _lifetime }
}
} | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
rust, pointers
Self { slice, period, offset, _lifetime }
}
}
impl<'a, T> Iterator for ColumnMutIter<'a, T> {
type Item = ColumnMut<'a, T>;
fn next(&mut self) -> Option<Self::Item> {
if self.offset < self.period {
let result = ColumnMut::new(self.slice, self.length, self.period, self.offset);
self.offset += 1;
Some(result)
} else {
None
}
}
}
The length in my implementation is "overkill". You could do without if you removed the matching debug statement in ColumnMut::new. But it doesn't cost much to keep it, so why not?
(Safety) Your Send and Sync look odd.
A ColumnMut is a reference to a slice, not a slice itself, so I'd change the where clauses to &'a mut [T]: Send (or Sync) to match. Probably inconsequential, but it'd match exactly, rather than closely, and who knows what differences lurk there?
Where are the tests?
You didn't show any test.
It's important to have good test coverage for unsafe code, because the only analysis tools that you have (MIRI, Sanitizers, Valgrind, ...) only check that the code that is running looks OK.
So, first things first, write tests as you would normally. You want to verify that each function, in isolation, works as expected, including in edge cases:
This means checking zero-sized elements.
This means using cargo tarpaulin (or equivalent) to check code coverage and verifying that you exercise all code-paths. | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
rust, pointers
It's unsafe code, be thorough.
On top, you also need some specific testing techniques for unsafe code.
You want to check that borrow-checking works, or that Send and Sync are not implemented when they should not be. It seems silly, but when you write unsafe code and materialize references manually, you could have a fn get(&self) -> &'static T and it would compile... and because the lifetimes differ, it would not borrow self. Or you could accidentally delete that where clause on impl Send. Time to be paranoid.
You should (ab)use documentations tests, as they can test that compilation fails for a snippet:
#[cfg(test)]
#[doc(hidden)]
pub mod compiletests {
/// ```compile_fail:Exxx
/// let mut array = [1, 2, 3];
/// let column = ColumnMut::from_slice(&mut array, 3, 0);
///
/// let first = column.get(0);
/// std::mem::drop(column); // first should borrow until next line.
/// std::mem::drop(first);
/// ```
pub fn column_mut_get_borrow() {}
}
And finally, you want to write tests specifically exercising overlapping borrows:
Between ColumnMutIter and ColumnMut: create a column, use it to access an element, create another column, use the element from the first.
Between ColumnMut instances: create two columns, grab an element (mutably) from each, use swap between them.
It's those tests that'll really validate whether the Stacked Borrow model is okay with what you're doing, or not. | {
"domain": "codereview.stackexchange",
"id": 44366,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "rust, pointers",
"url": null
} |
python, pandas
Title: Pandas Upsampling Time Series Splitting Equally the values through the weeks starting on monday
Question: I build my code studying this question: "Divide total sum equally to higher sampled time periods when upsampling with pandas".
I am wondering if can be improved the code and if it is right.
It seems to work properly, but I am always looking for room for improvement, or for any downside I am not aware of in the process I implemented.
Context:
I have monthly data from Influencer Activities.
This data are Reach and Engagement.
Goal:
My Goal is to split equally this information through the weeks of the month.
Weeks starting on Monday
Major Concerns:
A month is on average composed by 4.34 weeks.
Am I missing some information, using the following approach, when a week overlaps between one month and another?
#Creating a min reproducible example
import pandas as pd
import numpy as np
date_index = pd.date_range(start = '01/01/2020',
end = '01/12/2022',
freq = "MS",
inclusive = "left")
np.random.seed(0)
reach = np.random.randint(1000,10000, len(date_index))
engagement = np.random.randint(100,1000, len(date_index))
reach_engagement = {"reach":reach,
"engagement":engagement}
df = pd.DataFrame( data = reach_engagement,
index = date_index)
#Resempling (the goal of the question)
df_resample = df.resample('W-MON').fillna("pad")
len(df_resample)
df_resample = df_resample/7
df_resample.round()
Output:
2020-01-06 533.0 136.0
2020-01-13 533.0 136.0
2020-01-20 533.0 136.0
2020-01-27 533.0 136.0
2020-02-03 609.0 28.0
...
2021-12-20 1228.0 129.0
2021-12-27 1228.0 129.0
2022-01-03 499.0 33.0
105 rows × 2 columns | {
"domain": "codereview.stackexchange",
"id": 44367,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, pandas",
"url": null
} |
python, pandas
Answer: Your direct use of df.resample('W-MON') neglects the fact that your reach and engagement are applied to months, which presumably all start on the first of the month and have variable length. W-MON is not variable-length. Instead you need to do something like:
Upsample to days
Divide by the number of days in the given month
Only then, downsample to weeks
This could look like:
from datetime import date
import numpy as np
import pandas as pd
from numpy.random import default_rng
months = pd.date_range(
start=date(2020, 1, 1), freq='MS', name='month_start',
end=date(2023, 1, 1),
)
n = len(months) - 1
rand = default_rng(seed=0)
df = pd.DataFrame(
data={'reach': rand.integers(1_000, 10_000, n),
'engagement': rand.integers(100, 1_000, n)},
index=months[:-1])
df.loc[months[-1], :] = [np.nan, np.nan] # for resampling
by_day = df.resample('D').ffill().iloc[:-1, :]
month_days = by_day.reach.groupby(pd.Grouper(freq='M')).transform('count')
by_day.reach /= month_days
by_day.engagement /= month_days
by_week = by_day.resample('W-MON').sum() | {
"domain": "codereview.stackexchange",
"id": 44367,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, pandas",
"url": null
} |
performance, multithreading, rust, primes, formatting
Title: Multithreaded segmented Sieve of Eratosthenes
Question: I am fairly new to Rust and thought a good way to practice would be to write a multithreaded segmented Sieve of Eratosthenes. It performs ok (searches ten-billion numbers in about 11 seconds on my system, I tried one-hundred-billion but it couldn't allocate enough memory). I was just wondering if there was any clear improvements that could be made in my code? I'm mostly hoping to get some Rust and formatting tips, but am very open to math improvements as well.
Thank you in advance!
use std::{
sync::{Arc, RwLock},
thread, time::Instant, cmp::min
};
pub fn main() {
let now = Instant::now();
let primes = threaded_segmented_sieve_of_eratosthenes(10000000000);
let finished =now.elapsed().as_secs_f64();
//println!("{:?}\n", primes);
println!("found {} primes in {}s", primes.len(), finished);
}
fn threaded_segmented_sieve_of_eratosthenes(limit:usize) -> Vec<usize> {
let threads = num_cpus::get();
explicit_threaded_segmented_sieve_of_eratosthenes(limit, threads)
}
fn explicit_threaded_segmented_sieve_of_eratosthenes(limit:usize, threads:usize) -> Vec<usize> {
let sqrt_of_limit = (limit as f64).sqrt().ceil() as usize;
let early_primes = if limit <= 230 {
Arc::new(RwLock::new(vec![2,3,5,7,11,13,17]))
} else {
Arc::new(RwLock::new(threaded_segmented_sieve_of_eratosthenes(sqrt_of_limit)))
};
let mut thread_handles = Vec::new();
let thread_spacing = (limit - sqrt_of_limit) / threads;
let segment_size = min(100000, sqrt_of_limit);
for i in 0..threads {
let early_primes = early_primes.clone();
let lowest_checked = sqrt_of_limit + i *thread_spacing;
let mut highest_checked = lowest_checked + thread_spacing; | {
"domain": "codereview.stackexchange",
"id": 44368,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, multithreading, rust, primes, formatting",
"url": null
} |
performance, multithreading, rust, primes, formatting
if i == threads - 1 {
highest_checked = limit;
}
thread_handles.push(thread::spawn(move|| {
eratosthenes_segment_thread(early_primes, lowest_checked, highest_checked, segment_size)
}));
}
let mut new_primes = Vec::new();
for handle in thread_handles {
new_primes.append(&mut handle.join().unwrap());
}
let mut early_primes = early_primes.write().unwrap();
early_primes.append(&mut new_primes);
return early_primes.to_owned();
}
fn eratosthenes_segment_thread(early_primes:Arc<RwLock<Vec<usize>>>, lowest_checked: usize, highest_checked: usize, segment_size: usize) -> Vec<usize> {
let mut returned_primes = Vec::new();
let mut lower = lowest_checked;
let mut higher = lowest_checked + segment_size;
let early_primes = early_primes.read().unwrap();
while lower < highest_checked {
if higher > highest_checked {
higher = highest_checked;
}
let mut new_primes = vec![true; segment_size];
for i in 0..early_primes.len() {
let mut lolim = (lower / early_primes[i]) * early_primes[i];
if lolim < lower {
lolim += early_primes[i]
}
let mut j = lolim;
while j < higher {
new_primes[j - lower] = false;
j += early_primes[i];
}
}
let mut p = lower;
while p < higher {
if new_primes[p - lower] {
returned_primes.push(p);
}
p += 1
}
lower += segment_size;
higher += segment_size;
}
return returned_primes;
}
github link: https://github.com/knot427/Primes_To_N
Answer: Rust has great tooling. Use it!
Formatting
I'm mostly hoping to get some Rust and formatting tips | {
"domain": "codereview.stackexchange",
"id": 44368,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, multithreading, rust, primes, formatting",
"url": null
} |
performance, multithreading, rust, primes, formatting
I'm mostly hoping to get some Rust and formatting tips
Formatting tip: cargo fmt will format your code in the idiomatic Rust style.
I don't necessarily always agree with the format... but it's good enough that I love automating that problem away.
Linting
In the same vein, cargo clippy will run a linter-on-steroids. In this case, it points out that using return x; as the last statement of a function is unnecessary, and you can just type x instead.
Let us start with some Rust style.
Learn your iterators
You are using indexes and arrays as if you were using C. This is not idiomatic, and may be costing you performance.
The loop while lower < highest_checked can be rewritten as:
for lower in (lowest_checked..highest_checked).step(segment_size) {
let higher = cmp::min(lower + segment_size, highest_checked);
// ...
}
The loop for i in 0..early_primes.len() can be rewritten as:
for early_prime in early_primes.iter().copied() {
...
}
Note: the copied ensures we have a local copy instead of a reference.
Similarly, the inner loop while j < higher can be rewritten as:
new_primes[(lolim - lower)..(higher - lower)]
.iter_mut()
.step(early_prime)
.for_each(|is_prime| *is_prime = false);
Note: unlike your version, - lower is only applied at the beginning, rather than repeatedly, and bounds are not checked repeatedly.
And finally that while p < higher can be rewritten as:
new_primes
.into_iter()
.enumerate()
.filter(|(is_prime, _)| *is_prime)
.for_each(|(_, offset)| returned_primes.push(lower + offset as u64));
Using iterator code will avoid faffing about with indexes as much as possible:
This avoids the risk of getting them wrong.
This avoids the cost of bounds checks. | {
"domain": "codereview.stackexchange",
"id": 44368,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, multithreading, rust, primes, formatting",
"url": null
} |
performance, multithreading, rust, primes, formatting
Let us focus on the core algorithm now.
Numeric Types
Your use of numeric types is problematic. You use usize when the number wouldn't fit in u32 (and 32-bits platforms are a thing), you convert usize to f64 which could lose precision, etc...
First of all, I'd argue for replacing all sieve numbers by u64, it's guaranteed to be large enough to hold the values you want regardless of the platform.
Secondly, your calculation of the square root of a u64 using a f64 is flawed for any u64 with at least 54 significant bits. You should either guard against that with an assert (53 significant bits is pretty large, already) or you should use the floating square as estimate and refine it.
Sieve on Stack
Your while lower < highest_checked loop will repeatedly create and throw away your sieve.
Because you limit the sieve to 100 K elements, if you made that 100 K a constant you could use an array of 100 K elements on the stack, without issues.
Flip Sieve
At the moment, you initialize your sieve with 1s. Initializing with 0s may be faster, so you may want trying to flip the meaning of those booleans.
(No guarantee there)
Sieve Memory Trashing
Cutting down on the sieve size will also cut down on the sieve cache usage, but it won't change the fact that you are repeatedly (in while j < higher) looping over that memory from one end to the other.
At the moment, you limited the segment size to 100 KB, which is a good start, but the L1 data cache is only 32 KB. It may be that limiting the segment size to 16 KB (half of L1) would be more cache-friendly, speeding up this inner loop execution.
And while at it, I'd advise putting that early_primes[i] in a local variable1, just in case the compiler doesn't figure out it doesn't have to re-read it from memory every single time.
Altogether, this will keep the inner loop with minimal memory access, and hopefully it will make it easier for the compiler to unroll that loop.
1 Using Rust Iterators properly will do that. | {
"domain": "codereview.stackexchange",
"id": 44368,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, multithreading, rust, primes, formatting",
"url": null
} |
performance, multithreading, rust, primes, formatting
Let us look at threading next.
Locking or not locking
Your use of RwLock appears superfluous:
You create the vector of primes.
You then run multiple threads in parallel which only ever read the data.
After joining, you write to the data.
There's never, actually, concurrent attempts at reading and writing, since all writes only happen when a single thread accesses the data.
You can thus, instead:
Use Arc<Vec<u64>>, directly.
Use Arc::try_unwrap(early_primes).unwrap() to get back the vector at the end.
This will eschew locking altogether, and avoid the to_owned() call to clone the vector again at the end.
Thread Creation is a costly endeavor.
Creating and Joining a thread are NOT trivial operations. Really not.
You would need to measure the cost of creating and joining compared to the cost of actually running the sieve, but I would not be surprised to learn it's a significant overhead especially at the beginning when sqrt_of_limit is low and early_primes is small.
The typical answer to this is to use a thread-pool, or some message-passing, so that rather than spinning up a new thread per "job to do", you spin up a few threads, have each of them perform all the jobs that need doing, and only then unwind them all.
I've never used thread pool libraries, so no idea which are good ones, but it may be worth investigating. | {
"domain": "codereview.stackexchange",
"id": 44368,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, multithreading, rust, primes, formatting",
"url": null
} |
c++, algorithm, graph
Title: Implementation of DFS algorithm as described by Algorithms - Dasgupta, Papadimitrious, Umesh Vazirani
Question: I just want feedback regarding C++ coding style and if in any way I can optimize my code (e.g. to use a different data structure). I'm only trying to use up to C++ 14 standard code. Any other improvements are greatly appreciated as well.
file: graph.h
#ifndef GRAPH_H
#define GRAPH_H
#include <iostream>
#include <map>
#include <utility>
#include <vector>
struct Edge
{
char src, dest;
};
class Graph
{
public:
std::map<char, std::vector<char>> adjList_;
// marks which nodes have been visited. True = visited, false = not visited
std::map<char, bool> visited_;
// Graph Constructor
Graph(std::vector<Edge> const &edges);
void DFS(Graph& g);
void explore(Graph& g, char vertex);
void previst(char vertex);
void postvist(char vertex);
};
inline void printGraph(Graph const &graph)
{
std::cout << "Printing out the graph \n";
for(const auto& value : graph.adjList_)
{
std::cout << "Source node: " << value.first << " \nconnected elements: " << std::endl;
for(const auto& element: value.second)
{
std::cout << element << " ";
}
std::cout <<"\n" << std::endl;
}
}
#endif //GRAPH_H
file: graph.cpp
#include "graph.h"
Graph::Graph(std::vector<Edge> const &edges)
{
// add edges to the directed graph
for (auto &edge: edges)
{
// insert at the end
adjList_[edge.src].push_back(edge.dest);
// establish initial visited values for each node
visited_[edge.src] = false;
}
}
void Graph::DFS(Graph& g)
{
// We check for nodes that have not been visited
for (auto const& x : g.visited_)
{
if (x.second == false)
{
explore(g, x.first);
}
}
}
void Graph::explore(Graph& g, char vertex)
{
g.visited_[vertex] = true;
previst(vertex); | {
"domain": "codereview.stackexchange",
"id": 44369,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, graph",
"url": null
} |
c++, algorithm, graph
void Graph::explore(Graph& g, char vertex)
{
g.visited_[vertex] = true;
previst(vertex);
for(auto const& sourceNode : g.adjList_[vertex])
{
if(g.visited_[sourceNode] == false)
{
explore(g, sourceNode);
}
}
postvist(vertex);
}
void Graph::previst(char vertex)
{
std::cout << "Pre visiting vertex: " << vertex << std::endl;
}
void Graph::postvist(char vertex)
{
std::cout << "Post visting vertex: " << vertex << std::endl;
}
file: dfsDriver.cpp
#include "../graph.h"
int main()
{
// undirected edges
std::vector<Edge> edges =
{
{'g', 'd'}, {'g','h'} , {'d', 'h'},
{'d', 'g'}, {'h', 'g'}, {'h', 'd'},
{'d', 'a'}, {'a', 'd'}, {'a', 'b'},
{'b', 'a'}, {'a', 'c'}, {'c', 'a'},
{'c', 'f'}, {'f', 'c'},
{'b', 'f'}, {'f', 'b'}, {'b', 'e'},
{'e', 'b'}, {'e', 'i'}, {'i', 'e'},
{'e', 'j'}, {'j', 'e'},
{'i', 'j'}, {'j', 'i'}, {'k', 'l'},
{'l', 'k'}
};
Graph graph(edges);
//printGraph(graph);
graph.DFS(graph);
}
input graph:
Answer: Optimizing the data structures
You can indeed optimize the way you store the graph a bit. You don't care about the exact order of the vertices, so it is better to use std::unordered_map instead of std::map, as lookups and insertions will then be \$O(1)\$ instead of \$O(\log V)\$.
For visited_, I see two alternatives. First, instead of adding all vertices to it, only add the vertices that have been visited. The bool is then unnecessary, and in fact you should then use std::unordered_set instead of a map. Alternatively, store all the data related to a node together in a struct, and just have a single container. For example:
struct NodeData {
std::vector<char> neighbors;
bool visited;
};
std::map<char, NodeData> adjList_; | {
"domain": "codereview.stackexchange",
"id": 44369,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, graph",
"url": null
} |
c++, algorithm, graph
std::map<char, NodeData> adjList_;
The approach you have taken is the right one in general. If you have more information about the graph, for example if it has only a few vertices or very many, or whether it is very sparse or very dense, you might be able to get more performance using more specialized data structures. In fact, since you have limited yourself to chars, you could store the graph like so:
constexpr std::size_t N = 1 << CHAR_BIT; // most likely 256 on your machine
std::bitset<N> vertices_;
std::array<std::bitset<N>, N> adjList_;
std::bitset<N> visited_;
A std::bitset of 256 bits only uses 32 bytes. Each bit represents whether a vertex is part of the graph (vertices_), whether it was visited (visited_), or for a given vertex, which vertices are its neighbors (adjList_). Consider that an empty std::vector uses a similar amount of memory, and std::map and friends have to allocate memory for every element they store. The bitsets and the array use a fixed amount of memory, but everything is guaranteed to be stored compactly in memory, and lookups are very fast.
You can nest structs and classes
It can make sense here to move struct Edge into class Graph. This avoids polluting the global namespace, and has some other benefits you'll see below.
class Graph{
public:
struct Edge
{
char src, dest;
};
…
Graph(std::vector<Edge> const &edges);
…
};
…
int main()
{
std::vector<Graph::Edge> edges = {…};
…
}
No need to pass an object to its own member functions
Why does Graph::DFS() take a Graph& as a parameter? Since it is a member function of Graph, this is unnecessary. The same goes for Graph::explore(). You can just write:
void Graph::DFS()
{
for (auto const& x : visited_)
{
if (x.second == false)
{
explore(x.first);
}
}
} | {
"domain": "codereview.stackexchange",
"id": 44369,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, graph",
"url": null
} |
c++, algorithm, graph
Make the code more generic
You have hardcoded the type of vertex to char. What if you have a graph with more than 256 different vertices? Maybe you want to identify vertices by integers, or maybe even strings. You can make your code more generic by using templates:
template<typename VertexID>
class Graph
{
public:
struct Edge
{
VertexID src, dest;
};
…
void explore(VertexID vertex);
void previst(VertexID vertex);
void postvist(VertexID vertex);
};
You can go further. Consider your constructor: it takes a reference to a std::vector, but does it really matter what type the container is? What if the caller has the edges in a std::array? Converting it to a std::vector would be wasteful. You could make your constructor a template that takes any kind of container:
template<typename Container>
Graph::Graph(Container const& edges)
{
for (auto &edge: edges)
{
…
}
}
If you accidentily call the constructor with something that is not a container of Edges, this will fail to compile, but probably with some cryptic error messages. With C++20 you can use concepts to restrict the type of container:
template<std::ranges::input_range<Edge> Container>
Graph::Graph(Container const& edges)
{
…
}
You also hardcoded previst() and postvist(). You could make DFS() take a function as an argument that it runs on vertices, either using std::function<void(VertexID&)>, or with a template parameter:
template<typename Function>
void Graph::DFS(Function& postvisit) {
for (auto const& x : g.visited_)
{
if (x.second == false)
{
explore(x.first, postvisit);
}
}
}
template<typename Function>
void Graph::explore(VertexID vertex, Function& postvisit)
{
g.visited_[vertex] = true;
for(auto const& sourceNode : g.adjList_[vertex])
{
if(!g.visited_[sourceNode])
{
explore(sourceNode, postvisit);
}
}
std::invoke(postvisit, vertex);
} | {
"domain": "codereview.stackexchange",
"id": 44369,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, graph",
"url": null
} |
c++, algorithm, graph
std::invoke(postvisit, vertex);
}
And then use it like so:
std::vector<Graph::Edge<char>> edges = {…};
Graph graph(edges);
graph.DFS([](char vertex) {
std::cout << "Post visiting vertex: " << vertex << '\n';
});
Use '\n' instead of std::endl
Prefer to use '\n' instead of std::endl; the latter is equivalent to the former, but also forces the output to be flushed, which is usually unnecessary, and has a negative impact on performance.
What if you want to call DFS() more than once?
If you call DFS() a second time, nothing will happen since visited_ is true for all vertices. You should clear visited_ at the start of DFS(). Maybe it is even better not to have visited_ as a member variable, but instead make it a local variable in DFS(), and pass a reference to it to explore(). This way, each call to DFS() will have a fresh version, and less space is used for the Graph object itself.
If you really intend this to be used only once, then this shouldn't be a class to begin with. Instead, you could write a free function DFS() that takes a set of edges (and possibly a visitor function) as a parameter, and creates all the state necessary for the depth-first search as local variables. | {
"domain": "codereview.stackexchange",
"id": 44369,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c++, algorithm, graph",
"url": null
} |
go, concurrency
Title: A producer (mapper) and consumer (reducer) problem with concurrency in go with race conditions
Question: Link to go playground https://go.dev/play/p/ctQDpDW6pui
This code has been based on suggestions and conversations in this thread here
Architecture:
A read method creates a channel shared with producer. Read method adds to the channel and producer reads from it.
A producer and consumer share a channel. Producer adds data, consumer consumes.
The producer and consumer use error channels to communicate errors to main method.
A hard requirement is if any worker - the producer or consumer - encounters an error then all workers should halt. This caused me to use contexts to cancel the producer and consumers.
The producers and consumers communicate error via error channel.
A go-routine called handleAllErrors consumes the errorChannel and uses context's cancel to call quits and shut down the remaining producers and consumers.
Issues
I have tried hard to overcome challenges of deadlock and to the best of my knowledge all deadlock issues have been resolved however the code appears a some improvement.
I am especially concerned of the race conditions of draining queues when the consumers are interrupted.
Eg: if producers are still computing, while all the consumers get interrupted the code could blocks/deadlocks.
Eg: if all producers are interrupted while reader is still feeding it, it may blocks.
Please review the code, help figure any deadlocks I missed and suggest a cleaner approach to some deadlocks I tried to work-around.
package operation
import (
"context"
"fmt"
"sync"
)
func mapperreducer_so() {
a1 := []int{1, 2, 3, 4, 5}
a2 := []int{5, 4, 3, 1, 1}
a3 := []int{6, 7, 8, 9}
a4 := []int{1, 2, 3, 4, 5}
a5 := []int{5, 4, 3, 1, 1}
a6 := []int{6, 7, 18, 9}
arrayOfArray := [][]int{a1, a2, a3, a4, a5, a6}
ctx, cancel := context.WithCancel(context.Background())
ch1 := read(ctx, arrayOfArray) | {
"domain": "codereview.stackexchange",
"id": 44370,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "go, concurrency",
"url": null
} |
go, concurrency
ctx, cancel := context.WithCancel(context.Background())
ch1 := read(ctx, arrayOfArray)
messageCh := make(chan int)
errCh := make(chan error)
producerWg := &sync.WaitGroup{}
for i := 0; i < 3; i++ {
producerWg.Add(1)
producer(ctx, producerWg, ch1, messageCh, errCh)
}
consumerWg := &sync.WaitGroup{}
for i := 0; i < 3; i++ {
consumerWg.Add(1)
consumer(ctx, consumerWg, messageCh, errCh)
}
firstError := handleAllErrors(ctx, cancel, errCh)
producerWg.Wait()
close(messageCh)
consumerWg.Wait()
close(errCh)
fmt.Println(<-firstError)
}
func read(ctx context.Context, arrayOfArray [][]int) <-chan []int {
ch := make(chan []int)
go func() {
defer close(ch)
for i := 0; i < len(arrayOfArray); i++ {
select {
case <-ctx.Done():
return
case ch <- arrayOfArray[i]:
}
}
}()
return ch
}
func producer(ctx context.Context, wg *sync.WaitGroup, in <-chan []int, messageCh chan<- int, errCh chan<- error) {
go func() {
defer wg.Done()
for {
select {
case <-ctx.Done():
return
case arr, ok := <-in:
if !ok {
return
}
for i := 0; i < len(arr); i++ {
// simulating an error.
//if arr[i] == 10 {
// errCh <- fmt.Errorf("producer interrupted")
//}
select {
case <-ctx.Done():
return
case messageCh <- 2 * arr[i]:
}
}
}
}
}()
}
func consumer(ctx context.Context, wg *sync.WaitGroup, messageCh <-chan int, errCh chan<- error) {
go func() {
wg.Done() | {
"domain": "codereview.stackexchange",
"id": 44370,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "go, concurrency",
"url": null
} |
go, concurrency
for {
select {
case <-ctx.Done():
return
case n, ok := <-messageCh:
if !ok {
return
}
fmt.Println("consumed: ", n)
// simulating erros
//if n == 10 {
// errCh <- fmt.Errorf("output error during write")
//}
}
}
}()
}
func handleAllErrors(ctx context.Context, cancel context.CancelFunc, errCh chan error) <-chan error {
firstErrCh := make(chan error, 1)
isFirstError := true
go func() {
defer close(firstErrCh)
for err := range errCh {
select {
case <-ctx.Done():
default:
cancel()
}
if isFirstError {
firstErrCh <- err
isFirstError = !isFirstError
}
}
}()
return firstErrCh
} | {
"domain": "codereview.stackexchange",
"id": 44370,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "go, concurrency",
"url": null
} |
go, concurrency
Answer: I have several suggestions for you.
The first is to rename mapperreducer_so to TestProducerConsumer and to change its signature from func() to func(t *testing.T). This will turn it into a test function that you can run with go test. Better still, you can give the -race flag to go test to turn on the race detector, which will tell you if there are deadlock issues in the code you test.
If you also put TestProducerConsumer into a file with a _test.go suffix, it will be omitted from any non-test build.
Next, when you create a context with context.WithCancel, you should immediately include a defer cancel() line to make sure the context is canceled no matter how you leave the function. An exception is if you want the context to persist after the function's lifetime, but that's rare and probably indicates a need to restructure your code. (If you're worried that cancel will be called multiple times, don't be - the second and subsequent times are harmless no-ops. The worse case is calling it zero times.)
Next, if you want an error in any goroutine to cause the other goroutines to be canceled, you want an errgroup.Group instead of a sync.WaitGroup. That will obviate a lot of the logic you've written here.
Next, I suggest using a single errgroup.Group to contain all producers and all consumers. There doesn't seem to be any reason to prefer separate groups.
Next, it's good practice to leave it up to the caller to launch goroutines. I suggest rewriting producer and consumer to contain only the bodies of the deferred funcs that they now contain, and changing the caller to invoke go producer(...) and go consumer(...). (It'll look a little different if you use errgroup.Group as I suggested above.)
If you use errgroup.Group, you can and should rewrite producer and consumer to return an error instead of sending it on a channel, and that should include ctx.Err() in the ctx.Done() case.
The wg.Done() at the top of consumer should of course be defer wg.Done(). | {
"domain": "codereview.stackexchange",
"id": 44370,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "go, concurrency",
"url": null
} |
go, concurrency
The wg.Done() at the top of consumer should of course be defer wg.Done().
Finally, permit me humbly to commend the package github.com/bobg/go-generics/parallel to your attention. | {
"domain": "codereview.stackexchange",
"id": 44370,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "go, concurrency",
"url": null
} |
beginner, c
Title: Write a program entab that replaces strings of blanks by the minimum number of tabs and blanks to achieve the same spacing
Question: My attempt at Exercise 1-21 in the The C Programming Language book written by Brian Kernighan and Dennis Ritchie.
Exercise Prompt
Write a program entab that replaces strings of blanks by the minimum number of tabs and blanks to achieve the same spacing.
sample.txt
apple banana pear
Expected Output with Test Text
\tapple\tbanana\t\t\tpear
Code
//Exercise 1-21. Write a program entab that replaces strings of blanks by the minimum
//number of tabs and blanks to achieve the same spacing. Use the same tab stops as for detab.
//When either a tab or a single blank would suffice to reach a tab stop, which should be given
//preference?
#include <stdio.h>
#define CHARMAX 1000 // max chars per line
#define LINEMAX 1000 // max total number of lines
#define TABSTOP 8
int main() {
int ch = 0;
size_t charCount = 0; // running total of characters
size_t lineCount = 1; // running total of lines
size_t space_count = 0; // running count of spaces | {
"domain": "codereview.stackexchange",
"id": 44371,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, c",
"url": null
} |
beginner, c
while ((ch = getchar()) != EOF) {
charCount++;
if (ch == '\n') {
lineCount++;
charCount = 0;
}
if (lineCount > LINEMAX) {
printf("\n!PROGRAM HALTED -MAX LINES REACHED- MAXIMUM NUMBER OF LINE ALLOWED IS %d!\n", LINEMAX);
break;
}
if (ch == ' ') {
while ((ch = getchar()) == ' ') {
space_count++;
if (space_count % TABSTOP == 0) {
putchar('\t');
space_count = 0;
}
}
if (space_count != 0) {
putchar('\t');
}
}
if (charCount < CHARMAX) {
putchar(ch);
} else {
while ((ch = getchar()) != '\n') {
;
}
putchar(ch);
lineCount++;
charCount = 0;
}
}
}
```
Answer: Thank you for putting your work out there, it's brave of you!
That is how we improve.
The motivation for CHARMAX / LINEMAX is unclear,
given that they are not part of the problem specification.
Gold star for giving each
magic number
a manifest constant name.
The usual motivation for CHARMAX (or MAX_LINE_LENGTH, to distinguish
from total document length) would be to allow a get_line() function
to get away with static allocation instead of having to malloc().
The LINEMAX just kind of mystifies me.
I mean, what if $ yes | entab processes
an unbounded number of lines?
Is it somehow "correct" to bail after a while?
Or should the pipeline have looked like $ yes | head -${LINEMAX} | entab
if we wanted finite output?
size_t charCount = 0; // running total of characters
size_t lineCount = 1; // running total of lines
size_t space_count = 0; // running count of spaces | {
"domain": "codereview.stackexchange",
"id": 44371,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, c",
"url": null
} |
beginner, c
Pick a coding standard.
There's lots to choose from, maybe GNU or Google.
And then stick with it.
In particular, avoid arbitrarily mixing camelCase with snake_case.
Kudos, the identifiers are wonderfully descriptive.
Consider deleting the // comments, as they don't really
tell us anything we didn't already know.
} else {
while ((ch = getchar()) != '\n') {
;
}
tiny style nit: The ; semicolon on a line by itself is great,
it calls attention to "this is a no-op!". But it's maybe a bit
weird within { } braces. Use one or the other. This comes
back to: pick a style guide, write down which one, and adhere to it.
Which is a lot easier if you routinely use a linter or a
code reformatter which implements that guide.
Bigger item: We're discarding characters till end-of-line?
I don't find that in the original spec. Ok, fine, maybe,
write down the revised spec that you are implementing.
One can only report a code defect ("bug") with respect
to a spec. Missing specs cause endless misery.
Again, the usual motivation for such line truncation
would revolve around static allocation for a line buffer.
But your code makes no such allocation, so it just kind
of seems like gratuitous data deletion. | {
"domain": "codereview.stackexchange",
"id": 44371,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, c",
"url": null
} |
beginner, c
Here's a bigger critique.
Your loop features four if clauses, some with dependent clauses.
And there's quite a lot of mutation going on, the most
important being consuming input characters.
I don't find it especially easy to reason about
invariants
in this code.
Consider introducing a helper function that is responsible
for processing exactly one line of input text.
(For one thing, that makes writing unit tests way easier.)
Let's define a term.
def: We are at start-of-line when at start of input file,
and also right after we've seen a \n newline input character.
The outer loop invariant would be "we are at start-of-line each
time the while loop begins again", and helper would have
its own inner loop which is responsible for making the invariant true.
That is, it's responsible for consuming a full line and
returning control to the outer loop.
Here's an example document for a unit test.
sp16 = " " * 16
doc = f"{sp16}A{sp16}B"
expected_output = "\t\tA\t\t B"
I don't find it especially easy to reason about space_count
OBOB
issues given the current structure.
Maybe the input is specified as having no TAB characters?
If so, we should signal an error upon encountering one.
If not, maybe reset space_count to zero?
// When either a tab or a single blank would suffice to reach a tab stop, which should be given preference?
It seems the problem author was inviting an explicit response to this,
perhaps in the form of a comment.
Overall?
You seem to have adopted some novel requirements that don't appear
in the problem statement.
Running with those new requirements, the code appears to implement
them correctly.
There is an opportunity to structure the code into smaller chunks,
each having greater clarity for future maintainers who will
attend to bug fixes and feature enhancements. | {
"domain": "codereview.stackexchange",
"id": 44371,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "beginner, c",
"url": null
} |
python, python-requests
Title: Python: Handling different HTTP status codes using the requests library
Question: I am using the requests library to make requests to an API. However, I am not sure how to handle different status codes. I am particularly interested in handling status code 500, as that is part of my task.
1: Is a try/except block like I have an acceptable way of handling error codes?
2: How should I handle 500 status code? Would retrying 5 times once per second, then exiting be acceptable? If so, I don't know how to implement that without wrapping the whole function in a for loop (which seems bad practice).
Here is my function that makes HTTP requests:
def make_request(type, endpoint, h, data=None):
try:
url = f'https://example.net/not-important/{endpoint}'
if type == 'GET':
r = requests.get(url, headers=h)
elif type == 'POST':
r = requests.post(url, headers=h, json=data)
# r = requests.get('https://httpstat.us/500')
r.raise_for_status()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 500:
# 500 logic here. retry?
else:
print(f'An error occurred: {e}\n{e.response.status_code}')
sys.exit(1)
else:
return r.json()
Answer: I suggest you implement exponential back-off. For example, first try after x seconds and then twice as long as the last wait time. This is the common practice for retries. Recursion often looks cleaner than a loop. You could give your make_request function an additional parameter with a default value of 0 that counts retries. As always with recursion make sure to have a break point e.g. 5 retries to avoid infinite recursion. | {
"domain": "codereview.stackexchange",
"id": 44372,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "python, python-requests",
"url": null
} |
haskell
Title: Two Sum Implementation on Haskell Brute Force and Optimized approach
Question: Two Sum Problem:
Given an array of integers A and an integer K return True if there are two elements xi, xj (i != j) such that xi + xj = K. Return False otherwise.
I am implementing the classical two sum problem but I am currently practising Functional programming so I picked Haskell. I wrote a naive implementation which I expect is O(n²) (not sure though).
In the brute force implementation I just try every pair of numbers until I find the ones that meet the criteria;
-- O(n^2) implementation
twoSum :: [Int] -> Int -> Bool
twoSum [] k = False
twoSum (x:[]) k = False
twoSum (x:y:[]) k = k == x + y
twoSum (x:y:xs) k
| k == x + y = True
| otherwise = twoSum (x:xs) k || twoSum (y:xs) k
Then I tried to optimize it. I use a Set to store seen numbers, and then traverse every element x of the array. If K - x is in the Set, it means I found the pair that satisfies the condition.
Per my analysis I think the time complexity is O(n log n) because set implementation in Haskell is O(log n) in insert and check membership:
import qualified Data.Set as S
import Prelude
-- O(n log n) Implementation
twoSumOpt:: [Int] -> Int -> Bool
twoSumOpt [] k = False
twoSumOpt (x:[]) k = False
twoSumOpt (x:y:[]) k = k == x + y
twoSumOpt (x:y:xs) k
| k == x + y = True
| otherwise = twoSum' (y:xs) k (S.insert x seen)
where
seen = S.fromList([])
twoSum' :: [Int] -> Int -> S.Set Int -> Bool
twoSum' (x:[]) k s = S.member (k - x) s
twoSum' (x:y:[]) k s = k == x + y
twoSum' (x:xs) k s
| S.member (k - x) s = True
| otherwise = twoSum' xs k (S.insert x s) | {
"domain": "codereview.stackexchange",
"id": 44373,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "haskell",
"url": null
} |
haskell
I tested with some inputs and profiled solutions:
ghci> twoSumOpt [1, 2, -4, 3, 5, -7, 8] (-2)
True
(0.01 secs, 892,912 bytes)
ghci> twoSumOpt [1..10000000] 10000001
True
(8.51 secs, 8,154,041,496 bytes)
ghci> twoSumOpt [1..100000000] 100000001
True
(95.83 secs, 91,669,673,632 bytes)
Question: am I following the correct Haskell patterns? (the O(N²) implementation didn't finish with my inputs so I assume I got the improvement in performance).
And finally, can we do better in Haskell? Is there a way to implement an amortized insertion and lookup of O(1) so I can do this in O(N)?
Answer: Sorry, I know this is stale.
First things first, from a code quality perspective, it's usually not good to have redundant cases. e.g. [x, y] and x:y:xs will both match 1:2:[], and you'll get the behavior you want regardless of which branch of your code it goes to, so only keep the more general one. (Don't worry about the tiny bit of extra work that will happen from the extra recursion, we don't know how it will compare to the extra work that's saved by checking one less pattern!)
It's a little harder to see how this applies in the case of twoSum' than twoSumOpt, but it does (at least once you add a [] case so the pattern match is total).
In a similar vein, why do we need any cases at all for twoSumOpt? twoSum' is already doing basically all of the work, just let it do everything. This gets us as far as:
twoSumOpt :: [Int] -> Int -> Bool
twoSumOpt = twoSum' $ S.fromList []
where twoSum' :: S.Set Int -> [Int] -> Int -> Bool
twoSum' _ [] _ = False
twoSum' s (x:xs) k
| S.member (k - x) s = True
| otherwise = twoSum' (S.insert x s) xs k | {
"domain": "codereview.stackexchange",
"id": 44373,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "haskell",
"url": null
} |
haskell
Can/should we do it in one line? I'm not sure. Our worst-case time will always be when we traverse the entire list, and load it all into our Set, so trying to short-circuit on matches only helps our optimistic paths. If we solve this more bluntly, it'll be a bit easier to read. IDK how it'll affect performance; I don't think it can make the worst case worse by more than a constant 2 factor.
twoSumOpt :: [Int] -> Int -> Bool
twoSumOpt xs k = any (`S.member` s) xs
where s = S.fromList $ (k -) <$> xs
I'm unaware of any algorithms for this problem with fundamentally better worst-case performance, but it seems like containers's Data.Set implementation, while nice and general purpose, isn't the most performant. You could experiment with IntSet or HashSet; it should be an easy swap. | {
"domain": "codereview.stackexchange",
"id": 44373,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "haskell",
"url": null
} |
javascript, object-oriented, array, functional-programming, ecmascript-6
Title: Transform an array into object and set index for an unique key in javascript
Question: I have a method that merges keys and indexes from an array into an object.
I'm stuck with ways to compress this method, and I don't know what I can do to make it simpler.
Goal
get an array of objects with keys
return an object with a unique key and the matching indexes
Code (simplified)
const items = [
{
"key": 0
},
{
"key": 2
},
{
"key": 4
},
{
"key": 4
}
]
function mergeItems (items) {
const helperObj = {}
// loop over items
items.forEach((item, itemIdx) => {
const { key } = item
// got key in helper obj? push index
if (key in helperObj) {
helperObj[key].push(itemIdx)
} else {
// create new array and set index
helperObj[key] = [itemIdx]
}
});
return helperObj
}
console.log(mergeItems(items));
Expected output:
{
"0": [0],
"2": [1],
"4": [2,3]
}
Question
Is there a way to do this without creating a helper object?
Answer:
Is there a way to do this without creating a helper object?
It may be possible. Perhaps what you are really asking is whether there is a way to have a pure function, such that helperObj does not need to be declared outside the callback function and modified within the callback function before it is utilized after the forEach method is called. The callback functions passed to the forEach method typically end up not being pure for this reason.
Instead of using the forEach() method, the reduce() method can be used. It still may have a helperObj for the accumulator argument but the scope is limited to within the callback function. | {
"domain": "codereview.stackexchange",
"id": 44374,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, object-oriented, array, functional-programming, ecmascript-6",
"url": null
} |
javascript, object-oriented, array, functional-programming, ecmascript-6
const items = [{
"key": 0
},
{
"key": 2
},
{
"key": 4
},
{
"key": 4
}
]
const mergedItems = items.reduce(function(helperObj, { key }, index) {
if (helperObj[key]) {
helperObj[key].push(index);
} else {
helperObj[key] = [index];
}
return helperObj;
}, {});
console.log('merged items: ', mergedItems);
Instead of either pushing or assigning to an array the array could be created when it does not exist:
const items = [{
"key": 0
},
{
"key": 2
},
{
"key": 4
},
{
"key": 4
}
]
const mergedItems = items.reduce(function(helperObj, { key }, index) {
if (!helperObj[key]) {
helperObj[key] = [];
}
helperObj[key].push(index);
return helperObj;
}, {});
console.log('merged items: ', mergedItems);
And if the goal is to greatly simplify it, a ternary operator could be used to compress it even further:
const items = [{
"key": 0
},
{
"key": 2
},
{
"key": 4
},
{
"key": 4
}
]
const mergedItems = items.reduce((acc, { key }, index) => { acc[key] ? acc[key].push(index) : acc[key] = [index]; return acc}, {})
console.log('merged items: ', mergedItems); | {
"domain": "codereview.stackexchange",
"id": 44374,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "javascript, object-oriented, array, functional-programming, ecmascript-6",
"url": null
} |
c, file-system, posix
Title: Wipe a directory tree
Question: The idea here is to destroy the data in a directory tree by finding any files in it and overwriting their data with garbage before deleting them. The function returns minus the number of errors encountered, or zero if successful.
Obliterate.c:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dirent.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/stat.h>
static char B[FILENAME_MAX];
static int RM(const size_t len) {
struct stat st;
if (stat(B, &st))
return -1;
if (S_ISDIR(st.st_mode)) {
DIR *const dir = opendir(B);
if (!dir)
return -1;
B[len] = '/';
for (struct dirent *dirent; (dirent = readdir(dir));) {
if (memcmp(dirent->d_name, ".", 2) && memcmp(dirent->d_name, "..", 3)) {
const size_t namelen = strlen(dirent->d_name);
char *const ptr = (char *)memcpy(B+len+1, dirent->d_name, namelen)+namelen;
*ptr = 0;
const int tmp = RM(ptr-B);
if (tmp)
return closedir(dir)+tmp-1;
}
}
B[len] = 0;
if (closedir(dir))
return -1;
} else if (S_ISREG(st.st_mode)) {
const int fd = open(B, O_WRONLY);
if (fd == -1)
return -1;
for (char buf[BUFSIZ]; st.st_size > 0; st.st_size -= BUFSIZ) {
const size_t size = st.st_size > BUFSIZ ? BUFSIZ : st.st_size;
arc4random_buf(buf, size);
if (write(fd, buf, size) < size)
return close(fd)-1;
}
if (close(fd))
return -1;
}
return remove(B);
}
int main(const int argc, const char *const *argv) {
while (*++argv) {
const size_t len = strlen(*argv);
memcpy(B, *argv, len);
if (RM(len))
perror(*argv);
}
return 0;
} | {
"domain": "codereview.stackexchange",
"id": 44375,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, file-system, posix",
"url": null
} |
c, file-system, posix
Answer: The code is very dense, to the extent that it harms readability. I'm not a fan of variable names such as B, especially for globals. We could do with better layout and more comments. And I recommend always using compound statements with loops and conditionals, even when the body is a single statement and {โฆ} aren't strictly required.
Compilation warns that there's no visible definition of arc4random_buf(). I strongly urge you to require prototypes for all functions that are used rather than fallback back to "implicit int".
I would use strcmp() rather than memcmp() with hand-crafted lengths when testing for . and .., to more clearly convey intent and to eliminate even the possibility of mismatch. Similarly, prefer strcpy() (or strncpy) to memcpy when copying arguments or directory entries into B.
memcpy() returns a void*, so there's no need to cast to assign it to a char* variable.
We have no defence against overflowing the end of B. FILENAME_MAX is not a guarantee that we can't create longer path names! Consider changing directory to keep pathnames relative and avoid the need for copying altogether.
Error messages aren't particularly informative: perror(*argv) shows only the argument at the root of the failure, but users are more likely to be interested in the actual file where the problem occurred.
Opening a file with O_WRONLY may well cause the file to be truncated on the first write, and new storage allocated - there's no expectation that the new contents will overwrite the disk blocks previously allocated to the file. In any case, overwriting like shred is only effective for filesystems that write file data in-place, without journalling, on disks with no translation layer (i.e. not most SSDs today). Even on files where that expectation is valid, I would lean towards mmap() for in-place overwriting. | {
"domain": "codereview.stackexchange",
"id": 44375,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, file-system, posix",
"url": null
} |
performance, c
Title: Finding a function from a name within a string
Question: The code is designed to see if the first alphabetic characters in the *str argument refers to a known function, as part of a calculator app, and sets *str to the first character after the name of the mathematical function if found, similar to the endptr argument in strtol, strtod, etc. I seek to make this code as direct as possible, eliminating redundant/unnecessary steps.
static int c(const void *const restrict a, const void *const restrict b) {
const char *const sa = *(const char *const *)a, *const sb = *(const char *const *)b;
const size_t l = strlen(sb);
const int cmp = memcmp(sa, sb, l);
return cmp ? cmp : isalpha(sa[l]) ? sa[l]-sb[l] : 0;
}
static double (*func(const char **const str))(double) {
static const char *const s[] = {"abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "cbrt", "ceil", "cos", "cosh", "erf", "erfc", "floor", "gamma", "ln", "log", "round", "sin", "sinh", "sqrt", "tan", "tanh", "trunc"};
static double (*const f[])(double) = {fabs, acos, acosh, asin, asinh, atan, atanh, cbrt, ceil, cos, cosh, erf, erfc, floor, tgamma, log, log10, round, sin, sinh, sqrt, tan, tanh, trunc};
const char *const *const r = bsearch(str, s, sizeof(s)/sizeof(*s), sizeof(*s), c);
return r ? *str += strlen(r), f[r-s] : NULL;
}
Example Usage:
const char *s = "log100";
printf("%g\n", func(&s)(strtod(s, NULL))); // Prints 2
Answer: Missing includes
We need a definition for size_t and the mathematical functions for this to compile; we also use undefined functions whose return type is not int. I recommend including prototypes for all of them:
#include <ctype.h>
#include <math.h>
#include <stdlib.h>
#include <string.h> | {
"domain": "codereview.stackexchange",
"id": 44376,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c",
"url": null
} |
performance, c
Even then, abs, gamma and ln are not defined. I'm guessing fabs, tgamma and log were intended (and log10 where log is used).
restrict is unnecessary in the comparison function c(), since no values are modified.
memcmp(sa, sb, l) is undefined behaviour unless we already know that string sa is longer than sb, as is the subsequent dereference sa[l]. On the other hand, sb[l] is known to be zero.
Don't forget that we should be casting to unsigned char before passing characters to <ctype.h> functions. And if we ever want to use function names such as log2, we'll want to use isalnum() rather than isalpha().
I'm not a fan of the side-by-side arrays that must agree; that's a maintenance nightmare. Prefer an array of pairs to a pair of arrays. And we definitely need a comment telling future maintainers that the elements are required to be in sorted order.
strlen(r) doesn't make sense given that r is a char** - was strlen(*r) intended? I'm surprised that it passes basic unit tests.
Modified code
I've addressed all the issues I identified above. The testing is minimal, and should be greatly expanded, probably using one of the available test frameworks.
#include <ctype.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
struct math_fun {
const char* name;
double (*func)(double);
};
static int compare_function_name(const void *a,
const void *b)
{
const char *const *const key = a;
const struct math_fun *const entry = b;
const size_t entry_len = strlen(entry->name);
const int cmp = strncmp(*key, entry->name, entry_len);
if (cmp) {
/* mismatch: return as is */
return cmp;
}
/* else b is a prefix of a - match only a complete word, else a > b */
return isalpha((unsigned char)(*key)[entry_len]);
} | {
"domain": "codereview.stackexchange",
"id": 44376,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c",
"url": null
} |
performance, c
/* If the first word of `str` matches a function name, returns the
corresponding function and advances `str` to the next character,
else returns a null function-pointer. */
static double (*func(const char **const str))(double)
{
static const struct math_fun functions[] = {
/* N.B. must be in `strcmp()` order */
{ "abs", fabs },
{ "acos", acos },
{ "acosh", acosh },
{ "asin", asin },
{ "asinh", asinh },
{ "atan", atan },
{ "atanh", atanh },
{ "cbrt", cbrt },
{ "ceil", ceil },
{ "cos", cos },
{ "cosh", cosh },
{ "erf", erf },
{ "erfc", erfc },
{ "floor", floor },
{ "gamma", tgamma },
{ "ln", log },
{ "log", log10 },
{ "round", round },
{ "sin", sin },
{ "sinh", sinh },
{ "sqrt", sqrt },
{ "tan", tan },
{ "tanh", tanh },
{ "trunc", trunc }
};
struct math_fun *match = bsearch(str, /* count */
functions, /* array */
sizeof functions / sizeof *functions, /* array len */
sizeof *functions, /* element size */
compare_function_name); /* comparator */
if (!match) { return 0; }
/* modify argument to point after the function name */
*str += strlen(match->name);
return match->func;
}
int main(void)
{
const char *input_tan = "tan";
const char *input_tanh = "tanh";
return func(&input_tan) != tan || *input_tan
|| func(&input_tanh) != tanh || *input_tanh;
} | {
"domain": "codereview.stackexchange",
"id": 44376,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "performance, c",
"url": null
} |
c, networking, unix, posix
Title: Multiperson chat server using the select() API
Question: I followed the Beej's Guide to Network Programming. The program acts like a multi-user chat server. One connects and sends a message to the server, which is forwarded to all that are connected. There's a simple makefile below; also Valgrind output.
Review Goals:
I have specific questions:
Is my code POSIX compliant?
Do you see any outdated practices or functions?
Does any part of my code invoke undefined behaviour?
Is it portable (across UNIX-like systems)?
How much more information can I glean about a client from a given file descriptor?
Is there an alternative to errno/perror style of error checking?
Currently, I'm only logging the client's host and service name. What other sort of information should I log? Should I look into syslog?
Do you see any potential buffer overflows?
How can I improve the code?
Code:
log.h:
#ifndef LOG_H
#define LOG_H
#define LOG_TIME 0x01 /* 0b00000001 */
#define LOG_DATE 0x02 /* 0b00000010 */
#define LOG_USER 0x04 /* 0b00000100 */
#define LOG_COUNT 0x08 /* 0b00001000 */
#define LOG_ALL 0xFF /* 0b11111111 */
#define LOG_FULLTIME 0x03 /* 0x00000011 */
#define TS_BUF_LENGHT 50
void log_msg (FILE * fp, const char *msg, uint8_t options);
#endif
log.c:
void log_msg (FILE * fp, const char *msg, uint8_t options)
{
time_t time_val;
static long long log_count = 0;
char time_stamp[TS_BUF_LENGHT];
char date_stamp[TS_BUF_LENGHT];
struct tm *tm_info;
time_val = time (0);
tm_info = localtime (&time_val);
(void) strftime (date_stamp, TS_BUF_LENGHT, "%F (%a)", tm_info);
(void) strftime (time_stamp, TS_BUF_LENGHT, "%H:%M:%S", tm_info); | {
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
if (options & LOG_COUNT) {
fprintf (fp, "%lld\n, ", ++log_count);
}
if (options & LOG_DATE) {
fprintf (fp, "%s, ", date_stamp);
}
if (options & LOG_TIME) {
fprintf (fp, "%s, ", time_stamp);
}
fprintf (fp, "%s\n", msg);
}
selectserver.h:
#ifndef SELECTSERVER_H
#define SELECTSERVER_H
#define _POSIX_C_SOURCE 200809
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/time.h>
#include <netdb.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#define PROGRAM_NAME "selectserver"
#define PORT "9808" /* Port we are listening on */
#define MAX_LOG_TEXT 2048 /* Max text length for logging */
#ifdef BUFSIZ /* Max text lenght */
#define BUFSIZE BUFSIZ
#else
#define BUFSIZE 4096
#endif
#define ARRAY_CARDINALITY(x) (sizeof(x) / sizeof (x[0]))
#define NI_MAXHOST 1024
#define NI_MAXSERV 35
#define FFLUSH(fp) if (fflush (fp) == EOF) perror ("fflush()")
#define FSYNC(fd) if (fsync (fd) == -1) perror ("fsync()")
#endif
selectserver.c:
/*
* selectserver.c -- a multiperson chat server
*/
#include "selectserver.h"
#include "log.h"
FILE *log_fp = 0;
int log_fd = 0;
static void exit_cleanup (void)
{
if (log_fp) {
(void) fclose (log_fp);
}
}
static void sigint_handler (int sig)
{
(void) close (log_fd);
(void) signal (sig, SIG_DFL);
(void) raise (sig);
}
static int init_addr (struct addrinfo **servinfo)
{
struct addrinfo hints;
memset (&hints, 0x00, sizeof hints);
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE;
int status = 0;
errno = 0;
if ((status = getaddrinfo (0, PORT, &hints, servinfo)) != 0) {
perror ("getaddrinfo()");
return -1;
}
return 0;
} | {
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
static int init_sock (struct addrinfo **servinfo)
{
int master_fd = 0;
struct addrinfo *p = 0;
int yes = 1;
socklen_t size = sizeof (yes);
errno = 0;
for (p = *servinfo; p; p = p->ai_next) {
master_fd = socket (p->ai_family, p->ai_socktype, p->ai_protocol);
if (master_fd == -1) {
perror ("socket()");
continue;
}
errno = 0;
if (setsockopt
(master_fd, SOL_SOCKET, SO_REUSEADDR, (void *) &yes,
size) == -1) {
perror ("setsockopt()");
continue;
}
errno = 0;
if (bind (master_fd, p->ai_addr, p->ai_addrlen) == -1) {
close (master_fd);
perror ("bind");
continue;
}
break;
}
if (!p) {
(void) fprintf (stderr, "%s: Failed to setup a socket.\n",
PROGRAM_NAME);
return -1;
}
errno = 0;
if (listen (master_fd, SOMAXCONN) == -1) {
perror ("listen()");
return -1;
}
return master_fd;
}
/* Synopsis: Writes the host and service names to stderr and log file.
*/
static void write_slave_info (int slave_fd, struct sockaddr slave_addr,
socklen_t addr_len)
{
char log_txt[MAX_LOG_TEXT] = { 0 };
char host[NI_MAXHOST] = { 0 };
char service[NI_MAXSERV] = { 0 };
int ret_val = 0;
errno = 0;
if ((ret_val =
getnameinfo (&slave_addr, addr_len, host, sizeof host, service,
sizeof service, NI_NUMERICHOST)) != 0) {
(void) fprintf (stderr, "%s: getnameinfo: %s\n", PROGRAM_NAME,
gai_strerror (ret_val));
}
(void) snprintf (log_txt, sizeof log_txt,
"INFO: New connection from HOST:%s, SERVICE:%s, on socket %d.",
host, service, slave_fd);
log_msg (stderr, log_txt, LOG_FULLTIME);
log_msg (log_fp, log_txt, LOG_FULLTIME); | {
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
errno = 0;
FFLUSH (log_fp);
errno = 0;
FSYNC (log_fd);
}
/* Returns: The slave file descriptor on success, -1 otherwise.
*/
static int accept_new_connection (int master_fd)
{
int slave_fd = 0;
struct sockaddr slave_addr = { 0 };
socklen_t addr_len = sizeof slave_addr;
int yes = 1;
socklen_t size_yes = sizeof yes;
int flag = 5;
socklen_t size_flag = sizeof flag;
errno = 0;
do {
errno = 0;
slave_fd = accept (master_fd, &slave_addr, &addr_len);
} while ((slave_fd == -1) && (errno == EINTR));
if (slave_fd == -1) {
perror ("accept()");
return -1;
}
errno = 0;
if (setsockopt
(slave_fd, SOL_SOCKET, SO_KEEPALIVE, (void *) &yes,
size_yes) == -1) {
perror ("setsockopt()");
return -1;
}
errno = 0;
if (setsockopt
(slave_fd, IPPROTO_TCP, TCP_KEEPCNT, (void *) &flag,
size_flag) == -1) {
perror ("setsockopt()");
return -1;
}
errno = 0;
if (setsockopt
(slave_fd, IPPROTO_TCP, TCP_KEEPINTVL, (void *) &flag,
size_flag) == -1) {
perror ("setsockopt()");
return -1;
}
write_slave_info (slave_fd, slave_addr, addr_len);
return slave_fd;
}
static void write_farewell (int slave_fd)
{
char log_txt[MAX_LOG_TEXT] = { 0 };
(void) snprintf (log_txt, sizeof log_txt,
"INFO: Socket %d hung up.\n", slave_fd);
log_msg (stderr, log_txt, LOG_FULLTIME);
log_msg (log_fp, log_txt, LOG_FULLTIME);
FFLUSH (log_fp);
FSYNC (log_fd);
}
/* Synopsis: Get the number of bytes that are immediately available for
* reading.
* Returns: -1 on failure, number of bytes available elsewise.
*/
static int get_bytes (int slave_fd)
{
int flag = 0;
errno = 0;
if (ioctl (slave_fd, FIONREAD, &flag) == -1) {
perror ("ioctl()");
return -1;
}
return flag;
} | {
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
/* Synopsis: Calls recv() in a loop to read as much as available.
*
* Returns: 0 on allocation failure, storing 0 in status, a pointer
* to the line elsewise.
* Stores -1 in status in case of a closed connection
* or a recv() error.
*
* Remarks: The caller is responsible for freeing the returned
* memory (in case of success), else we risk exhaustion.
*/
/* Synopsis: Calls recv() in a loop, growing the buffer until nothing
 *           more is immediately available on slave_fd.
 * Returns:  A NUL-terminated, heap-allocated buffer on success (caller
 *           frees it), with the byte count stored in *nbytes.
 *           Returns 0 with *status == 0 on allocation failure.
 *           Returns 0 with *status == -1 on a closed connection or a
 *           recv()/ioctl() error.
 */
static char *read_line (size_t *nbytes, int slave_fd, int *status)
{
    size_t capacity = BUFSIZE;
    char *buf = malloc (capacity);
    if (!buf) {
        errno = ENOMEM;
        perror ("malloc()");
        *status = 0;            /* 0 in *status signals ENOMEM to the caller */
        return 0;
    }
    ssize_t ret_val = 0;
    size_t total = 0;           /* bytes accumulated so far */
    int pending = 0;            /* bytes still queued on the socket */
    do {
        /* Append after what we already have; the original wrote at
         * buf + ret_val (the PREVIOUS chunk size), corrupting data on
         * any multi-chunk read. Reserve one byte for the terminator. */
        ret_val = recv (slave_fd, buf + total, capacity - total - 1, 0);
        if (ret_val > 0) {
            total += (size_t) ret_val;
            buf[total] = '\0';  /* terminate at the END of the data */
            pending = get_bytes (slave_fd);
            if (pending == -1) {
                free (buf);
                *status = -1;
                return 0;
            }
            /* Grow the buffer if the queued bytes will not fit. The
             * original discarded the realloc() result and always asked
             * for a fixed 2 * BUFSIZE, so the buffer never grew. */
            if ((size_t) pending > capacity - total - 1) {
                size_t new_cap = capacity;
                do {
                    new_cap += BUFSIZE;
                } while ((size_t) pending > new_cap - total - 1);
                char *tmp = realloc (buf, new_cap);
                if (!tmp) {
                    errno = ENOMEM;
                    perror ("realloc()");
                    free (buf);
                    *status = 0;    /* was 'status = 0': assigned the pointer */
                    return 0;
                }
                buf = tmp;
                capacity = new_cap;
            }
        } else {
            pending = 0;
        }
    } while (pending > 0);
    /* Slave closed the connection.
     */
    if (ret_val == 0) {
        write_farewell (slave_fd);
        free (buf);
        *status = -1;
        return 0;
    }
    /* We have an error, sire!
     */
    if (ret_val == -1) {
        perror ("recv()");
        free (buf);
        *status = -1;
        return 0;               /* the original returned the freed pointer here */
    }
    *nbytes = total;
    return buf;
}
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
}
*nbytes = total;
return buf;
}
/* Synopsis: Calls send() in a loop to ensure that all data is sent,
 *           retrying after EINTR. Stores the number of bytes actually
 *           sent back into *len.
 * Returns:  0 on success, -1 on a send() error.
 */
static int send_all (int slave_fd, const char *line, size_t *len)
{
    const size_t want = *len;
    size_t sent = 0;
    while (sent < want) {
        errno = 0;
        const ssize_t rc = send (slave_fd, line + sent, want - sent, 0);
        if (rc >= 0) {
            sent += (size_t) rc;
        } else if (errno != EINTR) {
            perror ("send()");
            *len = sent;        /* report the partial count */
            return -1;
        }
        /* rc == -1 && errno == EINTR: interrupted, retry the same chunk */
    }
    *len = sent;
    return 0;
}
/* Broadcast a received line to every descriptor in the master set,
 * skipping the listening socket and the sender itself.
 */
static void send_msg (size_t nbytes, const char *line, int sender_fd,
                      int master_fd, fd_set master, int fd_max)
{
    for (int i = 0; i <= fd_max; i++) {
        /* Send it to everyone */
        if (FD_ISSET (i, &master)) {
            /* Excluding the master and sender */
            if (i != master_fd && i != sender_fd) {
                size_t len = nbytes;
                int status = send_all (i, line, &len);
                if (status == -1) {
                    perror ("send()");
                } else if (len != nbytes) {
                    /* NOTE(review): send_all() appears to return 0 only
                     * after sending everything, so this branch may be
                     * unreachable — confirm before removing. */
                    /* %zu is the correct conversion for size_t; the
                     * original's %ld is undefined where long != size_t. */
                    (void) fprintf (stderr,
                                    "%s: We only sent %zu bytes because of a send() error.\n",
                                    PROGRAM_NAME, len);
                }
            }
        }
    }
}
/* Synopsis: Calls select and handles new connections.
* Returns: -1 on failure to init select or ENOMEM, 0 otherwise.
*/
static int handle_connections (int master_fd)
{
fd_set master; /* Master file descriptor list */
fd_set read_fds; /* Temp file descriptor list for select() */
int fd_max = 0; /* Max descriptor seen so far */ | {
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
FD_ZERO (&master);
FD_ZERO (&read_fds);
FD_SET (master_fd, &master);
fd_max = master_fd;
for (;;) {
errno = 0;
/* Because select is destructive */
read_fds = master;
if (select (fd_max + 1, &read_fds, 0, 0, 0) == -1) {
if (errno == EINTR) {
continue;
}
perror ("select()");
return -1;
}
/* Iterate through the existing connections looking for data to read */
for (int i = 0; i <= fd_max; i++) {
/* We have a connection */
if (FD_ISSET (i, &read_fds)) {
/* It's the master */
if (i == master_fd) {
int slave_fd = 0;
if ((slave_fd =
accept_new_connection (master_fd)) != -1) {
FD_SET (slave_fd, &master);
}
if (slave_fd > fd_max) {
fd_max = slave_fd;
}
} /* We have data to read */
else {
size_t nbytes = 0;
int status = 0;
char *line = read_line (&nbytes, i, &status);
/* A read error, or the slave closed connection */
if (!line && !status) {
return -1; /* ENOMEM */
} else if (status == -1) {
FD_CLR (i, &master);
close (i);
} else {
send_msg (nbytes, line, i, master_fd, master,
fd_max);
free (line);
}
}
}
}
}
return 0;
}
#define ARRAY_CARDINALITY(x) (sizeof(x) / sizeof (x[0]))
int main (void)
{
static sigset_t caught_signals;
sigemptyset (&caught_signals); | {
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
int main (void)
{
    /* Set of signals we will actually install a handler for. */
    static sigset_t caught_signals;
    sigemptyset (&caught_signals);
    static int const sig[] = {
        SIGALRM, SIGHUP, SIGINT,
        SIGPIPE, SIGQUIT, SIGTERM,
    };
    const size_t nsigs = ARRAY_CARDINALITY (sig);
    struct sigaction act;
    /* First pass: query each signal's current disposition and only
     * remember the ones NOT inherited as SIG_IGN (so e.g. a signal
     * ignored by the invoking shell stays ignored). */
    for (size_t i = 0; i < nsigs; i++) {
        if (sigaction (sig[i], 0, &act) == -1) {
            perror ("sigaction()");
            return EXIT_FAILURE;
        }
        if (act.sa_handler != SIG_IGN) {
            sigaddset (&caught_signals, sig[i]);
        }
    }
    /* Second pass: install sigint_handler for the remembered signals,
     * blocking the whole set while the handler runs. */
    act.sa_handler = sigint_handler;
    act.sa_mask = caught_signals;
    act.sa_flags = 0;
    for (size_t i = 0; i < nsigs; i++) {
        if (sigismember (&caught_signals, sig[i])) {
            if (sigaction (sig[i], &act, 0) == -1) {
                perror ("sigaction()");
                return EXIT_FAILURE;
            }
        }
    }
    /* Register cleanup to run on normal exit paths. */
    atexit (exit_cleanup);
    errno = 0;
    /* Open the log file in append mode; log_fp/log_fd are globals used
     * by the logging macros elsewhere in the file. */
    log_fp = fopen ("server.log", "a");
    if (!log_fp) {
        perror ("fopen()");
        return EXIT_FAILURE;
    }
    errno = 0;
    log_fd = fileno (log_fp);
    if (log_fd == -1) {
        perror ("fileno()");
        return EXIT_FAILURE;
    }
    int master_fd = 0; /* One descriptor to rule them all */
    struct addrinfo *servinfo;
    /* Set up the server address */
    if (init_addr (&servinfo) == -1) {
        return EXIT_FAILURE;
    }
    /* Open a socket and set it to listening */
    if ((master_fd = init_sock (&servinfo)) == -1) {
        freeaddrinfo (servinfo);
        return EXIT_FAILURE;
    }
    freeaddrinfo (servinfo);
    /* Wait for and eventually handle a new connection */
    (void) printf ("\nListening for connections on port %s.\n", PORT);
    if (handle_connections (master_fd) == -1) {
        close (master_fd);
        return EXIT_FAILURE;
    }
    close (master_fd);
    return EXIT_SUCCESS;
}
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
Formatting:
indent -kr -cs -pcs -saf -sai -saw --no-tabs --case-indentation 4 selectserver.c selectserver.h log.h log.c
Makefile:
# the compiler:
CC = gcc-10
# compiler flags:
CFLAGS = -O2 -pedantic-errors -Wall -Wfloat-equal -Wundef \
-Wshadow -Wpointer-arith -Wcast-align -Wstrict-prototypes \
-Wstrict-overflow=5 -Wconversion -Wformat-overflow=2 \
-Wcast-qual -Wformat-signedness -Wformat-truncation=1\
-Wunreachable-code -fanalyzer -Wformat=2 -fno-builtin \
-std=c17 -Winit-self -Wmaybe-uninitialized \
-Wuninitialized -Werror=implicit-function-declaration \
-Wmisleading-indentation -Wmissing-braces \
-Wstrict-prototypes -Wmissing-prototypes -Wredundant-decls \
-Wundef -fno-omit-frame-pointer -fno-common \
-fsanitize=address -fsanitize=undefined
TARGET = selectserver1.1
OBJS = selectserver.o log.o
DEPS = log.h selectserver.h
$(TARGET): $(OBJS)
$(CC) -o $@ $^ $(CFLAGS)
%.o: %.c $(DEPS)
$(CC) -c -o $@ $< $(CFLAGS)
clean:
rm -rf *.o
Dynamic analysis:
/* Valgrind's dump after I sent 100 simultaneous connections with a message to the server.
* I exited with SIGINT (or Ctrl-C).
*
* Flags passed: valgrind --leak-check=full --show-leak-kinds=all --show-error-list=yes -s
* --leak-check-heuristics=all --log-file=dynamic_analysis ./selectserver1.1
*/ | {
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
==1466== Memcheck, a memory error detector
==1466== Copyright (C) 2002-2017, and GNU GPL'd, by Julian Seward et al.
==1466== Using Valgrind-3.15.0 and LibVEX; rerun with -h for copyright info
==1466== Command: ./selectserver1.1
==1466== Parent PID: 8
==1466==
==1466== error calling PR_SET_PTRACER, vgdb might block
==1466==
==1466== Process terminating with default action of signal 2 (SIGINT)
==1466== at 0x497FF7A: select (select.c:41)
==1466== by 0x10A0A4: handle_connections (selectserver.c:326)
==1466== by 0x10A565: main (selectserver.c:440)
==1466==
==1466== HEAP SUMMARY:
==1466== in use at exit: 472 bytes in 1 blocks
==1466== total heap usage: 575 allocs, 574 frees, 1,352,203 bytes allocated
==1466==
==1466== 472 bytes in 1 blocks are still reachable in loss record 1 of 1
==1466== at 0x483B7F3: malloc (in /usr/lib/x86_64-linux-gnu/valgrind/vgpreload_memcheck-amd64-linux.so)
==1466== by 0x48ED92D: __fopen_internal (iofopen.c:65)
==1466== by 0x48ED92D: fopen@@GLIBC_2.2.5 (iofopen.c:86)
==1466== by 0x10A4DC: main (selectserver.c:412)
==1466==
==1466== LEAK SUMMARY:
==1466== definitely lost: 0 bytes in 0 blocks
==1466== indirectly lost: 0 bytes in 0 blocks
==1466== possibly lost: 0 bytes in 0 blocks
==1466== still reachable: 472 bytes in 1 blocks
==1466== suppressed: 0 bytes in 0 blocks
==1466==
==1466== ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0) | {
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
Answer: log.h
I don't think that calling code has any need for TS_BUF_LENGHT, so this could be moved to the implementation file. Consider spelling LENGTH the usual way, too.
FILE and uint8_t are undefined. You need to include <stdio.h> and <stdint.h>, as it's not possible to portably forward-declare these typedefs.
Note that uint8_t is provided only when the underlying hardware has support for 8-bit values without padding. There's no good reason to restrict the number of flags so tightly this early in development - I recommend using plain unsigned int there.
log.c
We're missing declarations from <time.h> and <stdio.h>.
Don't ignore the return value: if we're running in a locale where the output exceeds the provided maximum length, then
strftime() returns 0, and the contents of the array are undefined.
(my emphasis). So check the result before using the buffer. We can reduce the work in this function by calling the time-related functions only when we're actually going to use the output:
/* Write msg to fp, optionally prefixed by a running count, a date
 * stamp, and a time stamp, as selected by the options bit mask.
 * NOTE(review): this assumes LOG_DATE/LOG_TIME are only passed together
 * with LOG_FULLTIME — otherwise tm_info is NULL and strftime() has
 * undefined behavior. Confirm with callers.
 */
void log_msg (FILE * fp, const char *msg, unsigned options)
{
    time_t time_val = options & LOG_FULLTIME ? time (0) : 0;
    static long long log_count = 0;
    char time_stamp[TS_BUF_LENGTH];
    char date_stamp[TS_BUF_LENGTH];
    struct tm *tm_info = options & LOG_FULLTIME ? localtime (&time_val) : NULL;
    if (options & LOG_COUNT) {
        /* NOTE(review): "%lld\n, " embeds a newline before the comma —
         * possibly a typo for "%lld, "; verify intended output format. */
        fprintf (fp, "%lld\n, ", ++log_count);
    }
    /* strftime() returns 0 when the buffer is too small, in which case
     * the array contents are undefined — so the result gates the print. */
    if (options & LOG_DATE && strftime (date_stamp, TS_BUF_LENGTH, "%F (%a)", tm_info)) {
        fprintf (fp, "%s, ", date_stamp);
    }
    if (options & LOG_TIME && strftime (time_stamp, TS_BUF_LENGTH, "%H:%M:%S", tm_info)) {
        fprintf (fp, "%s, ", time_stamp);
    }
    fprintf (fp, "%s\n", msg);
}
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
fprintf (fp, "%s\n", msg);
}
If this program is to run as a daemon, it's probably better to allow use of syslog() - that takes care of re-opening the file during log rotation, for example.
selectserver.h
That's a lot of includes for a header file! None of them seem to be necessary (though it's polite to include <stdio.h> so the macros can be expanded).
However, I don't think we need this header at all - the implementation is where main() is, so these definitions can simply all go into selectserver.c.
POSIX_C_SOURCE should expand to a long constant, so it's best to define it as 200809L.
ARRAY_CARDINALITY() improperly protects its argument from precedence errors - we need (x)[0] rather than (x[0]):
#define ARRAY_CARDINALITY(x) (sizeof (x) / sizeof (x)[0])
The FFLUSH() and FSYNC() macros are dangerous - use the do/while(0) idiom to protect against matching the wrong else:
#define FFLUSH(fp) do { if (fflush (fp) == EOF) perror ("fflush()"); } while (0)
#define FSYNC(fd) do { if (fsync (fd) == -1) perror ("fsync()"); } while (0) | {
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
c, networking, unix, posix
selectserver.c
There's a lot of unnecessary resetting of errno = 0: in most cases, we test function return values and only access errno if the function has indicated that it has written to it. We can trust that these functions behave as advertised, and eliminate those effectively-dead writes.
The signal handlers appear to be overkill, since we flush logs each time we write them anyway (and I think that responsibility ought to move to log.c, rather than mixed into the application code).
In init_addr() we only ever assign to status and never use it. So that variable can be eliminated.
In init_sock() we should close master_fd before we continue when setsockopt() fails. Actually, we shouldn't continue to the next iteration - just log the failure and attempt to bind regardless.
Similarly, when we fail to listen(), we also leak the file descriptor.
accept_new_connection() also leaks fds - whenever we return something other than slave_fd, we need to ensure that it is closed first. Again, consider proceeding despite failure to set options.
In read_line, we have if (!buf) { โฆ free(buf); }. That's not wrong, but is redundant. And the subsequent status = 0 is pointless, as that's immediately followed by return (or perhaps *status = 0 was intended - it's not clear what the status values mean).
We could eliminate the duplication of the realloc() calls and the consequent tests by starting with buf = NULL and moving the allocation to the beginning of the loop.
I think this is wrong:
char *new = realloc (buf, page_size + page_size);
Perhaps total + page_size?
The redefinition of ARRAY_CARDINALITY() suffers the same problem as the first one.
Makefile
A lot of the warnings are enabled by -Wall -Wextra, so are redundant. I normally write them on separate lines, like this:
CFLAGS += -std=c17
CFLAGS += -O2
CFLAGS += -pedantic-errors
CFLAGS += -Wall
CFLAGS += -Wextra
โฎ | {
"domain": "codereview.stackexchange",
"id": 44377,
"lm_label": null,
"lm_name": null,
"lm_q1_score": null,
"lm_q1q2_score": null,
"lm_q2_score": null,
"openwebmath_perplexity": null,
"openwebmath_score": null,
"tags": "c, networking, unix, posix",
"url": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.