repo_name
stringlengths 5
122
| path
stringlengths 3
232
| text
stringlengths 6
1.05M
|
|---|---|---|
mikita-kandratsyeu/crypto-wallet-app
|
src/components/Setting.tsx
|
<gh_stars>0
import React from 'react';
import { StyleSheet, Text, View } from 'react-native';
import { Switch, TouchableOpacity } from 'react-native-gesture-handler';
import { Icon } from '.';
import { colors, fonts, sizes } from '../constants';
import { SettingProps } from './types';
/**
 * A single settings row. Renders one of three variants based on `type`:
 * 'switch' (title + toggle), 'button' (pressable row with value + chevron),
 * or the default read-only row showing just the value.
 */
export const Setting: React.FC<SettingProps> = props => {
  const {
    title,
    value = '',
    switchValue = false,
    type,
    onPress,
    onSwitchValueChange,
  } = props;

  // Shared inline style for the right-hand value label (used by two variants).
  const valueTextStyle = {
    marginRight: sizes.radius,
    color: colors.lightGray3,
    ...fonts.h3,
  };

  // Toggle row: title on the left, a switch on the right.
  if (type === 'switch') {
    return (
      <View style={styles.root}>
        <Text style={styles.title}>{title}</Text>
        <Switch value={switchValue} onValueChange={onSwitchValueChange} />
      </View>
    );
  }

  // Pressable row: value text plus a chevron, wrapped in a touchable.
  if (type === 'button') {
    return (
      <TouchableOpacity style={styles.root} onPress={onPress}>
        <Text style={styles.title}>{title}</Text>
        <View style={styles.value}>
          <Text style={valueTextStyle}>{value}</Text>
          <Icon name="RightArrow" color={colors.white} />
        </View>
      </TouchableOpacity>
    );
  }

  // Fallback: plain, non-interactive row showing the value only.
  return (
    <View style={styles.root}>
      <Text style={styles.title}>{title}</Text>
      <View style={styles.value}>
        <Text style={valueTextStyle}>{value}</Text>
      </View>
    </View>
  );
};
// Layout for a fixed-height settings row: title stretches, value hugs right.
const styles = StyleSheet.create({
  root: {
    flexDirection: 'row',
    height: 50,
    alignItems: 'center',
  },
  title: {
    flex: 1,
    color: colors.white,
    ...fonts.h3,
  },
  value: {
    flexDirection: 'row',
    alignItems: 'center',
  },
});
|
mikita-kandratsyeu/crypto-wallet-app
|
src/screens/Profile.tsx
|
<reponame>mikita-kandratsyeu/crypto-wallet-app<gh_stars>0
import React, { useState } from 'react';
import { View, Text, StyleSheet } from 'react-native';
import { ScrollView } from 'react-native-gesture-handler';
import { MainLayoutWrapper } from '.';
import { Icon, SectionTitle, Setting } from '../components';
import { colors, dummyData, fonts, messages, sizes } from '../constants';
import { showAlert } from './services';
/**
 * Profile screen: account header (email, id, verified badge) followed by
 * grouped settings rows. All "button" rows open the same placeholder alert;
 * the Face ID row toggles local state only.
 */
export const Profile: React.FC = () => {
  // Local-only toggle; not persisted or read by anything else visible here.
  const [faceId, setFaceId] = useState<boolean>(false);
  // Shared placeholder handler for every pressable setting.
  const alertHandler = () =>
    showAlert({
      title: messages.titleAlert,
      body: messages.bodyAlert,
      buttons: [
        {
          text: messages.buttonAlert,
          onPress: () => null,
          style: 'cancel',
        },
      ],
    });
  return (
    <MainLayoutWrapper>
      <View style={styles.root}>
        <ScrollView>
          {/* Account header: email/id on the left, verified badge on the right. */}
          <View style={styles.scrollViewContainer}>
            <View style={styles.emailContainer}>
              <Text style={styles.emailText}>{dummyData.profile.email}</Text>
              <Text style={styles.idText}>{`ID: ${dummyData.profile.id}`}</Text>
            </View>
            <View style={styles.verifyContainer}>
              <Icon name="Verified" color={colors.lightGreen} />
              <Text
                style={{
                  marginLeft: sizes.base,
                  color: colors.lightGreen,
                  ...fonts.body4,
                }}
              >
                {messages.verified}
              </Text>
            </View>
          </View>
          <SectionTitle title={messages.app} />
          <Setting
            title={messages.launchScreen}
            value={messages.launchScreenValue}
            onPress={alertHandler}
            type="button"
          />
          <Setting
            title={messages.appearance}
            value={messages.appearanceValue}
            onPress={alertHandler}
            type="button"
          />
          <SectionTitle title={messages.account} />
          <Setting
            title={messages.paymentCurrency}
            value={messages.paymentCurrencyValue}
            onPress={alertHandler}
            type="button"
          />
          <Setting
            title={messages.language}
            value={messages.languageValue}
            onPress={alertHandler}
            type="button"
          />
          <SectionTitle title={messages.security} />
          <Setting
            title={messages.faceId}
            switchValue={faceId}
            onSwitchValueChange={setFaceId}
            type="switch"
          />
          <Setting
            title={messages.passwordSettings}
            type="button"
            onPress={alertHandler}
          />
          <Setting
            title={messages.changePassword}
            type="button"
            onPress={alertHandler}
          />
          <Setting
            title={messages.twoFactorAuth}
            type="button"
            onPress={alertHandler}
          />
        </ScrollView>
      </View>
    </MainLayoutWrapper>
  );
};
// Profile screen layout: dark full-height container with a header row.
const styles = StyleSheet.create({
  root: {
    flex: 1,
    paddingTop: 15,
    paddingHorizontal: sizes.padding,
    backgroundColor: colors.black,
  },
  scrollViewContainer: {
    flexDirection: 'row',
    marginTop: sizes.radius,
  },
  emailContainer: {
    flex: 1,
  },
  emailText: {
    color: colors.white,
    ...fonts.h3,
  },
  idText: {
    color: colors.lightGray,
    ...fonts.body4,
  },
  verifyContainer: {
    flexDirection: 'row',
    alignItems: 'center',
  },
});
|
mikita-kandratsyeu/crypto-wallet-app
|
src/screens/Market.tsx
|
<reponame>mikita-kandratsyeu/crypto-wallet-app<filename>src/screens/Market.tsx
import React, { useCallback, useEffect, useRef } from 'react';
import {
View,
Text,
StyleSheet,
FlatList,
Animated,
Image,
} from 'react-native';
import { connect } from 'react-redux';
import { LineChart } from 'react-native-chart-kit';
// @ts-ignore
import AnimateNumber from 'react-native-countup';
import { MainLayoutWrapper } from '.';
import { Icon, Switcher, TextButton } from '../components';
import { colors, fonts, messages, sizes } from '../constants';
import { getCoinMarket } from '../store/market/market.actions';
import { Store } from '../store/types';
import { MarketProps } from './types';
import { marketTabs } from '../components/Switcher';
/**
 * Market screen: a horizontal pager (one page per market tab) whose pages
 * each render the coin list with a 7d sparkline, current price and an
 * animated 7d percentage change.
 */
const Market: React.FC<MarketProps> = props => {
  const { getCoinMarket, coins } = props;
  // Horizontal scroll offset; shared with the Switcher's sliding indicator.
  const scrollX = useRef(new Animated.Value(0)).current;
  const marketTabScrollViewRef = useRef<any>();
  useEffect(() => {
    // NOTE(review): the dispatch-mapped getCoinMarket declares six
    // parameters but is invoked with none here — presumably the thunk
    // supplies defaults; confirm against market.actions.
    getCoinMarket();
  }, []);
  const marketHandler = useCallback(
    (tabIndex: number) => {
      // Jump the pager to the page matching the tapped tab.
      marketTabScrollViewRef?.current?.scrollToOffset({
        offset: tabIndex * sizes.width,
      });
    },
    // NOTE(review): a ref's .current is not reactive, so this dependency
    // never retriggers the callback; [] would behave identically.
    [marketTabScrollViewRef.current],
  );
  return (
    <MainLayoutWrapper>
      <View style={styles.root}>
        <View style={styles.tabsContainer}>
          <Switcher scrollX={scrollX} onPress={marketHandler} />
        </View>
        {/* Filter buttons are placeholders: presses are no-ops. */}
        <View style={styles.buttonsContainer}>
          <TextButton label={messages.usd} onPress={() => null} />
          <TextButton
            label={messages.changes7dShort}
            containerStyle={styles.button}
            onPress={() => null}
          />
          <TextButton
            label={messages.top}
            containerStyle={styles.button}
            onPress={() => null}
          />
        </View>
        {/* Outer pager: one full-width page per entry in marketTabs. */}
        <Animated.FlatList
          ref={marketTabScrollViewRef}
          data={marketTabs}
          contentContainerStyle={{ marginTop: sizes.padding }}
          horizontal
          pagingEnabled
          scrollEventThrottle={16}
          snapToAlignment="center"
          showsHorizontalScrollIndicator={false}
          keyExtractor={item => item.id}
          onScroll={Animated.event(
            [{ nativeEvent: { contentOffset: { x: scrollX } } }],
            {
              useNativeDriver: false,
            },
          )}
          renderItem={() => (
            <View style={styles.columnContainer}>
              {/* Inner list: every page currently renders the same coins. */}
              <FlatList
                data={coins}
                keyExtractor={item => item.id}
                renderItem={({ item }) => {
                  // Grey for flat, green for gains, red for losses over 7d.
                  const priceColor = () => {
                    if (item.price_change_percentage_7d_in_currency === 0) {
                      return colors.lightGray3;
                    }
                    if (item.price_change_percentage_7d_in_currency > 0) {
                      return colors.lightGreen;
                    }
                    return colors.red;
                  };
                  // Rotates the "UpArrow" glyph up for gains, down otherwise.
                  const changeIconStyle = {
                    transform:
                      Number(item.price_change_percentage_7d_in_currency) > 0
                        ? [{ rotate: '45deg' }]
                        : [{ rotate: '125deg' }],
                  };
                  return (
                    <View style={styles.rowContainer}>
                      <View style={styles.coinContainer}>
                        <Image
                          source={{ uri: item.image }}
                          style={styles.icon}
                        />
                        <Text
                          style={{
                            marginLeft: sizes.radius,
                            color: colors.white,
                            ...fonts.h3,
                          }}
                        >
                          {item.name}
                        </Text>
                      </View>
                      {/* 7d sparkline, colour-matched to the price change. */}
                      <View style={styles.chartContainer}>
                        <LineChart
                          withVerticalLabels={false}
                          withHorizontalLabels={false}
                          withDots={false}
                          withInnerLines={false}
                          withVerticalLines={false}
                          withOuterLines={false}
                          data={{
                            labels: [],
                            datasets: [
                              {
                                data: item.sparkline_in_7d.price,
                              },
                            ],
                          }}
                          width={100}
                          height={60}
                          chartConfig={{
                            color: () => priceColor(),
                          }}
                          bezier
                          style={styles.chart}
                        />
                      </View>
                      <View style={styles.priceContainer}>
                        <Text style={styles.coinsRenderPrice}>
                          {`$ ${Number(item.current_price).toLocaleString(
                            undefined,
                            {
                              maximumFractionDigits: 2,
                            },
                          )}`}
                        </Text>
                        <View style={styles.priceChangeContainer}>
                          {/* Arrow is hidden when the 7d change is exactly 0. */}
                          {item.price_change_percentage_7d_in_currency !==
                            0 && (
                            <View style={changeIconStyle}>
                              <Icon
                                name="UpArrow"
                                height={10}
                                width={10}
                                color={priceColor()}
                              />
                            </View>
                          )}
                          <Text
                            style={[
                              styles.priceChangeText,
                              {
                                color: priceColor(),
                              },
                            ]}
                          >
                            <AnimateNumber
                              value={Number(
                                item.price_change_percentage_7d_in_currency ||
                                  0,
                              )}
                              timing="linear"
                              interval={15}
                              formatter={(value: number) =>
                                // eslint-disable-next-line prettier/prettier
                                `${value.toFixed(2)}%`}
                            />
                          </Text>
                        </View>
                      </View>
                    </View>
                  );
                }}
              />
            </View>
          )}
        />
      </View>
    </MainLayoutWrapper>
  );
};
// Market screen layout: pager pages are screen-width columns of coin rows.
const styles = StyleSheet.create({
  root: {
    flex: 1,
    backgroundColor: colors.black,
    paddingTop: 15,
  },
  tabsContainer: {
    marginTop: sizes.radius,
    marginHorizontal: sizes.radius,
    borderRadius: sizes.radius,
    backgroundColor: colors.gray,
  },
  buttonsContainer: {
    flexDirection: 'row',
    marginTop: sizes.radius,
    marginHorizontal: sizes.radius,
  },
  button: {
    marginLeft: sizes.base,
  },
  priceContainer: {
    flex: 1,
    alignItems: 'flex-end',
    justifyContent: 'center',
  },
  coinsRenderPrice: {
    textAlign: 'right',
    color: colors.white,
    ...fonts.h4,
  },
  priceChangeContainer: {
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'flex-end',
  },
  priceChangeText: {
    marginLeft: 5,
    ...fonts.body5,
    lineHeight: 15,
  },
  // Each pager page fills exactly one screen width.
  columnContainer: { flex: 1, width: sizes.width },
  rowContainer: {
    flexDirection: 'row',
    paddingHorizontal: sizes.padding,
    marginBottom: sizes.radius,
  },
  coinContainer: {
    flex: 1.5,
    flexDirection: 'row',
    alignItems: 'center',
  },
  icon: {
    height: 20,
    width: 20,
  },
  chartContainer: {
    flex: 1,
    alignItems: 'center',
  },
  chart: {
    paddingRight: 0,
  },
});
// Redux wiring: exposes the coin list and the getCoinMarket thunk as props.
const mapStateToProps = (state: Store) => ({
  coins: state.marketReducer.coins,
});
// NOTE(review): dispatch is typed `any`; all six parameters are forwarded
// verbatim to the imported getCoinMarket thunk.
const mapDispatchToProps = (dispatch: any) => ({
  getCoinMarket: (
    currency: string,
    orderBy: string,
    sparkline: boolean,
    priceChangePerc: string,
    perPage: number,
    page: number,
  ) =>
    dispatch(
      getCoinMarket(
        currency,
        orderBy,
        sparkline,
        priceChangePerc,
        perPage,
        page,
      ),
    ),
});
export default connect(mapStateToProps, mapDispatchToProps)(Market);
|
mikita-kandratsyeu/crypto-wallet-app
|
src/navigation/types.ts
|
// Props injected into the Tabs navigator by react-redux's connect.
export interface TabsProps {
  // True while the trade modal overlay is shown.
  isTradeModalVisible: boolean;
  // Thunk-backed setter toggling the trade modal's visibility.
  setTradeModalVisibility: (isVisible: boolean) => any;
}
|
mikita-kandratsyeu/crypto-wallet-app
|
src/store/tab/tab.actions.ts
|
<gh_stars>0
import { Dispatch } from 'redux';
export const SET_TRADE_MODAL_VISIBILITY = 'SET_TRADE_MODAL_VISIBILITY';

/** Builds the plain action carrying the desired modal visibility. */
export const setTradeModalVisibilitySuccess = (isVisible: boolean) => {
  return {
    type: SET_TRADE_MODAL_VISIBILITY,
    payload: { isVisible },
  };
};

/** Thunk wrapper: dispatches the visibility action immediately. */
export const setTradeModalVisibility = (isVisible: boolean) => {
  return (dispatch: Dispatch) =>
    dispatch(setTradeModalVisibilitySuccess(isVisible));
};
|
mikita-kandratsyeu/crypto-wallet-app
|
src/components/TabBarCustomButton.tsx
|
<gh_stars>0
import React from 'react';
import { StyleSheet, TouchableOpacity } from 'react-native';
import { TabBarCustomButtonProps } from './types';
/**
 * Centered, flex-filling touchable used as a custom tab-bar button;
 * forwards presses and renders whatever children the navigator supplies.
 */
export const TabBarCustomButton: React.FC<TabBarCustomButtonProps> = ({
  children,
  onPress,
}) => (
  <TouchableOpacity style={styles.root} onPress={onPress}>
    {children}
  </TouchableOpacity>
);
// Fills its tab slot and centers the icon/label content.
const styles = StyleSheet.create({
  root: {
    flex: 1,
    justifyContent: 'center',
    alignItems: 'center',
  },
});
|
mikita-kandratsyeu/crypto-wallet-app
|
src/App.tsx
|
<reponame>mikita-kandratsyeu/crypto-wallet-app
import React from 'react';
import { createStackNavigator } from '@react-navigation/stack';
import { NavigationContainer } from '@react-navigation/native';
import { createStore, applyMiddleware } from 'redux';
import { composeWithDevTools } from 'redux-devtools-extension';
import { Provider } from 'react-redux';
import thunk from 'redux-thunk';
import rootReducer from './store/root-reducer';
import { Tabs } from './navigation';
import { stackRoutes } from './constants';
// Root stack navigator; a single screen hosting the bottom-tab navigator.
const Stack = createStackNavigator();
// Redux devtools are attached in every non-production build.
const middleware =
  process.env.NODE_ENV === 'production'
    ? applyMiddleware(thunk)
    : composeWithDevTools(applyMiddleware(thunk));
// Module-level store: created once at import time.
const store = createStore(rootReducer, middleware);
/** App root: Redux provider wrapping the navigation tree. */
export const App: React.FC = () => (
  <Provider store={store}>
    <NavigationContainer>
      <Stack.Navigator
        screenOptions={{
          headerShown: false,
        }}
        initialRouteName={stackRoutes.MainLayout}
      >
        <Stack.Screen name={stackRoutes.MainLayout} component={Tabs} />
      </Stack.Navigator>
    </NavigationContainer>
  </Provider>
);
|
mikita-kandratsyeu/crypto-wallet-app
|
src/components/Switcher.tsx
|
import React, { useEffect, useRef, useState } from 'react';
import { Animated, StyleSheet, Text, View } from 'react-native';
import { TouchableOpacity } from 'react-native-gesture-handler';
import { colors, constants, fonts, sizes } from '../constants';
import { MarketTab } from '../constants/constants';
import { SwitcherProps, TabIndicatorProps } from './types';
// Module-level tab models; each carries a React ref so Switcher can measure
// the rendered tab's layout for the sliding indicator.
export const marketTabs: MarketTab[] = constants.marketTabs.map(tab => ({
  ...tab,
  ref: React.createRef(),
}));
/**
 * Sliding highlight behind the active tab. Maps the pager's horizontal
 * scroll offset onto each tab's measured x position.
 */
const TabIndicator: React.FC<TabIndicatorProps> = props => {
  const { scrollX, measureLayout } = props;

  // One scroll-offset stop per tab page...
  const inputRange = marketTabs.map((_, index) => index * sizes.width);
  // ...mapped onto the corresponding tab's measured left edge.
  const outputRange = measureLayout.map(measure => measure.x);
  const translateX = scrollX.interpolate({ inputRange, outputRange });

  return (
    <Animated.View
      style={[styles.tabIndicator, { transform: [{ translateX }] }]}
    />
  );
};
/**
 * Tab strip with a sliding indicator. Tab frames are measured after mount;
 * the indicator only renders once every tab has reported its layout.
 */
export const Switcher: React.FC<SwitcherProps> = props => {
  const { scrollX, onPress } = props;
  // Measured {x, y, width, height} of every tab, in marketTabs order.
  const [measureLayout, setMeasureLayout] = useState<any[]>([]);
  const containerRef = useRef<any>();
  useEffect(() => {
    const ml: any[] = [];
    marketTabs.forEach(tab => {
      tab?.ref?.current?.measureLayout(
        containerRef.current,
        (x: number, y: number, width: number, height: number) => {
          ml.push({
            x,
            y,
            width,
            height,
          });
          // Publish once every tab has reported its frame.
          // NOTE(review): callbacks are assumed to fire in marketTabs order;
          // if they ever interleave, indicator stops could be shuffled.
          if (ml.length === marketTabs.length) {
            setMeasureLayout(ml);
          }
        },
      );
    });
    // NOTE(review): a ref's .current in deps does not retrigger effects;
    // this effectively runs once on mount.
  }, [containerRef.current]);
  return (
    <View ref={containerRef} style={styles.root}>
      {measureLayout.length > 0 && (
        <TabIndicator measureLayout={measureLayout} scrollX={scrollX} />
      )}
      {marketTabs.map((tab, index) => (
        <TouchableOpacity key={tab.id} onPress={() => onPress(index)}>
          <View ref={tab.ref} style={styles.tab}>
            <Text style={styles.text}>{tab.title}</Text>
          </View>
        </TouchableOpacity>
      ))}
    </View>
  );
};
// Two equal-width tabs; the indicator is absolutely positioned underneath.
const styles = StyleSheet.create({
  root: {
    flexDirection: 'row',
  },
  tab: {
    paddingHorizontal: 15,
    alignItems: 'center',
    justifyContent: 'center',
    height: 40,
    // Half the container width (screen minus the strip's side margins).
    width: (sizes.width - sizes.radius * 2) / 2,
  },
  text: {
    color: colors.white,
    ...fonts.h3,
  },
  tabIndicator: {
    position: 'absolute',
    left: 0,
    height: '100%',
    width: (sizes.width - sizes.radius * 2) / 2,
    borderRadius: sizes.radius,
    backgroundColor: colors.lightGray,
  },
});
|
mikita-kandratsyeu/crypto-wallet-app
|
src/store/root-reducer.ts
|
<reponame>mikita-kandratsyeu/crypto-wallet-app
import { combineReducers } from 'redux';
import tabReducer from './tab/tab.reducer';
import marketReducer from './market/market.reducer';
export default combineReducers({
tabReducer,
marketReducer,
});
|
mikita-kandratsyeu/crypto-wallet-app
|
src/components/index.ts
|
<reponame>mikita-kandratsyeu/crypto-wallet-app<filename>src/components/index.ts
// Barrel file: single import point for all shared UI components.
export { TabIcon } from './TabIcon';
export { TabBarCustomButton } from './TabBarCustomButton';
export { Icon } from './Icon';
export { IconTextButton } from './IconTextButton';
export { BalanceInfo } from './BalanceInfo';
export { Chart } from './Chart';
export { Switcher } from './Switcher';
export { TextButton } from './TextButton';
export { SectionTitle } from './SectionTitle';
export { Setting } from './Setting';
|
mikita-kandratsyeu/crypto-wallet-app
|
src/navigation/Tabs.tsx
|
<filename>src/navigation/Tabs.tsx<gh_stars>0
import React, { useEffect, useMemo, useRef } from 'react';
import { createBottomTabNavigator } from '@react-navigation/bottom-tabs';
import { connect } from 'react-redux';
import { StyleSheet, Animated } from 'react-native';
import DeviceInfo from 'react-native-device-info';
import { setTradeModalVisibility } from '../store/tab/tab.actions';
import { Home, Portfolio, Market, Profile, Trade } from '../screens';
import { TabBarCustomButton, TabIcon } from '../components';
import { colors, messages, stackRoutes } from '../constants';
import { Store } from '../store/types';
import { TabsProps } from './types';
const Tab = createBottomTabNavigator();
/**
 * Bottom tab navigator. While the trade modal is open, the four regular
 * tab icons fade out and their presses are suppressed; only the centre
 * trade button (which toggles the modal) stays interactive.
 */
const Tabs: React.FC<TabsProps> = props => {
  const { isTradeModalVisible, setTradeModalVisibility } = props;
  // Drives the fade of the non-trade tab icons (0 = hidden, 1 = shown).
  const modalAnimatedValue = useRef(new Animated.Value(0)).current;
  useEffect(() => {
    if (isTradeModalVisible) {
      // Modal opening: fade the regular icons out.
      Animated.timing(modalAnimatedValue, {
        toValue: 0,
        duration: 250,
        useNativeDriver: false,
      }).start();
    } else {
      // Modal closing: fade them back in.
      Animated.timing(modalAnimatedValue, {
        toValue: 1,
        duration: 250,
        useNativeDriver: false,
      }).start();
    }
  }, [isTradeModalVisible]);
  // Centre button shows a close glyph while the modal is open.
  const tradeIcon = useMemo(() => {
    return isTradeModalVisible ? 'Close' : 'Trade';
  }, [isTradeModalVisible]);
  const tradeTabButtonHandler = () => {
    setTradeModalVisibility(!isTradeModalVisible);
  };
  return (
    <Tab.Navigator
      tabBarOptions={{
        showLabel: false,
        style: styles.root,
      }}
    >
      <Tab.Screen
        name={stackRoutes.Home}
        component={Home}
        options={{
          tabBarIcon: ({ focused }) =>
            !isTradeModalVisible && (
              <Animated.View
                style={[
                  {
                    opacity: modalAnimatedValue,
                  },
                ]}
              >
                <TabIcon focused={focused} icon="Home" label={messages.home} />
              </Animated.View>
            ),
        }}
        listeners={{
          // Block navigation to this tab while the modal is open.
          tabPress: e => {
            if (isTradeModalVisible) {
              e.preventDefault();
            }
          },
        }}
      />
      <Tab.Screen
        name={stackRoutes.Portfolio}
        component={Portfolio}
        options={{
          tabBarIcon: ({ focused }) =>
            !isTradeModalVisible && (
              <Animated.View
                style={[
                  {
                    opacity: modalAnimatedValue,
                  },
                ]}
              >
                <TabIcon
                  focused={focused}
                  icon="Briefcase"
                  label={messages.portfolio}
                />
              </Animated.View>
            ),
        }}
        listeners={{
          tabPress: e => {
            if (isTradeModalVisible) {
              e.preventDefault();
            }
          },
        }}
      />
      {/* Centre tab: custom button toggles the modal instead of navigating. */}
      <Tab.Screen
        name={stackRoutes.Trade}
        component={Trade}
        options={{
          tabBarIcon: ({ focused }) => (
            <TabIcon
              focused={focused}
              icon={tradeIcon}
              label={messages.trade}
              isTrade
            />
          ),
          tabBarButton: props => (
            <TabBarCustomButton {...props} onPress={tradeTabButtonHandler} />
          ),
        }}
      />
      <Tab.Screen
        name={stackRoutes.Market}
        component={Market}
        options={{
          tabBarIcon: ({ focused }) =>
            !isTradeModalVisible && (
              <Animated.View
                style={[
                  {
                    opacity: modalAnimatedValue,
                  },
                ]}
              >
                <TabIcon
                  focused={focused}
                  icon="Market"
                  label={messages.market}
                />
              </Animated.View>
            ),
        }}
        listeners={{
          tabPress: e => {
            if (isTradeModalVisible) {
              e.preventDefault();
            }
          },
        }}
      />
      <Tab.Screen
        name={stackRoutes.Profile}
        component={Profile}
        options={{
          tabBarIcon: ({ focused }) =>
            !isTradeModalVisible && (
              <Animated.View
                style={[
                  {
                    opacity: modalAnimatedValue,
                  },
                ]}
              >
                <TabIcon
                  focused={focused}
                  icon="Profile"
                  label={messages.profile}
                />
              </Animated.View>
            ),
        }}
        listeners={{
          tabPress: e => {
            if (isTradeModalVisible) {
              e.preventDefault();
            }
          },
        }}
      />
    </Tab.Navigator>
  );
};
// Tab bar: taller on notched devices to clear the home indicator.
const styles = StyleSheet.create({
  root: {
    height: DeviceInfo.hasNotch() ? 130 : 90,
    backgroundColor: colors.primary,
    borderTopColor: 'transparent',
  },
});
// Redux wiring: modal visibility flag in, visibility thunk out.
const mapStateToProps = (state: Store) => ({
  isTradeModalVisible: state.tabReducer.isTradeModalVisible,
});
const mapDispatchToProps = (dispatch: any) => ({
  setTradeModalVisibility: (isVisible: boolean) =>
    dispatch(setTradeModalVisibility(isVisible)),
});
export default connect(mapStateToProps, mapDispatchToProps)(Tabs);
|
mikita-kandratsyeu/crypto-wallet-app
|
src/components/IconTextButton.tsx
|
<reponame>mikita-kandratsyeu/crypto-wallet-app<gh_stars>0
import React from 'react';
import { StyleSheet, Text, TouchableHighlight } from 'react-native';
import { Icon } from '.';
import { colors, fonts, sizes } from '../constants';
import { IconTextButtonProps } from './types';
/**
 * Rounded white pill button showing an icon followed by a label.
 * The fragment wrapper is required: TouchableHighlight takes one child.
 */
export const IconTextButton: React.FC<IconTextButtonProps> = ({
  label,
  icon,
  containerStyle,
  onPress,
}) => (
  <TouchableHighlight
    style={[styles.root, containerStyle]}
    underlayColor={colors.lightGray5}
    onPress={onPress}
  >
    <>
      <Icon
        name={icon}
        width={styles.icon.width}
        height={styles.icon.height}
        color={colors.black}
      />
      <Text style={styles.text}>{label}</Text>
    </>
  </TouchableHighlight>
);
// White pill: centered row, fixed height, rounded corners.
const styles = StyleSheet.create({
  root: {
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'center',
    height: 50,
    borderRadius: sizes.radius,
    backgroundColor: colors.white,
  },
  icon: {
    width: 20,
    height: 20,
  },
  text: {
    marginLeft: sizes.base,
    ...fonts.h3,
  },
});
|
mikita-kandratsyeu/crypto-wallet-app
|
src/screens/MainLayoutWrapper.tsx
|
import React, { useEffect, useRef } from 'react';
import {
StyleSheet,
Animated,
SafeAreaView,
StatusBar,
TouchableOpacity,
} from 'react-native';
import { connect } from 'react-redux';
import DeviceInfo from 'react-native-device-info';
import { IconTextButton } from '../components';
import { colors, messages, sizes } from '../constants';
import { setTradeModalVisibility } from '../store/tab/tab.actions';
import { Store } from '../store/types';
import { showAlert } from './services';
import { MainLayoutProps } from './types';
/**
 * Screen shell: safe area + status bar around `children`, plus the trade
 * modal. The modal slides up from below the screen when visible, behind a
 * dimming overlay that dismisses it on tap.
 */
const MainLayoutWrapper: React.FC<MainLayoutProps> = props => {
  const { children, isTradeModalVisible, setTradeModalVisibility } = props;
  // 0 = modal hidden/off-screen, 1 = modal shown (also dims the overlay).
  const modalAnimatedValue = useRef(new Animated.Value(0)).current;
  useEffect(() => {
    if (isTradeModalVisible) {
      Animated.timing(modalAnimatedValue, {
        toValue: 1,
        duration: 250,
        useNativeDriver: false,
      }).start();
    } else {
      Animated.timing(modalAnimatedValue, {
        toValue: 0,
        duration: 250,
        useNativeDriver: false,
      }).start();
    }
  }, [isTradeModalVisible]);
  // Slides the modal from below the screen to a notch-aware resting offset.
  const modalY = modalAnimatedValue.interpolate({
    inputRange: [0, 1],
    outputRange: [
      sizes.height,
      sizes.height - (DeviceInfo.hasNotch() ? 280 : 240),
    ],
  });
  // Placeholder alert for the transfer/withdraw buttons.
  const alertHandler = () =>
    showAlert({
      title: messages.titleAlert,
      body: messages.bodyAlert,
      buttons: [
        {
          text: messages.buttonAlert,
          onPress: () => null,
          style: 'cancel',
        },
      ],
    });
  return (
    <SafeAreaView style={styles.root}>
      <StatusBar barStyle="light-content" />
      {children}
      {/* Dimming overlay: tapping anywhere toggles the modal closed. */}
      {isTradeModalVisible && (
        <Animated.View
          style={[
            styles.modalVisible,
            {
              opacity: modalAnimatedValue,
            },
          ]}
        >
          <TouchableOpacity
            onPress={() => setTradeModalVisibility?.(!isTradeModalVisible)}
            style={styles.touchableArea}
          />
        </Animated.View>
      )}
      {/* Always mounted; hidden by being positioned below the screen. */}
      <Animated.View style={[styles.modal, { top: modalY }]}>
        <IconTextButton
          label={messages.transfer}
          icon="Send"
          onPress={alertHandler}
        />
        <IconTextButton
          label={messages.withdraw}
          icon="WithDraw"
          containerStyle={{
            marginTop: sizes.base,
          }}
          onPress={alertHandler}
        />
      </Animated.View>
    </SafeAreaView>
  );
};
// Shell layout: full-screen dark root, absolutely positioned modal + overlay.
const styles = StyleSheet.create({
  root: {
    flex: 1,
    backgroundColor: colors.black,
  },
  modal: {
    position: 'absolute',
    left: 0,
    width: '100%',
    padding: sizes.padding,
    backgroundColor: colors.primary,
  },
  modalVisible: {
    position: 'absolute',
    top: 0,
    left: 0,
    right: 0,
    bottom: 0,
    backgroundColor: colors.transparentBlack,
  },
  touchableArea: {
    position: 'relative',
    width: '100%',
    height: '100%',
  },
});
// Redux wiring: same modal visibility flag/thunk pair used by Tabs.
const mapStateToProps = (state: Store) => ({
  isTradeModalVisible: state.tabReducer.isTradeModalVisible,
});
const mapDispatchToProps = (dispatch: any) => ({
  setTradeModalVisibility: (isVisible: boolean) =>
    dispatch(setTradeModalVisibility(isVisible)),
});
export default connect(mapStateToProps, mapDispatchToProps)(MainLayoutWrapper);
|
mikita-kandratsyeu/crypto-wallet-app
|
src/components/Chart.tsx
|
<filename>src/components/Chart.tsx
import React from 'react';
import { View, StyleSheet, Text } from 'react-native';
import {
ChartDot,
ChartPath,
ChartPathProvider,
ChartXLabel,
ChartYLabel,
monotoneCubicInterpolation,
// @ts-ignore
} from '@rainbow-me/animated-charts';
import moment from 'moment';
import { colors, fonts, sizes } from '../constants';
import { ChartProps } from './types';
/**
 * Interactive price chart with Y-axis labels and a draggable dot that shows
 * the price/date under the finger. The format helpers are UI-thread worklets
 * required by @rainbow-me/animated-charts.
 */
export const Chart: React.FC<ChartProps> = props => {
  const { chartPrices, containerStyle } = props;
  const startUnixTimestamp = moment().subtract(7, 'day').unix();
  // Synthesize x timestamps spaced 3600s apart from 7 days ago.
  // NOTE(review): assumes chartPrices holds hourly samples — confirm.
  const data = chartPrices
    ? chartPrices?.map((item: any, idx: number) => {
        return {
          x: startUnixTimestamp + (idx + 1) * 3600,
          y: item,
        };
      })
    : [];
  const points = monotoneCubicInterpolation({ data, range: 40 });
  // Abbreviates large magnitudes (K/M/B).
  // NOTE(review): boundaries use > (not >=) and negatives always fall
  // through to the plain branch — confirm this is intended.
  const formatNumber = (value: number, roundingPoint: number): string => {
    'worklet';
    if (value > 1e9) {
      return `${(value / 1e9).toFixed(roundingPoint)}B`;
    }
    if (value > 1e6) {
      return `${(value / 1e6).toFixed(roundingPoint)}M`;
    }
    if (value > 1e3) {
      return `${(value / 1e3).toFixed(roundingPoint)}K`;
    }
    return value.toFixed(roundingPoint);
  };
  // Y-label formatter; empty string while nothing is selected.
  const formatUSD = (value: string): string => {
    'worklet';
    if (value === '') {
      return '';
    }
    return `$ ${formatNumber(Number(value), 2)}`;
  };
  // X-label formatter: "DD / MM" from a unix-seconds string.
  const formatDateTime = (value: string): string => {
    'worklet';
    if (value === '') {
      return '';
    }
    const selectedDate = new Date(Number(value) * 1000);
    const date = `0${selectedDate.getDate()}`.slice(-2);
    const month = `0${selectedDate.getMonth() + 1}`.slice(-2);
    return `${date} / ${month}`;
  };
  // Four evenly spaced labels: max, upper mid, lower mid, min.
  const getYAxisLabelValues = (): string[] => {
    if (chartPrices !== undefined) {
      const minValue = Math.min(...chartPrices);
      const maxValue = Math.max(...chartPrices);
      const midValue = (minValue + maxValue) / 2;
      const higherMidValue = (maxValue + midValue) / 2;
      const lowerMidValue = (minValue + midValue) / 2;
      const roundingPoint = 2;
      return [
        formatNumber(maxValue, roundingPoint),
        formatNumber(higherMidValue, roundingPoint),
        formatNumber(lowerMidValue, roundingPoint),
        formatNumber(minValue, roundingPoint),
      ];
    }
    return [];
  };
  return (
    <View style={{ ...containerStyle }}>
      <View style={styles.yAxisContainer}>
        {getYAxisLabelValues().map((item: string) => (
          <Text key={item} style={styles.yAxisItem}>
            {item}
          </Text>
        ))}
      </View>
      {data.length > 0 && (
        <ChartPathProvider data={{ points, smoothingStrategy: 'bezier' }}>
          <ChartPath
            height={150}
            width={sizes.width}
            stroke={colors.lightGreen}
            strokeWidth={2}
          />
          <ChartDot>
            <View style={styles.chartDotContainer}>
              <View style={styles.chartDot}>
                <View style={styles.chartDotInternal} />
              </View>
              <ChartYLabel style={styles.chartYLabel} format={formatUSD} />
              <ChartXLabel style={styles.chartXLabel} format={formatDateTime} />
            </View>
          </ChartDot>
        </ChartPathProvider>
      )}
    </View>
  );
};
// Chart overlay styling: dot tooltip is centered over the touch point.
const styles = StyleSheet.create({
  chartDotContainer: {
    position: 'absolute',
    left: -35,
    width: 80,
    alignItems: 'center',
    backgroundColor: colors.transparentBlack,
  },
  yAxisContainer: {
    position: 'absolute',
    left: sizes.padding,
    top: 0,
    bottom: 0,
    justifyContent: 'space-between',
  },
  yAxisItem: {
    color: colors.lightGray3,
    ...fonts.body4,
  },
  chartDot: {
    alignItems: 'center',
    justifyContent: 'center',
    width: 25,
    height: 25,
    borderRadius: 15,
    backgroundColor: colors.white,
  },
  chartDotInternal: {
    width: 15,
    height: 15,
    borderRadius: 10,
    backgroundColor: colors.lightGreen,
  },
  chartYLabel: {
    color: colors.white,
    ...fonts.body5,
  },
  chartXLabel: {
    marginTop: 3,
    color: colors.lightGray3,
    ...fonts.body5,
    lineHeight: 15,
  },
});
|
mikita-kandratsyeu/crypto-wallet-app
|
src/components/BalanceInfo.tsx
|
<reponame>mikita-kandratsyeu/crypto-wallet-app
import React, { useMemo } from 'react';
import { StyleSheet, View, Text } from 'react-native';
// @ts-ignore
import AnimateNumber from 'react-native-countup';
import { Icon } from '.';
import { colors, fonts, messages, sizes } from '../constants';
import { BalanceInfoProps } from './types';
/**
 * Balance summary: title, animated dollar amount, and an animated 7d
 * percentage change with a directional arrow (green up / red down).
 */
export const BalanceInfo: React.FC<BalanceInfoProps> = props => {
  const { title, displayAmount, changePct, containerStyle } = props;
  // Rotates the "UpArrow" glyph up for gains, down otherwise.
  const changePercentIconStyle = useMemo(() => {
    return [
      styles.changePercentIcon,
      {
        transform:
          Number(changePct) > 0
            ? [{ rotate: '45deg' }]
            : [{ rotate: '125deg' }],
      },
    ];
  }, [changePct]);
  const changePercentTextStyle = useMemo(() => {
    return [
      styles.changePercentText,
      { color: Number(changePct) > 0 ? colors.lightGreen : colors.red },
    ];
  }, [changePct]);
  return (
    <View style={{ ...containerStyle }}>
      <Text style={styles.text}>{title}</Text>
      <View style={styles.figuresContainer}>
        <Text style={styles.figuresCurrencySymbol}>$</Text>
        <Text style={styles.figuresAmount}>
          <AnimateNumber
            value={Number(displayAmount || 0)}
            timing="linear"
            interval={15}
            formatter={(value: number) => value.toLocaleString()}
          />
        </Text>
        <Text style={styles.figuresCurrency}>{messages.usd}</Text>
      </View>
      <View style={styles.changePercentContainer}>
        {/* Arrow is hidden when the change is exactly 0. */}
        {Number(changePct || 0) !== 0 && (
          <View style={changePercentIconStyle}>
            <Icon
              name="UpArrow"
              width={10}
              height={10}
              color={Number(changePct) > 0 ? colors.lightGreen : colors.red}
            />
          </View>
        )}
        <Text style={changePercentTextStyle}>
          <AnimateNumber
            value={Number(changePct || 0)}
            timing="linear"
            interval={15}
            formatter={(value: number) => `${value.toFixed(2)}%`}
          />
        </Text>
        <Text style={styles.change7d}>{messages.changes7d}</Text>
      </View>
    </View>
  );
};
// Balance summary typography and row alignment.
const styles = StyleSheet.create({
  text: {
    color: colors.white,
    ...fonts.h3,
  },
  figuresContainer: {
    flexDirection: 'row',
    alignItems: 'flex-end',
  },
  figuresCurrencySymbol: {
    color: colors.lightGray3,
    ...fonts.h2,
  },
  figuresAmount: {
    color: colors.white,
    marginLeft: sizes.base,
    ...fonts.h2,
  },
  figuresCurrency: {
    color: colors.lightGray3,
    ...fonts.h2,
    marginLeft: 5,
  },
  changePercentContainer: {
    flexDirection: 'row',
    alignItems: 'flex-end',
  },
  changePercentIcon: {
    alignSelf: 'center',
  },
  changePercentText: {
    marginLeft: sizes.base,
    alignSelf: 'flex-end',
    ...fonts.h4,
  },
  change7d: {
    marginLeft: sizes.radius,
    alignSelf: 'flex-end',
    color: colors.lightGray3,
    ...fonts.h5,
  },
});
|
mikita-kandratsyeu/crypto-wallet-app
|
src/constants/theme.ts
|
import { Dimensions } from 'react-native';
// Screen dimensions captured once at import time.
const { width, height } = Dimensions.get('window');
// App-wide color palette (dark theme).
export const colors = {
  primary: '#1E1E1E',
  secondary: '#3B3B3B',
  white: '#fff',
  lightGreen: '#4BEE70',
  red: '#D84035',
  black: '#000000',
  gray: '#212125',
  gray1: '#1f1f1f',
  lightGray: '#3B3B3B',
  lightGray2: '#212125',
  lightGray3: '#757575',
  lightGray5: '#d3d3d3',
  transparentWhite: 'rgba(255, 255, 255, 0.2)',
  transparentBlack: 'rgba(0, 0, 0, 0.8)',
  transparentBlack1: 'rgba(0, 0, 0, 0.4)',
};
// Spacing, font-size and screen-dimension scale.
export const sizes = {
  // global sizes
  base: 8,
  font: 14,
  radius: 12,
  padding: 24,
  // font sizes
  largeTitle: 40,
  h1: 30,
  h2: 22,
  h3: 16,
  h4: 14,
  h5: 12,
  body1: 30,
  body2: 22,
  body3: 16,
  body4: 14,
  body5: 12,
  // app dimensions
  width,
  height,
};
// Named text styles pairing a Roboto face with a size/line-height.
export const fonts = {
  largeTitle: { fontFamily: 'Roboto-Black', fontSize: sizes.largeTitle },
  h1: { fontFamily: 'Roboto-Black', fontSize: sizes.h1, lineHeight: 36 },
  h2: { fontFamily: 'Roboto-Bold', fontSize: sizes.h2, lineHeight: 30 },
  h3: { fontFamily: 'Roboto-Bold', fontSize: sizes.h3, lineHeight: 22 },
  h4: { fontFamily: 'Roboto-Bold', fontSize: sizes.h4, lineHeight: 22 },
  h5: { fontFamily: 'Roboto-Bold', fontSize: sizes.h5, lineHeight: 22 },
  body1: {
    fontFamily: 'Roboto-Regular',
    fontSize: sizes.body1,
    lineHeight: 36,
  },
  body2: {
    fontFamily: 'Roboto-Regular',
    fontSize: sizes.body2,
    lineHeight: 30,
  },
  body3: {
    fontFamily: 'Roboto-Regular',
    fontSize: sizes.body3,
    lineHeight: 22,
  },
  body4: {
    fontFamily: 'Roboto-Regular',
    fontSize: sizes.body4,
    lineHeight: 22,
  },
  body5: {
    fontFamily: 'Roboto-Regular',
    fontSize: sizes.body5,
    lineHeight: 22,
  },
};
// Aggregate export for consumers that want the whole theme at once.
const appTheme = { colors, sizes, fonts };
export default appTheme;
|
mikita-kandratsyeu/crypto-wallet-app
|
src/constants/index.ts
|
// Barrel file for app-wide constants, theming, copy and route names.
export { default as constants } from './constants';
export { default as dummyData } from './dummy';
export { default as theme, colors, sizes, fonts } from './theme';
export { default as messages } from './messages';
export { default as stackRoutes } from './routes';
|
mikita-kandratsyeu/crypto-wallet-app
|
src/constants/dummy.ts
|
<gh_stars>0
/** A single placeholder wallet position. */
export interface Holdings {
  id: string;
  qty: number;
}

/** Placeholder account identity. */
export interface Profile {
  id: number;
  email: string;
}

/** Placeholder user preferences. */
export interface Settings {
  launchScreen: string;
  currency: string;
  appearance: string;
  language: string;
  faceId: boolean;
}

// Wallet positions shown before real data is wired up.
export const holdings: Holdings[] = [
  { id: 'bitcoin', qty: 888 },
  { id: 'ethereum', qty: 188 },
  { id: 'dogecoin', qty: 88888 },
];

export const profile: Profile = {
  id: 8888888,
  email: '<EMAIL>',
};

export const settings: Settings = {
  launchScreen: 'Home',
  currency: 'USD',
  appearance: 'Dark',
  language: 'English',
  faceId: true,
};

// Aggregate export consumed via `dummyData.holdings` etc.
const dummyData = { holdings, profile, settings };

export default dummyData;
|
mikita-kandratsyeu/crypto-wallet-app
|
src/screens/types.ts
|
<filename>src/screens/types.ts<gh_stars>0
// Props injected into the Home screen by react-redux's connect.
export interface HomeProps {
  myHoldings: any[];
  coins: any[];
  getHoldings: any;
  getCoinMarket: any;
}
// Props of the shared screen shell; redux props are optional because the
// component is also usable before connect wraps it.
export interface MainLayoutProps {
  children: React.ReactNode;
  isTradeModalVisible?: boolean;
  setTradeModalVisibility?: (isVisible: boolean) => any;
}
// Props injected into the Portfolio screen.
export interface PortfolioProps {
  myHoldings: any[];
  getHoldings: any;
}
// Props injected into the Market screen.
export interface MarketProps {
  getCoinMarket: any;
  coins: any[];
}
|
mikita-kandratsyeu/crypto-wallet-app
|
src/components/TabIcon.tsx
|
import React, { useMemo } from 'react';
import { StyleSheet, View, Text } from 'react-native';
import { Icon } from '.';
import { fonts, colors } from '../constants';
import { TabIconProps } from './types';
/**
 * Bottom-tab icon with a label. The central "Trade" tab renders inside a
 * raised circular container and is always tinted white; other tabs dim their
 * tint while unfocused.
 */
export const TabIcon: React.FC<TabIconProps> = ({ focused, icon, label, isTrade }) => {
  // White when focused, dimmed gray otherwise.
  const tint = useMemo(
    () => (focused ? colors.white : colors.lightGray3),
    [focused],
  );
  const iconTint = isTrade ? colors.white : tint;
  const containerStyle = isTrade
    ? [styles.root, styles.tradeContainer]
    : styles.root;
  return (
    <View style={containerStyle}>
      <Icon
        name={icon}
        width={styles.icon.width}
        height={styles.icon.height}
        color={iconTint}
      />
      <Text style={[styles.text, { color: iconTint }]}>{label}</Text>
    </View>
  );
};
// Static styles for TabIcon.
const styles = StyleSheet.create({
  root: {
    alignItems: 'center',
    justifyContent: 'center',
  },
  // Circular raised container for the central "Trade" tab.
  tradeContainer: {
    width: 60,
    height: 60,
    borderRadius: 30,
    backgroundColor: colors.black,
  },
  // Not applied as a style: read by the component for the Icon's dimensions.
  icon: {
    width: 15,
    height: 15,
  },
  text: {
    marginTop: 5,
    ...fonts.h4,
    color: colors.white,
  },
});
|
mikita-kandratsyeu/crypto-wallet-app
|
src/screens/Home.tsx
|
<reponame>mikita-kandratsyeu/crypto-wallet-app
import React, { useCallback, useState } from 'react';
import { connect } from 'react-redux';
import {
View,
StyleSheet,
FlatList,
Text,
TouchableOpacity,
Image,
} from 'react-native';
// @ts-ignore
import AnimateNumber from 'react-native-countup';
import { useFocusEffect } from '@react-navigation/core';
import { MainLayoutWrapper } from '.';
import { Store } from '../store/types';
import { getHoldings, getCoinMarket } from '../store/market/market.actions';
import { colors, dummyData, fonts, messages, sizes } from '../constants';
import { BalanceInfo, Chart, Icon, IconTextButton } from '../components';
import { HomeProps } from './types';
import { getTotalWallet, getValueChange, showAlert } from './services';
/**
 * Home screen: wallet balance, transfer/withdraw actions, a 7-day price chart
 * for the selected (or top) coin, and the coin market list.
 */
const Home: React.FC<HomeProps> = props => {
  const { myHoldings, coins, getHoldings, getCoinMarket } = props;
  // Refresh holdings and market data each time the screen gains focus.
  // NOTE(review): getHoldings is invoked with a single argument while its
  // dispatch wrapper declares several parameters — presumably the action
  // creator applies defaults; confirm.
  useFocusEffect(
    useCallback(() => {
      getHoldings(dummyData.holdings);
      getCoinMarket();
    }, []),
  );
  // Coin whose sparkline is charted; null falls back to the top market coin.
  const [selectedCoin, setSelectedCoin] = useState<any>(null);
  const valueChange = getValueChange(myHoldings);
  const totalWallet = getTotalWallet(myHoldings);
  // Percent change relative to the pre-change wallet value.
  const percentChange = (valueChange / (totalWallet - valueChange)) * 100;
  // Transfer/withdraw are not implemented; both buttons show the same alert.
  const alertHandler = () =>
    showAlert({
      title: messages.titleAlert,
      body: messages.bodyAlert,
      buttons: [
        {
          text: messages.buttonAlert,
          onPress: () => null,
          style: 'cancel',
        },
      ],
    });
  return (
    <MainLayoutWrapper>
      <View style={styles.root}>
        <View style={styles.walletInfoContainer}>
          <BalanceInfo
            title={messages.yourWallet}
            displayAmount={totalWallet}
            changePct={percentChange}
            containerStyle={styles.balanceInfoContainer}
          />
        </View>
        <View style={styles.iconTextButtonContainer}>
          <IconTextButton
            label={messages.transfer}
            icon="Send"
            containerStyle={styles.iconTextButton}
            onPress={alertHandler}
          />
          <IconTextButton
            label={messages.withdraw}
            icon="WithDraw"
            containerStyle={styles.iconTextButton}
            onPress={alertHandler}
          />
        </View>
        <Chart
          containerStyle={{ marginTop: sizes.padding }}
          chartPrices={
            selectedCoin
              ? // eslint-disable-next-line camelcase
                selectedCoin?.sparkline_in_7d?.price
              : // eslint-disable-next-line camelcase
                coins[0]?.sparkline_in_7d?.price
          }
        />
        <FlatList
          data={coins}
          keyExtractor={item => item.id}
          contentContainerStyle={styles.listCoins}
          // eslint-disable-next-line prettier/prettier
          ListHeaderComponent={(
            <View style={styles.listCoinsHeader}>
              <Text style={styles.listCoinsTextHeader}>
                {messages.topCryptoCurrency}
              </Text>
            </View>
            // eslint-disable-next-line prettier/prettier
          )}
          renderItem={({ item }) => {
            // Green/red/neutral tint based on the 7-day price change.
            const priceColor = () => {
              if (item.price_change_percentage_7d_in_currency === 0) {
                return colors.lightGray3;
              }
              if (item.price_change_percentage_7d_in_currency > 0) {
                return colors.lightGreen;
              }
              return colors.red;
            };
            // Rotate the arrow up (45deg) for gains, down (125deg) otherwise.
            const changeIconStyle = {
              transform:
                Number(item.price_change_percentage_7d_in_currency) > 0
                  ? [{ rotate: '45deg' }]
                  : [{ rotate: '125deg' }],
            };
            return (
              <TouchableOpacity
                style={styles.listCoinsRenderRoot}
                onPress={() => setSelectedCoin(item)}
              >
                <View style={styles.listCoinsRender}>
                  <Image
                    source={{ uri: item.image }}
                    style={styles.listCoinsIcon}
                  />
                </View>
                <View style={styles.listCoinsTextContainer}>
                  <Text style={styles.listCoinsText}>{item.name}</Text>
                </View>
                <View>
                  <Text style={styles.listCoinsRenderPrice}>
                    {`$ ${Number(item.current_price).toLocaleString(undefined, {
                      maximumFractionDigits: 2,
                    })}`}
                  </Text>
                  <View style={styles.priceChangeContainer}>
                    {item.price_change_percentage_7d_in_currency !== 0 && (
                      <View style={changeIconStyle}>
                        <Icon
                          name="UpArrow"
                          height={10}
                          width={10}
                          color={priceColor()}
                        />
                      </View>
                    )}
                    <Text
                      style={[
                        styles.priceChangeText,
                        {
                          color: priceColor(),
                        },
                      ]}
                    >
                      <AnimateNumber
                        value={Number(
                          item.price_change_percentage_7d_in_currency || 0,
                        )}
                        timing="linear"
                        interval={15}
                        formatter={(value: number) => `${value.toFixed(2)}%`}
                      />
                    </Text>
                  </View>
                </View>
              </TouchableOpacity>
            );
          }}
          ListFooterComponent={<View style={styles.footer} />}
        />
      </View>
    </MainLayoutWrapper>
  );
};
// Static styles for the Home screen.
const styles = StyleSheet.create({
  root: {
    flex: 1,
    paddingTop: 15,
  },
  walletInfoContainer: {
    paddingHorizontal: sizes.padding,
    paddingBottom: sizes.padding * 2,
    borderRadius: 25,
    backgroundColor: colors.gray,
    marginLeft: sizes.base,
    marginRight: sizes.base,
  },
  balanceInfoContainer: {
    marginTop: 15,
    marginBottom: 15,
  },
  // Negative top margin overlaps the buttons onto the wallet card.
  iconTextButtonContainer: {
    flexDirection: 'row',
    marginTop: -20,
    paddingHorizontal: sizes.radius * 3,
  },
  iconTextButton: {
    flex: 1,
    height: 40,
    marginRight: Math.floor(sizes.radius / 2),
    marginLeft: Math.floor(sizes.radius / 2),
  },
  listCoins: {
    marginTop: 30,
    paddingHorizontal: sizes.padding,
  },
  listCoinsHeader: {
    marginBottom: sizes.radius,
  },
  listCoinsText: {
    color: colors.white,
    ...fonts.h3,
  },
  listCoinsTextHeader: {
    color: colors.white,
    ...fonts.h3,
    fontSize: 18,
  },
  listCoinsRenderRoot: {
    height: 55,
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'center',
  },
  listCoinsRender: {
    width: 35,
  },
  listCoinsIcon: {
    width: 20,
    height: 20,
  },
  listCoinsTextContainer: {
    flex: 1,
  },
  // NOTE(review): listCoinsRenderText appears unused in this file — candidate
  // for removal.
  listCoinsRenderText: {
    color: colors.white,
    ...fonts.h3,
  },
  listCoinsRenderPrice: {
    textAlign: 'right',
    color: colors.white,
    ...fonts.h4,
  },
  priceChangeContainer: {
    flexDirection: 'row',
    alignItems: 'center',
    justifyContent: 'flex-end',
  },
  priceChangeText: {
    marginLeft: 5,
    ...fonts.body5,
    lineHeight: 15,
  },
  footer: {
    marginBottom: 50,
  },
});
// Maps the market slice of the Redux store onto Home's props.
const mapStateToProps = (state: Store) => ({
  myHoldings: state.marketReducer.myHoldings,
  coins: state.marketReducer.coins,
});
// Wraps the market action creators in dispatch.
// NOTE(review): Home invokes getHoldings(dummyData.holdings) with a single
// argument while these wrappers declare seven parameters — presumably the
// action creators supply defaults for the rest; confirm.
const mapDispatchToProps = (dispatch: any) => ({
  getHoldings: (
    holdings: any[],
    currency: string,
    orderBy: string,
    sparkline: boolean,
    priceChangePerc: string,
    perPage: number,
    page: number,
  ) =>
    dispatch(
      getHoldings(
        holdings,
        currency,
        orderBy,
        sparkline,
        priceChangePerc,
        perPage,
        page,
      ),
    ),
  getCoinMarket: (
    currency: string,
    orderBy: string,
    sparkline: boolean,
    priceChangePerc: string,
    perPage: number,
    page: number,
  ) =>
    dispatch(
      getCoinMarket(
        currency,
        orderBy,
        sparkline,
        priceChangePerc,
        perPage,
        page,
      ),
    ),
});
export default connect(mapStateToProps, mapDispatchToProps)(Home);
|
mikita-kandratsyeu/crypto-wallet-app
|
src/navigation/index.ts
|
import Tabs from './Tabs';
export { Tabs };
|
KashiPrg/PDF_LineBreaks_Remover
|
src/app.ts
|
<reponame>KashiPrg/PDF_LineBreaks_Remover<gh_stars>0
// Add a context-menu entry when the extension is installed.
chrome.runtime.onInstalled.addListener((): void => {
  chrome.contextMenus.create({
    id: "convert_line_breaks_menu",
    title: chrome.i18n.getMessage("conMenuCopiedTextLineBreaksRemove"),
    contexts: ["all"],
    type: "normal",
    onclick: convertLineBreaks
  });
});
/**
 * Replaces every line break in the clipboard text with a single space and
 * copies the result back to the clipboard, using a scratch textarea on the
 * active page.
 */
function convertLineBreaks(): void {
  chrome.tabs.query({active: true, lastFocusedWindow: true}, tabs => {
    if (tabs.length > 0) {
      // document.execCommand("copy") only works while some text element has
      // focus, so create a scratch textarea to focus.
      const textAreaForCopy = document.createElement("textarea");
      textAreaForCopy.textContent = ``;
      // Append the scratch textarea to the page body.
      document.querySelector("body")?.append(textAreaForCopy);
      // Focus the textarea (any existing page selection stays untouched).
      textAreaForCopy.focus();
      // Paste the clipboard contents into the textarea.
      document.execCommand("paste");
      // Replace every line break with one space. A space (rather than the
      // empty string) avoids gluing "aaaaa.bbbbb" together when a break falls
      // at the end of a sentence. Blank lines produce extra spaces, but the
      // feature targets single paragraphs, so that is acceptable.
      textAreaForCopy.value = textAreaForCopy.value.replace(/\n/g, ' ');
      // Select the textarea contents (existing selections stay untouched).
      textAreaForCopy.select();
      // Copy the selection back to the clipboard.
      document.execCommand("copy");
      // The scratch textarea is no longer needed; remove it.
      textAreaForCopy.remove();
    }
  });
}
// Keyboard-shortcut entry point: mirrors the context-menu action.
chrome.commands.onCommand.addListener((command) => {
  if (command === "convert_LineBreaks") {
    convertLineBreaks();
  }
});
|
Otto-Health/opentok-network-test-js
|
src/util/index.ts
|
/**
* @module Util
*/
/**
 * Shallow-copies an object, setting or overriding `key` with `value`.
 * The input object is never mutated.
 */
export const assoc = (key: string, value: any, obj: Object): Object => {
  const next: Record<string, any> = { ...obj };
  next[key] = value;
  return next;
};
/**
 * Returns a copy of an object, setting or overriding the property at the
 * specified path with the provided value. The path is a period-delimited
 * string. Intermediate non-object values are replaced with fresh objects;
 * existing sibling keys at every level are preserved.
 */
export const assocPath = (path: string, value: any, obj: Object): Object => {
  const keys: string[] = path.split('.');
  if (!keys.length) {
    return obj;
  }
  const key = keys[0];
  if (keys.length === 1) {
    return { ...obj, [key]: value };
  }
  const existing = (obj as Record<string, any>)[key];
  // FIX: recurse into the existing nested object itself. The previous
  // implementation looked `key` up *inside* the nested object, which dropped
  // sibling keys at deeper levels (e.g. assocPath('a.b', 1, { a: { x: 2 } })
  // lost `x`).
  const base: Object = (!!existing && typeof existing === 'object') ? existing : {};
  return { ...obj, [key]: assocPath(keys.slice(1).join('.'), value, base) };
};
/**
 * Returns a (nested) property from the provided object, or undefined when any
 * step of the path is missing or nullish. The path is a period-delimited
 * string.
 */
export const get = <T>(props: string, obj: any): T => {
  const properties: string[] = typeof props === 'string' ? props.split('.') : props;
  // Walk the object directly instead of shallow-copying it first; the copy
  // discarded non-enumerable and inherited properties (e.g. array length).
  let result: any = obj;
  properties.some((p) => {
    // FIX: guard against null as well as undefined. Previously a null
    // intermediate value (e.g. get('a.b', { a: null })) threw a TypeError.
    result = result == null ? undefined : result[p];
    return result === undefined;
  });
  return result;
};
/**
 * Returns a (nested) property from the provided object, falling back to the
 * provided default when the lookup yields a falsy value.
 */
export const getOr = <T>(defaultValue: any, props: string, obj: any): T => {
  const found = get<T>(props, obj);
  return found || defaultValue;
};
/**
 * Returns a subset of the provided object with the specified properties.
 * Keys whose corresponding values are undefined are skipped unless `all`
 * is true.
 */
export const pick =
  <T extends { [key: string]: any }, K extends keyof T>(
    props: K[],
    obj: T,
    all: boolean = false): Partial<T> => {
    const subset: Partial<T> = {};
    for (const prop of props) {
      if (all || obj[prop] !== undefined) {
        subset[prop] = obj[prop];
      }
    }
    return subset;
  };
/**
 * Returns a subset of the provided object with the specified properties,
 * keeping keys whose corresponding values are undefined.
 */
export const pickAll = <T extends { [key: string]: any }, K extends keyof T>(props: K[], obj: T): Partial<T> => {
  return pick(props, obj, true);
};
/**
 * Returns the final element of an array, or undefined for an empty array.
 */
export const last = <T>(list: T[]): (T | undefined) => {
  const { length } = list;
  return length ? list[length - 1] : undefined;
};
/**
 * Returns the nth element of an array; a negative n counts back from the end
 * of the array.
 */
export const nth = <T>(n: number, list: T[]): (T | undefined) => {
  const index = n < 0 ? list.length + n : n;
  return list[index];
};
/**
 * Returns the first element of an array, or undefined when empty.
 */
export const head = <T>(list: T[]): (T | undefined) => {
  return nth(0, list);
};
|
Otto-Health/opentok-network-test-js
|
src/NetworkTest/testQuality/helpers/MOSState.ts
|
<reponame>Otto-Health/opentok-network-test-js
/**
 * Mutable accumulator for the quality test: raw subscriber stats plus rolling
 * per-track score logs and the derived overall quality score.
 */
export default class MOSState {
  // Raw subscriber stats samples collected during the test.
  statsLog: OT.SubscriberStats[];
  // Rolling per-sample quality scores for each track.
  audioScoresLog: number[];
  videoScoresLog: number[];
  stats: HasAudioVideo<AverageStats> = { audio: {}, video: {} };
  bandwidth: Bandwidth = { audio: 0, video: 0 };
  // Handle of the periodic score sampler, when one is running.
  intervalId?: number;
  constructor() {
    this.statsLog = [];
    this.audioScoresLog = [];
    this.videoScoresLog = [];
  }
  // Maximum number of retained score samples, and the sampling period (ms).
  static readonly maxLogLength: number = 1000;
  static readonly scoreInterval: number = 1000;
  // FIX: coerce to an actual boolean. These previously evaluated to
  // `undefined` (not `false`) whenever statsLog was empty, despite the
  // declared `(): boolean` return type.
  readonly hasAudioTrack = (): boolean => !!(this.statsLog[0] && this.statsLog[0].audio);
  readonly hasVideoTrack = (): boolean => !!(this.statsLog[0] && this.statsLog[0].video);
  // Mean of the logged audio scores. NOTE(review): yields NaN on an empty
  // log — qualityScore() only calls this when an audio track exists; confirm
  // the score log is non-empty in that case.
  private audioScore(): number {
    return this.audioScoresLog.reduce((acc, score) => acc + score, 0) / this.audioScoresLog.length;
  }
  // Mean of the logged video scores (same empty-log caveat as audioScore).
  private videoScore(): number {
    return this.videoScoresLog.reduce((acc, score) => acc + score, 0) / this.videoScoresLog.length;
  }
  // Stops the periodic sampler, if any, and clears its handle.
  clearInterval() {
    if (this.intervalId) {
      window.clearInterval(this.intervalId);
    }
    this.intervalId = undefined;
  }
  private pruneAudioScores() {
    const { audioScoresLog } = this;
    while (audioScoresLog.length > MOSState.maxLogLength) {
      audioScoresLog.shift();
    }
    this.audioScoresLog = audioScoresLog;
  }
  private pruneVideoScores() {
    const { videoScoresLog } = this;
    while (videoScoresLog.length > MOSState.maxLogLength) {
      videoScoresLog.shift();
    }
    this.videoScoresLog = videoScoresLog;
  }
  // Drops oldest samples so each score log stays within maxLogLength.
  pruneScores() {
    this.pruneAudioScores();
    this.pruneVideoScores();
  }
  // Overall quality: the worse of the two track scores when both exist, a
  // single track's score otherwise, and 0 when no stats were collected.
  qualityScore(): number {
    const hasAudioTrack = this.hasAudioTrack();
    const hasVideoTrack = this.hasVideoTrack();
    if (hasAudioTrack && hasVideoTrack) {
      return Math.min(this.audioScore(), this.videoScore());
    } else if (hasAudioTrack && !hasVideoTrack) {
      return this.audioScore();
    } else if (!hasAudioTrack && hasVideoTrack) {
      return this.videoScore();
    } else {
      return 0;
    }
  }
}
|
Otto-Health/opentok-network-test-js
|
src/NetworkTest/errors/types.ts
|
<filename>src/NetworkTest/errors/types.ts
/**
* @module Errors/Connectivity/OpenTok
*/
/**
* Define errors returned by OpenTok.js
*/
import { get } from '../../util';
// Error names surfaced by OpenTok.js and its backend APIs, used to classify
// OT.OTError instances by their `name` property.
export enum OTErrorType {
  // Generic client-side errors.
  JS_EXCEPTION = 'JS_EXCEPTION',
  AUTHENTICATION_ERROR = 'AUTHENTICATION_ERROR',
  OT_AUTHENTICATION_ERROR = 'OT_AUTHENTICATION_ERROR',
  OT_INVALID_HTTP_STATUS = 'OT_INVALID_HTTP_STATUS',
  OT_CONNECT_FAILED = 'OT_CONNECT_FAILED',
  // Session / connection errors.
  INVALID_SESSION_ID = 'INVALID_SESSION_ID',
  CONNECT_FAILED = 'CONNECT_FAILED',
  CONNECT_REJECTED = 'CONNECT_REJECTED',
  CONNECTION_TIMEOUT = 'CONNECTION_TIMEOUT',
  NOT_CONNECTED = 'NOT_CONNECTED',
  INVALID_PARAMETER = 'INVALID_PARAMETER',
  P2P_CONNECTION_FAILED = 'P2P_CONNECTION_FAILED',
  API_RESPONSE_FAILURE = 'API_RESPONSE_FAILURE',
  TERMS_OF_SERVICE_FAILURE = 'TERMS_OF_SERVICE_FAILURE',
  CONNECTION_LIMIT_EXCEEDED = 'CONNECTION_LIMIT_EXCEEDED',
  // Publish / subscribe errors.
  UNABLE_TO_PUBLISH = 'UNABLE_TO_PUBLISH',
  UNABLE_TO_SUBSCRIBE = 'UNABLE_TO_SUBSCRIBE',
  UNSUPPORTED_VIDEO_CODEC = 'UNSUPPORTED_VIDEO_CODEC',
  UNABLE_TO_FORCE_DISCONNECT = 'UNABLE_TO_FORCE_DISCONNECT',
  UNABLE_TO_FORCE_UNPUBLISH = 'UNABLE_TO_FORCE_UNPUBLISH',
  PUBLISHER_ICE_WORKFLOW_FAILED = 'PUBLISHER_ICE_WORKFLOW_FAILED',
  SUBSCRIBER_ICE_WORKFLOW_FAILED = 'SUBSCRIBER_ICE_WORKFLOW_FAILED',
  STREAM_LIMIT_EXCEEDED = 'STREAM_LIMIT_EXCEEDED',
  UNEXPECTED_SERVER_RESPONSE = 'UNEXPECTED_SERVER_RESPONSE',
  REPORT_ISSUE_ERROR = 'REPORT_ISSUE_ERROR',
  // Anvil API errors.
  ANVIL_BADLY_FORMED_RESPONSE = 'ANVIL_BADLY_FORMED_RESPONSE',
  ANVIL_INVALID_HTTP_STATUS = 'ANVIL_INVALID_HTTP_STATUS',
  ANVIL_XDOMAIN_OR_PARSING_ERROR = 'ANVIL_XDOMAIN_OR_PARSING_ERROR',
  ANVIL_UNKNOWN_HTTP_ERROR = 'ANVIL_UNKNOWN_HTTP_ERROR',
  ANVIL_UNEXPECTED_ERROR_CODE = 'ANVIL_UNEXPECTED_ERROR_CODE',
  ANVIL_EMPTY_RESPONSE_BODY = 'ANVIL_EMPTY_RESPONSE_BODY',
  ANVIL_CONNECT_FAILED = 'ANVIL_CONNECT_FAILED',
}
/**
 * True when the given OpenTok error (possibly null) carries the given name.
 */
export const errorHasName =
  (error: OT.OTError | null = null, name: OTErrorType): boolean => {
    return get('name', error) === name;
  };
|
Otto-Health/opentok-network-test-js
|
test/NetworkTest.spec.ts
|
/* tslint:disable */
///<reference path="../src/types/index.d.ts"/>
import * as OT from '@opentok/client';
import * as Promise from 'promise';
import {
primary as sessionCredentials,
faultyLogging as badLoggingCredentials,
faultyApi as badApiCredentials,
} from './credentials.json';
import {
NetworkTestError,
InvalidSessionCredentialsError,
MissingOpenTokInstanceError,
MissingSessionCredentialsError,
IncompleteSessionCredentialsError,
InvalidOnCompleteCallback,
InvalidOnUpdateCallback,
} from '../src/NetworkTest/errors';
import { ConnectToSessionTokenError, ConnectToSessionSessionIdError, ConnectivityError, ConnectToSessionError, PublishToSessionError } from '../src/NetworkTest/testConnectivity/errors';
import { ConnectToSessionError as QualityTestSessionError } from '../src/NetworkTest/testQuality/errors';
import { pick, head, nth } from '../src/util';
import NetworkTest from '../src/NetworkTest';
import { ConnectivityTestResults } from '../src/NetworkTest/testConnectivity/index';
import { QualityTestError } from '../src/NetworkTest/testQuality/errors/index';
import { Stats } from 'fs-extra';
// Shorthand aliases for jasmine's custom-matcher plumbing.
type Util = jasmine.MatchersUtil;
type CustomMatcher = jasmine.CustomMatcher;
type EqualityTesters = jasmine.CustomEqualityTester[];
// Deliberately invalid credentials used to exercise the validation paths.
const malformedCredentials = { apiKey: '1234', invalidProp: '1234', token: '1234' };
const badCredentials = { apiKey: '1234', sessionId: '1234', token: '1234' };
const networkTest = new NetworkTest(OT, sessionCredentials);
const badCredentialsNetworkTest = new NetworkTest(OT, badCredentials);
// Minimal callbacks with the arities NetworkTest validates.
const validOnUpdateCallback = (stats: OT.SubscriberStats) => stats;
const validOnCompleteCallback = (error?: Error, results?: any) => results;
// Custom jasmine matchers used by the specs below.
const customMatchers: jasmine.CustomMatcherFactories = {
  // Passes when `actual instanceof expected`.
  toBeInstanceOf: (util: Util, customEqualityTesters: EqualityTesters): CustomMatcher => {
    return {
      compare: (actual: any, expected: any): jasmine.CustomMatcherResult => {
        const pass: boolean = actual instanceof expected;
        const message: string = pass ? '' : `Expected ${actual} to be an instance of ${expected}`;
        return { pass, message };
      },
    };
  },
  // Passes when `actual` is a primitive boolean.
  toBeABoolean: (util: Util, customEqualityTesters: EqualityTesters): CustomMatcher => {
    return {
      compare: (actual: any, expected: any): jasmine.CustomMatcherResult => {
        const pass: boolean = typeof actual === 'boolean';
        // FIX: the failure message was copy-pasted from toBeInstanceOf.
        const message: string = pass ? '' : `Expected ${actual} to be a boolean`;
        return { pass, message };
      },
    };
  },
};
describe('Network Test', () => {
beforeAll(() => {
jasmine.addMatchers(customMatchers);
});
it('its constructor requires OT and valid session credentials', () => {
expect(() => new NetworkTest(sessionCredentials)).toThrow(new MissingOpenTokInstanceError());
expect(() => new NetworkTest({}, sessionCredentials)).toThrow(new MissingOpenTokInstanceError());
expect(() => new NetworkTest(OT)).toThrow(new MissingSessionCredentialsError());
expect(() => new NetworkTest(OT, malformedCredentials)).toThrow(new IncompleteSessionCredentialsError());
expect(new NetworkTest(OT, sessionCredentials)).toBeInstanceOf(NetworkTest);
});
describe('Connectivity Test', () => {
it('validates its onComplete callback', () => {
expect(() => networkTest.testConnectivity('callback').toThrow(new InvalidOnCompleteCallback()))
expect(() => networkTest.testConnectivity(validOnCompleteCallback).not.toThrowError(NetworkTestError))
});
describe('Test Results', () => {
it('should contain success and failedTests properties', (done) => {
networkTest.testConnectivity()
.then((results: ConnectivityTestResults) => {
it('should contain a boolean success property', () => {
expect(results.success).toBeABoolean
});
it('should contain an array of failedTests', () => {
expect(results.failedTests).toBeInstanceOf(Array);
});
done();
});
}, 10000);
it('should return a failed test case if invalid session credentials are used', (done) => {
const validateResults = (results: ConnectivityTestResults) => {
expect(results.success).toBe(false);
expect(results.failedTests).toBeInstanceOf(Array);
const [initialFailure, secondaryFailure] = results.failedTests;
expect(initialFailure.type).toBe('messaging');
expect(initialFailure.error).toBeInstanceOf(ConnectToSessionError);
expect(secondaryFailure.type).toBe('media');
expect(secondaryFailure.error).toBeInstanceOf(PublishToSessionError);
};
const validateError = (error?: ConnectivityError) => {
expect(error).toBeUndefined();
};
badCredentialsNetworkTest.testConnectivity()
.then(validateResults)
.catch(validateError)
.finally(done);
});
it('should result in a failed test if the logging server cannot be reached', (done) => {
const badLoggingOT = {
...OT,
...{
properties: {
...OT.properties,
loggingURL: OT.properties.loggingURL.replace('tokbox', 'bad-tokbox')
}
}
};
const badLoggingNetworkTest = new NetworkTest(badLoggingOT, badLoggingCredentials)
badLoggingNetworkTest.testConnectivity()
.then((results: ConnectivityTestResults) => {
expect(results.failedTests).toBeInstanceOf(Array);
if (results.failedTests.find(f => f.type === 'logging')) {
done();
}
});
}, 10000);
it('should result in a failed test if the API server cannot be reached', (done) => {
const badApiOT = {
...OT,
...{
properties: {
...OT.properties,
apiURL: OT.properties.apiURL.replace('opentok', 'bad-opentok')
}
}
};
// Why is this necessary? (Is an old session still connected?)
OT.properties.apiURL = OT.properties.apiURL.replace('opentok', 'bad-opentok');
const badApiNetworkTest = new NetworkTest(badApiOT, badApiCredentials)
badApiNetworkTest.testConnectivity()
.then((results: ConnectivityTestResults) => {
expect(results.failedTests).toBeInstanceOf(Array);
if (results.failedTests.find(f => f.type === 'api')) {
done();
OT.properties.apiURL = OT.properties.apiURL.replace('bad-opentok', 'opentok');
}
OT.properties.apiURL = OT.properties.apiURL.replace('bad-opentok', 'opentok');
});
}, 10000);
});
describe('Quality Test', () => {
it('validates its onUpdate and onComplete callbacks', () => {
expect(() => networkTest.testQuality('callback').toThrow(new InvalidOnUpdateCallback()))
expect(() => networkTest.testQuality(validOnUpdateCallback, 'callback').toThrow(new InvalidOnCompleteCallback()))
expect(() => networkTest.testConnectivity(validOnUpdateCallback, validOnCompleteCallback).not.toThrowError(NetworkTestError))
});
it('should return an error if invalid session credentials are used', (done) => {
const validateResults = (results: QualityTestResults) => {
expect(results).toBe(undefined);
};
const validateError = (error?: QualityTestError) => {
expect(error).toBeInstanceOf(QualityTestSessionError);
};
badCredentialsNetworkTest.testQuality()
.then(validateResults)
.catch(validateError)
.finally(done);
});
it('should return valid test results or an error', (done) => {
const validateResults = (results: QualityTestResults) => {
const { mos, audio, video } = results;
expect(mos).toEqual(jasmine.any(Number));
expect(audio.bitrate).toEqual(jasmine.any(Number));
expect(audio.supported).toEqual(jasmine.any(Boolean));
expect(audio.reason || '').toEqual(jasmine.any(String));
expect(audio.packetLossRatio).toEqual(jasmine.any(Number));
expect(video.bitrate).toEqual(jasmine.any(Number));
expect(video.supported).toEqual(jasmine.any(Boolean));
expect(video.reason || '').toEqual(jasmine.any(String));
expect(video.packetLossRatio).toEqual(jasmine.any(Number));
expect(video.frameRate).toEqual(jasmine.any(Number));
expect(video.recommendedResolution).toEqual(jasmine.any(String));
expect(video.recommendedFrameRate).toEqual(jasmine.any(Number));
};
const validateError = (error?: QualityTestError) => {
expect(error).toBe(QualityTestError);
};
const onUpdate = (stats: Stats) => console.info('Subscriber stats:', stats);
networkTest.testQuality(onUpdate)
.then(validateResults)
.catch(validateError)
.finally(done);
}, 40000);
});
});
});
|
Otto-Health/opentok-network-test-js
|
src/NetworkTest/errors/index.ts
|
<filename>src/NetworkTest/errors/index.ts
/**
* @module Errors
*/
/**
* Base class for errors used throughout Network Connectivity tests.
*/
export class NetworkTestError extends Error {
  constructor(message: string) {
    super(message);
    // Restore the prototype chain, which transpiled subclasses of the
    // built-in Error otherwise lose (breaking instanceof checks).
    Object.setPrototypeOf(this, NetworkTestError.prototype);
    // Name the error after the concrete subclass that was constructed.
    this.name = this.constructor.name;
    this.stack = (new Error(message)).stack;
  }
}
/** Thrown when no usable OpenTok.js instance is supplied. */
export class MissingOpenTokInstanceError extends NetworkTestError {
  constructor() {
    super('An instance of OT, the OpenTok.js client SDK, is required.');
  }
}
/** Thrown when credentials are present but a field is missing. */
export class IncompleteSessionCredentialsError extends NetworkTestError {
  constructor() {
    super('NetworkConnectivity requires an apiKey, sessionId, and token.');
  }
}
/** Thrown when no credentials object is supplied at all. */
export class MissingSessionCredentialsError extends NetworkTestError {
  constructor() {
    super('NetworkConnectivity requires OpenTok session credentials.');
  }
}
/** Thrown when the supplied credentials object is malformed. */
export class InvalidSessionCredentialsError extends NetworkTestError {
  constructor() {
    super('NetworkConnectivity session credentials must include an apiKey, sessionId, and token.');
  }
}
/** Thrown when the onUpdate callback fails arity/type validation. */
export class InvalidOnUpdateCallback extends NetworkTestError {
  constructor() {
    super('The onUpdate callback must be a function that accepts a single parameter.');
  }
}
/** Thrown when the onComplete callback fails arity/type validation. */
export class InvalidOnCompleteCallback extends NetworkTestError {
  constructor() {
    super('The onComplete callback must be a function that accepts error and results parameters');
  }
}
|
Otto-Health/opentok-network-test-js
|
src/types/analytics.d.ts
|
<gh_stars>0
/**
* Analytics
*/
// Constructor options for the analytics logger.
type Config = {
  sessionId: string
  partnerId: string,
  source: string,
  clientVersion: string,
  name: string,
  componentId: string,
}
// Session metadata attached to subsequent log events.
type SessionInfo = {
  sessionId: string,
  connectionId: string,
  partnerId: string
};
// A single analytics event: an action and its outcome variation.
type LogEvent = {
  action: string,
  variation: string
}
// Global ambient declaration for the logger class.
// NOTE(review): this duplicates the module declaration below — confirm both
// forms are still needed by consumers.
declare class OTKAnalytics {
  constructor(config: Config);
  addSessionInfo(info: SessionInfo): void;
  logEvent(event: LogEvent): void;
}
declare module 'opentok-solutions-logging' {
  type SessionInfo = { sessionId: string, connectionId: string, partnerId: string };
  class OTKAnalytics {
    constructor(options: {
      sessionId: string,
      partnerId: string,
      source: string,
      clientVersion: string,
      name: string,
      componentId: string,
    });
    addSessionInfo(info: SessionInfo): void;
    logEvent(options: { action: string, variation: string }): void;
  }
  namespace OTKAnalytics { }
  export = OTKAnalytics;
}
|
Otto-Health/opentok-network-test-js
|
src/NetworkTest/testQuality/errors/index.ts
|
/**
* @module Errors/Quality
*/
/**
* Define errors for Connectivity Test
*/
import { NetworkTestError } from '../../errors';
/**
* Base class for errors used throughout Network Quality test.
*/
export class QualityTestError extends NetworkTestError {
  constructor(message: string) {
    super(message);
    // Restore the prototype chain so instanceof works for transpiled
    // subclasses of Error.
    Object.setPrototypeOf(this, QualityTestError.prototype);
    this.name = this.constructor.name;
    this.stack = (new Error(message)).stack;
  }
}
/**
 * Browser Error
 */
/** Thrown when the browser cannot run the audio-video quality test. */
export class UnsupportedBrowserError extends QualityTestError {
  name: string;
  constructor(browser: string) {
    const message =
      `Your current browser (${browser}) does not support the audio-video quality test. Please run the test in Chrome or Firefox.`;
    super(message);
    Object.setPrototypeOf(this, UnsupportedBrowserError.prototype);
    this.name = this.constructor.name;
  }
}
/**
 * Session Errors
 */
/** Base error for session-connection failures during the quality test. */
export class ConnectToSessionError extends QualityTestError {
  name: string;
  constructor(message?: string) {
    const defaultMessage = 'Failed to connect to the session due to a network error.';
    super(message || defaultMessage);
    Object.setPrototypeOf(this, ConnectToSessionError.prototype);
    this.name = this.constructor.name;
  }
}
/** Connection failed because the token was invalid. */
export class ConnectToSessionTokenError extends ConnectToSessionError {
  constructor() {
    super('Failed to connect to the session due to an invalid token.');
  }
}
/** Connection failed because the session ID was invalid. */
export class ConnectToSessionSessionIdError extends ConnectToSessionError {
  constructor() {
    super('Failed to connect to the session due to an invalid session ID.');
  }
}
/** Connection failed because of a network error. */
export class ConnectToSessionNetworkError extends ConnectToSessionError {
  constructor() {
    super('Failed to connect to the session due to a network error.');
  }
}
/**
 * Missing Device Errors
 */
/** Base error for media-device discovery failures. */
export class MediaDeviceError extends QualityTestError {
  name: string;
  constructor(message?: string) {
    const defaultMessage = 'OpenTok failed to find media devices for this browser.';
    super(message || defaultMessage);
    Object.setPrototypeOf(this, MediaDeviceError.prototype);
    this.name = this.constructor.name;
  }
}
// NOTE(review): the three errors below extend QualityTestError directly
// rather than MediaDeviceError, despite sitting in this section — confirm
// that is intentional.
export class FailedToObtainMediaDevices extends QualityTestError {
  constructor() {
    super('Failed to obtain media devices.');
  }
}
export class NoVideoCaptureDevicesError extends QualityTestError {
  constructor() {
    super('This browser has no video capture devices');
  }
}
export class NoAudioCaptureDevicesError extends QualityTestError {
  constructor() {
    super('This browser has no audio capture devices.');
  }
}
/**
 * Publisher Errors
 */
/** Base error for failures while publishing to the test session. */
export class PublishToSessionError extends QualityTestError {
  name: string;
  constructor(message?: string) {
    const defaultMessage = 'Encountered an unknown error while attempting to publish to a session.';
    super(message || defaultMessage);
    Object.setPrototypeOf(this, PublishToSessionError.prototype);
    this.name = this.constructor.name;
  }
}
/** Publisher initialization failed. */
export class InitPublisherError extends PublishToSessionError {
  constructor(message?: string) {
    super(message || 'Failed to initialize publisher.');
  }
}
/** Publish attempted while the session was not connected. */
export class PublishToSessionNotConnectedError extends PublishToSessionError {
  constructor() {
    super('Precall failed to publish to the session because it was not connected.');
  }
}
/** Publish failed due to a permissions prompt denial or timeout. */
export class PublishToSessionPermissionOrTimeoutError extends PublishToSessionError {
  constructor() {
    super('Precall failed to publish to the session due a permissions error or timeout.');
  }
}
/**
 * Subscriber Errors
 */
/** Base error for failures while subscribing to the test session. */
export class SubscribeToSessionError extends QualityTestError {
  constructor(message?: string) {
    // FIX: the default message said "publish"; this error is raised while
    // subscribing.
    const defaultMessage = 'Encountered an unknown error while attempting to subscribe to a session.';
    super(message || defaultMessage);
    Object.setPrototypeOf(this, SubscribeToSessionError.prototype);
    this.name = this.constructor.name;
  }
}
/** Raised when subscriber network stats cannot be retrieved. */
export class SubscriberGetStatsError extends SubscribeToSessionError {
  constructor() {
    super('Failed to get network stats for a subscriber.');
  }
}
/** Raised when subscriber quality is checked before a subscription exists. */
export class MissingSubscriberError extends SubscribeToSessionError {
  constructor() {
    super('Call checkSubscribeToSession before calling checkSubscriberQuality.');
  }
}
|
Otto-Health/opentok-network-test-js
|
src/NetworkTest/index.ts
|
<filename>src/NetworkTest/index.ts
/**
* @module NetworkTest
*/
/**
* Define Network Connectivy class
*/
const version = require('../../package.json').version;
import { testConnectivity, ConnectivityTestResults } from './testConnectivity';
import testQuality from './testQuality';
import {
IncompleteSessionCredentialsError,
InvalidOnCompleteCallback,
InvalidOnUpdateCallback,
MissingOpenTokInstanceError,
MissingSessionCredentialsError,
} from './errors';
import { getOr } from '../util';
/* tslint:disable */
const OTKAnalytics = require('opentok-solutions-logging');
/* tslint:enable */
export default class NetworkTest {
  credentials: SessionCredentials;
  OT: OpenTok;
  otLogging: OTKAnalytics;
  /**
   * Returns an instance of NetworkConnectivity. See the "API reference" section of the
   * README.md file in the root of the opentok-network-test-js project for details.
   */
  constructor(OT: OpenTok, credentials: SessionCredentials) {
    // Validation order matters: an invalid OT instance is reported before
    // credential problems (the test suite asserts this ordering).
    this.validateOT(OT);
    this.validateCredentials(credentials);
    this.otLogging = this.startLoggingEngine(credentials.apiKey, credentials.sessionId);
    this.OT = OT;
    this.credentials = credentials;
  }
  // Throws unless OT looks like the OpenTok.js client SDK (duck-typed on
  // initSession).
  private validateOT(OT: OpenTok) {
    if (!OT || typeof OT !== 'object' || !OT.initSession) {
      throw new MissingOpenTokInstanceError();
    }
  }
  // Throws when credentials are absent or any required field is missing.
  private validateCredentials(credentials: SessionCredentials) {
    if (!credentials) {
      throw new MissingSessionCredentialsError();
    }
    if (!credentials.apiKey || !credentials.sessionId || !credentials.token) {
      throw new IncompleteSessionCredentialsError();
    }
  }
  // Validates optional callbacks, logging a failure event before throwing.
  // The arity checks (1 for onUpdate, 2 for onComplete) enforce the
  // documented callback signatures.
  private validateCallbacks(
    action: string,
    updateCallback?: UpdateCallback<any>,
    onComplete?: CompletionCallback<any>) {
    if (updateCallback) {
      if (typeof updateCallback !== 'function' || updateCallback.length !== 1) {
        this.otLogging.logEvent({ action, variation: 'Failure' });
        throw new InvalidOnUpdateCallback();
      }
    }
    if (onComplete) {
      if (typeof onComplete !== 'function' || onComplete.length !== 2) {
        this.otLogging.logEvent({ action, variation: 'Failure' });
        throw new InvalidOnCompleteCallback();
      }
    }
  }
  // Creates the analytics logger used to record test attempts and failures.
  private startLoggingEngine(apiKey: string, sessionId: string): OTKAnalytics {
    return new OTKAnalytics({
      sessionId,
      partnerId: apiKey,
      source: window.location.href,
      clientVersion: 'js-network-test-' + version,
      name: 'opentok-network-test',
      componentId: 'opentok-network-test',
    });
  }
  /**
   * This method checks to see if the client can connect to TokBox servers required for
   * using OpenTok.
   *
   * See the "API reference" section of the README.md file in the root of the
   * opentok-network-test-js project for details.
   */
  testConnectivity(
    onComplete?: CompletionCallback<ConnectivityTestResults>): Promise<ConnectivityTestResults> {
    this.otLogging.logEvent({ action: 'testConnectivity', variation: 'Attempt' });
    this.validateCallbacks('testConnectivity', undefined, onComplete);
    return testConnectivity(this.OT, this.credentials, this.otLogging, onComplete);
  }
  /**
   * This function runs a test publisher and based on the measured video bitrate,
   * audio bitrate, and the audio packet loss for the published stream, it returns
   * results indicating the recommended supported publisher settings.
   *
   * See the "API reference" section of the README.md file in the root of the
   * opentok-network-test-js project for details.
   */
  testQuality(
    updateCallback?: UpdateCallback<UpdateCallbackStats>,
    completionCallback?: CompletionCallback<QualityTestResults>): Promise<any> {
    this.otLogging.logEvent({ action: 'testQuality', variation: 'Attempt' });
    this.validateCallbacks('testQuality', updateCallback, completionCallback);
    return testQuality(
      this.OT, this.credentials, this.otLogging, updateCallback, completionCallback);
  }
}
|
Otto-Health/opentok-network-test-js
|
src/NetworkTest/testQuality/helpers/getLatestSampleWindow.ts
|
import config from './config';
import { getOr, last } from '../../../util';
/**
 * Returns the trailing slice of `stats` that falls inside the configured
 * steady-state sample window, measured back from the newest sample's
 * timestamp (0 is used when the list is empty).
 */
export default function getLatestSampleWindow(stats: OT.SubscriberStats[]): OT.SubscriberStats[] {
  const newestTimestamp: number = getOr(0, 'timestamp', last(stats));
  const cutoff = newestTimestamp - config.steadyStateSampleWindow;
  return stats.filter(sample => sample.timestamp >= cutoff);
}
|
Otto-Health/opentok-network-test-js
|
src/NetworkTest/testQuality/helpers/isBitrateSteadyState.ts
|
<reponame>Otto-Health/opentok-network-test-js<gh_stars>0
import getLatestSampleWindow from './getLatestSampleWindow';
import calculateQualityStats from './calculateQualityStats';
import config from './config';
/**
 * Reports whether bitrate has stopped growing across the latest sample
 * window. Steady state requires enough samples and, for both audio and
 * video, no sample-over-sample bitrate increase larger than the allowed
 * fraction of the previous bitrate.
 */
export default function isBitrateSteadyState(statsList: OT.SubscriberStats[]): boolean {
  const latestSamples = getLatestSampleWindow(statsList);
  if (latestSamples.length < config.minimumVideoAndAudioTestSampleSize) {
    // Too few samples to judge; keep sampling.
    return false;
  }
  const qualityStats = calculateQualityStats(latestSamples);
  const allowedDelta = config.steadyStateAllowedDelta;
  const avTypes: AV[] = ['video', 'audio'];
  return avTypes.every((avType: 'audio' | 'video') => {
    const series = qualityStats[avType];
    for (let i = 1; i < series.length; i += 1) {
      const previous = series[i - 1].averageBitrate;
      const delta = series[i].averageBitrate - previous;
      // Growth beyond the allowed fraction means we are not yet steady.
      if (delta > previous * allowedDelta) {
        return false;
      }
    }
    return true;
  });
}
|
Otto-Health/opentok-network-test-js
|
src/NetworkTest/testQuality/index.ts
|
<filename>src/NetworkTest/testQuality/index.ts
/**
* @module Test/Publishing
* @preferred
*
* Defines the methods required for the Publishing Test Flow
*/
/**
* Publishing Test Flow
*/
import * as Promise from 'promise';
import { get, getOr, pick } from '../../util';
import * as e from './errors/';
import { OTErrorType, errorHasName } from '../errors/types';
import subscriberMOS from './helpers/subscriberMOS';
import MOSState from './helpers/MOSState';
import config from './helpers/config';
import isSupportedBrowser from './helpers/isSupportedBrowser';
/** Mutable aggregate accumulated while the quality test runs; converted to the
 * public QualityTestResults shape by buildResults(). */
type QualityTestResultsBuilder = {
  state: MOSState,
  subscriber: OT.Subscriber,
  credentials: SessionCredentials,
  mosScore?: number,
  bandwidth?: Bandwidth,
};
/** Invoked by the MOS estimator once it has collected enough samples. */
type MOSResultsCallback = (state: MOSState) => void;
// Module-level phase flag: the initial test is audio-video; it is flipped to
// true (and the test re-run) when audio quality is unacceptable.
let audioOnly = false; // The initial test is audio-video
/**
 * If not already connected, connect to the OpenTok Session.
 * Resolves with the (now connected) session; rejects with a typed
 * connectivity error mapped from the OTError name.
 */
function connectToSession(session: OT.Session, token: string): Promise<OT.Session> {
  return new Promise((resolve, reject) => {
    if (session.connection) {
      // Already connected; nothing to do.
      resolve(session);
      return;
    }
    session.connect(token, (error?: OT.OTError) => {
      if (error) {
        // Map the raw OT error onto this library's typed errors.
        if (errorHasName(error, OTErrorType.AUTHENTICATION_ERROR)) {
          reject(new e.ConnectToSessionTokenError());
        } else if (errorHasName(error, OTErrorType.INVALID_SESSION_ID)) {
          reject(new e.ConnectToSessionSessionIdError());
        } else if (errorHasName(error, OTErrorType.CONNECT_FAILED)) {
          reject(new e.ConnectToSessionNetworkError());
        } else {
          reject(new e.ConnectToSessionError());
        }
        // Fix: previously execution fell through to resolve(session) after
        // rejecting. The extra resolve was a no-op (first settle wins) but
        // obscured intent; return early instead.
        return;
      }
      resolve(session);
    });
  });
}
/**
 * Ensure that at least one audio-capture and one video-capture device are
 * available. Rejects with a typed error when device enumeration fails or
 * when either kind of device is missing.
 */
function validateDevices(OT: OpenTok): Promise<void> {
  return new Promise((resolve, reject) => {
    OT.getDevices((error?: OT.OTError, devices: OT.Device[] = []) => {
      if (error) {
        reject(new e.FailedToObtainMediaDevices());
        return;
      }
      // Anything that is not an audio input is treated as a video input,
      // mirroring the original bucketing logic.
      const audioDevices = devices.filter((device: OT.Device) => device.kind === 'audioInput');
      const videoDevices = devices.filter((device: OT.Device) => device.kind !== 'audioInput');
      if (!audioDevices.length) {
        reject(new e.NoAudioCaptureDevicesError());
      } else if (!videoDevices.length) {
        reject(new e.NoVideoCaptureDevicesError());
      } else {
        resolve();
      }
    });
  });
}
/**
 * Create a test publisher and subscribe to the publisher's own stream.
 * Returns a session-consuming function so it slots into the
 * connectToSession(...).then(...) chain. Resolves with the subscriber once
 * the subscription succeeds; rejects with a typed error at whichever step
 * fails first (device validation, publisher init, publish, or subscribe).
 */
function publishAndSubscribe(OT: OpenTok) {
  return (session: OT.Session): Promise<OT.Subscriber> =>
    new Promise((resolve, reject) => {
      type StreamCreatedEvent = OT.Event<'streamCreated', OT.Publisher> & { stream: OT.Stream };
      // Off-screen 1x1 transparent container so the publisher/subscriber
      // video elements have somewhere to mount without being visible.
      const containerDiv = document.createElement('div');
      containerDiv.style.position = 'fixed';
      containerDiv.style.bottom = '-1px';
      containerDiv.style.width = '1px';
      containerDiv.style.height = '1px';
      containerDiv.style.opacity = '0';
      document.body.appendChild(containerDiv);
      const publisherOptions: OT.PublisherProperties = {
        resolution: '1280x720',
        width: '100%',
        height: '100%',
        insertMode: 'append',
        showControls: false,
      };
      validateDevices(OT)
        .then(() => {
          const publisher = OT.initPublisher(containerDiv, publisherOptions, (error?: OT.OTError) => {
            if (error) {
              reject(new e.InitPublisherError(error.message));
            } else {
              session.publish(publisher, (publishError?: OT.OTError) => {
                if (publishError) {
                  if (errorHasName(publishError, OTErrorType.NOT_CONNECTED)) {
                    return reject(new e.PublishToSessionNotConnectedError());
                  }
                  if (errorHasName(publishError, OTErrorType.UNABLE_TO_PUBLISH)) {
                    return reject(new e.PublishToSessionPermissionOrTimeoutError());
                  }
                  return reject(new e.PublishToSessionError());
                  // return reject(new e.PublishToSessionError(publishError.message));
                }
              });
            }
          });
          // Subscribe to our own stream with testNetwork: true so the
          // collected stats reflect this client's up/down link.
          publisher.on('streamCreated', (event: StreamCreatedEvent) => {
            const subscriber =
              session.subscribe(event.stream,
                containerDiv,
                { testNetwork: true, insertMode: 'append' },
                (subscribeError?: OT.OTError) => {
                  return subscribeError ?
                    reject(new e.SubscribeToSessionError(subscribeError.message)) :
                    resolve(subscriber);
                });
          });
        })
        .catch(reject);
    });
}
/**
 * Connect to the OpenTok session, create a publisher, and subscribe to the
 * publisher's stream.
 *
 * Returns the promise chain directly instead of wrapping an already-thenable
 * chain in `new Promise(...)` (the promise-constructor anti-pattern in the
 * original); resolution and rejection values propagate unchanged.
 */
function subscribeToTestStream(
  OT: OpenTok,
  session: OT.Session,
  credentials: SessionCredentials): Promise<OT.Subscriber> {
  return connectToSession(session, credentials.token)
    .then(publishAndSubscribe(OT));
}
/**
 * Assemble the public QualityTestResults shape from the accumulated MOS
 * state: shared stat fields for audio, shared plus frame-rate/resolution
 * recommendations for video.
 */
function buildResults(builder: QualityTestResultsBuilder): QualityTestResults {
  const audioProps: (keyof AverageStats)[] = ['bitrate', 'packetLossRatio', 'supported', 'reason'];
  const videoProps: (keyof AverageStats)[] =
    [...audioProps, 'frameRate', 'recommendedResolution', 'recommendedFrameRate'];
  return {
    mos: builder.state.qualityScore(),
    audio: pick(audioProps, builder.state.stats.audio),
    video: pick(videoProps, builder.state.stats.video),
  };
}
/**
 * Audio is acceptable when its bitrate exceeds the first audio threshold's
 * bps and its packet-loss ratio is either exactly zero or below that
 * threshold's plr.
 */
function isAudioQualityAcceptable(results: QualityTestResults): boolean {
  const { bitrate, packetLossRatio } = results.audio;
  const threshold = config.qualityThresholds.audio[0];
  const bitrateOk = !!bitrate && bitrate > threshold.bps;
  const packetLossOk =
    packetLossRatio === 0 ||
    (!!packetLossRatio && packetLossRatio < threshold.plr);
  return bitrateOk && packetLossOk;
}
/**
 * Run the subscriber-side quality test: subscribe to the test stream, feed
 * subscriber stats into the MOS estimator, and resolve with the built
 * results. If the audio-video pass yields unacceptable audio quality, the
 * whole test is re-run once in audio-only mode via the module-level
 * `audioOnly` flag (see its declaration above this function).
 */
function checkSubscriberQuality(
  OT: OpenTok,
  session: OT.Session,
  credentials: SessionCredentials,
  onUpdate?: UpdateCallback<OT.SubscriberStats>): Promise<QualityTestResults> {
  let mosEstimatorTimeoutId: number;
  return new Promise((resolve, reject) => {
    subscribeToTestStream(OT, session, credentials)
      .then((subscriber: OT.Subscriber) => {
        if (!subscriber) {
          reject(new e.MissingSubscriberError());
        } else {
          try {
            const builder: QualityTestResultsBuilder = {
              state: new MOSState(),
              ... { subscriber },
              ... { credentials },
            };
            // Relay each stats sample to the caller, tagged with the phase.
            const getStatsListener = (error?: OT.OTError, stats?: OT.SubscriberStats) => {
              const updateStats = (subscriberStats: OT.SubscriberStats): UpdateCallbackStats => ({
                ...subscriberStats,
                phase: audioOnly ? 'audio-only' : 'audio-video',
              });
              stats && onUpdate && onUpdate(updateStats(stats));
            };
            const processResults = () => {
              const audioVideoResults: QualityTestResults = buildResults(builder);
              if (!audioOnly && !isAudioQualityAcceptable(audioVideoResults)) {
                // Audio failed during the audio-video pass: retry the entire
                // test audio-only and resolve with those results instead.
                audioOnly = true;
                checkSubscriberQuality(OT, session, credentials, onUpdate)
                  .then((results: QualityTestResults) => {
                    resolve(results);
                  });
              } else {
                // Resolve only after the session has fully disconnected.
                session.on('sessionDisconnected', () => {
                  resolve(audioVideoResults);
                  session.off();
                });
                session.disconnect();
              }
            };
            const resultsCallback: MOSResultsCallback = (state: MOSState) => {
              clearTimeout(mosEstimatorTimeoutId);
              processResults();
            };
            subscriberMOS(builder.state, subscriber, getStatsListener, resultsCallback);
            // Hard stop: build results from whatever was sampled if the MOS
            // estimator does not call back within the configured duration.
            mosEstimatorTimeoutId = window.setTimeout(processResults, audioOnly ? config.getStatsAudioOnlyDuration
              : config.getStatsVideoAndAudioTestDuration);
          } catch (exception) {
            reject(new e.SubscriberGetStatsError());
          }
        }
      })
      .catch(reject);
  });
}
/**
 * Ensure that the test is being run in a supported browser. Rejects with an
 * UnsupportedBrowserError naming the detected browser otherwise.
 */
function validateBrowser(): Promise<void> {
  return new Promise((resolve, reject) => {
    const check = isSupportedBrowser();
    if (check.supported) {
      resolve();
    } else {
      reject(new e.UnsupportedBrowserError(check.browser));
    }
  });
}
/**
 * This method checks to see if the client can publish to an OpenTok session.
 * Validates the browser, runs the subscriber quality check, logs the
 * outcome, and reports it through both the optional completion callback and
 * the returned promise.
 */
export default function testQuality(
  OT: OpenTok,
  credentials: SessionCredentials,
  otLogging: OTKAnalytics,
  onUpdate?: UpdateCallback<UpdateCallbackStats>,
  onComplete?: CompletionCallback<QualityTestResults>): Promise<QualityTestResults> {
  return new Promise((resolve, reject) => {
    const onSuccess = (results: QualityTestResults) => {
      if (onComplete) {
        onComplete(undefined, results);
      }
      otLogging.logEvent({ action: 'testQuality', variation: 'Success' });
      resolve(results);
    };
    const onError = (error: Error) => {
      otLogging.logEvent({ action: 'testQuality', variation: 'Failure' });
      if (onComplete) {
        onComplete(error, null);
      }
      reject(error);
    };
    validateBrowser()
      .then(() => {
        const session = OT.initSession(credentials.apiKey, credentials.sessionId);
        return checkSubscriberQuality(OT, session, credentials, onUpdate);
      })
      .then(onSuccess)
      .catch(onError);
  });
}
|
Otto-Health/opentok-network-test-js
|
src/types/opentok.d.ts
|
/**
* @module Types/OpenTok
*/
/**
* OpenTok Client SDK types
*/
// Ambient type declarations for the OpenTok.js client SDK surface that this
// project consumes. Type-only; erased at compile time.
declare module OT {
  // Static configuration exposed by the loaded OpenTok.js bundle.
  type Properties = {
    version: string,
    buildHash: string,
    debug: boolean,
    websiteURL: string,
    cdnURL: string,
    loggingURL: string,
    apiURL: string,
    supportSSL: boolean,
    cdnURLSSL: string,
    loggingURLSSL: string,
    apiURLSSL: string,
    minimumVersion: { firefox: number, chrome: number },
    sentryDSN: string,
    enableErrorReporting: boolean,
    assetURL: string,
    cssURL: string
  }
  type OTError = {
    name: string;
    message: string;
  };
  type Dimensions = {
    width: number;
    height: number;
  }
  type ScreenSharingCapabilityResponse = {
    extensionInstalled: boolean;
    supported: boolean;
    supportedSources: {
      application: boolean;
      screen: boolean;
      window: boolean;
    };
    extensionRegistered?: string;
  };
  export function checkScreenSharingCapability(
    callback: (response: ScreenSharingCapabilityResponse) => void
  ): void;
  export function checkSystemRequirements(): number;
  type Device = {
    kind: 'audioInput' | 'videoInput';
    deviceId: string;
    label: string;
  };
  export function getDevices(
    callback: (error: OTError | undefined, devices?: Device[]) => void
  ): void;
  // --- Widget (publisher/subscriber) styling and option shapes ---
  type WidgetStyle = {
    audioLevelDisplayMode: 'auto' | 'on' | 'off';
    backgroundImageURI: string;
    buttonDisplayMode: 'auto' | 'on' | 'off';
    nameDisplayMode: 'auto' | 'on' | 'off';
  };
  type WidgetProperties = {
    fitMode?: 'cover' | 'contain';
    insertDefaultUI?: boolean;
    insertMode?: 'replace' | 'after' | 'before' | 'append';
    showControls?: boolean;
    width?: string | number;
    height?: string | number;
  };
  type PublisherStyle = WidgetStyle & {
    archiveStatusDisplayMode: 'auto' | 'off';
  };
  type PublisherProperties = WidgetProperties & {
    audioBitrate?: number;
    audioFallbackEnabled?: boolean;
    audioSource?: string | null;
    disableAudioProcessing?: boolean;
    frameRate?: 30 | 15 | 7 | 1;
    maxResolution?: Dimensions;
    mirror?: boolean;
    name?: string;
    publishAudio?: boolean;
    publishVideo?: boolean;
    resolution?: (
      '1280x960' |
      '1280x720' |
      '640x480' |
      '640x360' |
      '320x240' |
      '320x180'
    );
    style?: Partial<PublisherStyle>;
    usePreviousDeviceSelection?: boolean;
    videoSource?: string | null;
  };
  type SubscriberStyle = WidgetStyle & {
    videoDisabledDisplayMode: 'auto' | 'on' | 'off';
    audioBlockedDisplayMode: 'auto' | 'on' | 'off';
  };
  type SubscriberProperties = WidgetProperties & {
    audioVolume?: number;
    preferredFrameRate?: number;
    preferredResolution?: Dimensions;
    style?: Partial<SubscriberStyle>;
    subscribeToAudio?: boolean;
    subscribeToVideo?: boolean;
    testNetwork?: boolean;
  };
  export class Connection {
    connectionId: string;
    creationTime: number;
    data: string;
  }
  export class Stream {
    connection: Connection;
    creationTime: number;
    frameRate: number;
    hasAudio: boolean;
    hasVideo: boolean;
    name: string;
    streamId: string;
    videoDimensions: {
      width: number;
      height: number;
    };
    videoType: 'camera' | 'screen';
  }
  // --- Typed event plumbing shared by Publisher/Session/Subscriber ---
  type Event<Type, Target> = {
    type: Type;
    cancelable: boolean;
    target: Target;
    isDefaultPrevented(): boolean;
    preventDefault(): void;
  };
  type VideoDimensionsChangedEvent<Target> = Event<'videoDimensionsChanged', Target> & {
    oldValue: Dimensions;
    newValue: Dimensions;
  };
  // Event emitter whose event names and payloads are tied together by EventMap.
  class OTEventEmitter<EventMap> {
    on<EventName extends keyof EventMap>(
      eventName: EventName,
      callback: (event: EventMap[EventName]) => void
    ): void;
    once<EventName extends keyof EventMap>(
      eventName: EventName,
      callback: (event: EventMap[EventName]) => void
    ): void;
    off<EventName extends keyof EventMap>(
      eventName?: EventName,
      callback?: (event: EventMap[EventName]) => void
    ): void;
  }
  export class Publisher extends OTEventEmitter<{
    accessAllowed: Event<'accessAllowed', Publisher>;
    accessDenied: Event<'accessDenied', Publisher>;
    accessDialogClosed: Event<'accessDialogClosed', Publisher>;
    accessDialogOpened: Event<'accessDialogOpened', Publisher>;
    audioLevelUpdated: Event<'audioLevelUpdated', Publisher> & {
      audioLevel: number
    };
    destroyed: Event<'destroyed', Publisher>;
    mediaStopped: Event<'mediaStopped', Publisher> & {
      track: MediaStreamTrack | undefined
    };
    streamCreated: Event<'streamCreated', Publisher> & {
      stream: Stream;
    };
    streamDestroyed: Event<'streamDestroyed', Publisher> & {
      stream: Stream;
      reason: string;
    };
    videoDimensionsChanged: VideoDimensionsChangedEvent<Publisher>;
    videoElementCreated: Event<'videoElementCreated', Publisher> & {
      element: HTMLVideoElement | HTMLObjectElement;
    };
  }> {
    accessAllowed: boolean;
    element?: HTMLElement | undefined;
    id?: string;
    stream?: Stream;
    session?: Session;
    destroy(): void;
    getImgData(): string | null;
    getStyle(): PublisherProperties;
    publishAudio(value: boolean): void;
    publishVideo(value: boolean): void;
    setStyle<Style extends keyof PublisherStyle>(style: Style, value: PublisherStyle[Style]): void;
    videoWidth(): number | undefined;
    videoHeight(): number | undefined;
  }
  export function initPublisher(
    targetElement?: HTMLElement | string,
    properties?: PublisherProperties,
    callback?: () => void
  ): Publisher;
  export class Session extends OTEventEmitter<{
    archiveStarted: Event<'archiveStarted', Session> & {
      id: string;
      name: string;
    };
    archiveStopped: Event<'archiveStopped', Session> & {
      id: string;
      name: string;
    };
    connectionCreated: Event<'connectionCreated', Session> & {
      connection: Connection;
    };
    connectionDestroyed: Event<'connectionDestroyed', Session> & {
      connection: Connection;
      reason: string;
    };
    sessionConnected: Event<'sessionConnected', Session>;
    sessionDisconnected: Event<'sessionDisconnected', Session> & {
      reason: string;
    };
    sessionReconnected: Event<'sessionReconnected', Session>;
    sessionReconnecting: Event<'sessionReconnecting', Session>;
    signal: Event<'signal', Session> & {
      type?: string;
      data?: string;
      from: Connection;
    };
    streamCreated: Event<'streamCreated', Session> & {
      stream: Stream;
    };
    streamDestroyed: Event<'streamDestroyed', Session> & {
      stream: Stream;
      reason: string;
    };
    streamPropertyChanged: (
      Event<'streamPropertyChanged', Session> & {
        stream: Stream;
      } & (
        { changedProperty: 'hasAudio'; oldValue: boolean; newValue: boolean; } |
        { changedProperty: 'hasVideo'; oldValue: boolean; newValue: boolean; } |
        { changedProperty: 'videoDimensions'; oldValue: Dimensions; newValue: Dimensions; }
      )
    );
  }> {
    capabilities: {
      forceDisconnect: number;
      forceUnpublish: number;
      publish: number;
      subscribe: number;
    };
    connection?: Connection;
    sessionId: string;
    connect(token: string, callback: (error?: OTError) => void): void;
    disconnect(): void;
    forceDisconnect(connection: Connection, callback: (error?: OTError) => void): void;
    forceUnpublish(stream: Stream, callback: (error?: OTError) => void): void;
    getPublisherForStream(stream: Stream): Publisher | undefined;
    getSubscribersForStream(stream: Stream): [Subscriber];
    publish(publisher: Publisher, callback: (error?: OTError) => void): void;
    signal(
      signal: { type?: string, data?: string, to?: Connection },
      callback: (error?: OTError) => void
    ): void;
    subscribe(
      stream: Stream,
      targetElement?: HTMLElement | string,
      properties?: SubscriberProperties,
      callback?: (error?: OTError) => void
    ): Subscriber;
    unpublish(publisher: Publisher): void;
    unsubscribe(subscriber: Subscriber): void;
  }
  export function initSession(
    partnerId: string,
    sessionId: string
  ): Session;
  // Per-track counters reported by Subscriber.getStats().
  type TrackStats = {
    bytesReceived: number;
    packetsLost: number;
    packetsReceived: number;
  };
  type SubscriberStats = {
    audio: TrackStats;
    video: TrackStats & { frameRate: number; };
    timestamp: number;
  }
  export class SessionInfo {
    get: (sessionId: string, token: string, connectionId: string) => Promise<void>
  }
  export class Subscriber extends OTEventEmitter<{
    audioLevelUpdated: Event<'audioLevelUpdated', Subscriber> & {
      audioLevel: number
    };
    connected: Event<'connected', Subscriber>;
    destroyed: Event<'destroyed', Subscriber> & {
      reason: string;
    };
    videoDimensionsChanged: VideoDimensionsChangedEvent<Subscriber>;
    videoDisabled: Event<'videoDisabled', Subscriber> & {
      reason: string;
    };
    videoDisableWarning: Event<'videoDisableWarning', Subscriber>;
    videoDisableWarningLifted: Event<'videoDisableWarningLifted', Subscriber>;
    videoElementCreated: Event<'videoElementCreated', Subscriber> & {
      element: HTMLVideoElement | HTMLObjectElement;
    };
    videoEnabled: Event<'videoEnabled', Subscriber> & {
      reason: string;
    };
  }> {
    element?: HTMLElement;
    id?: string;
    stream?: Stream;
    getAudioVolume(): number;
    getImgData(): string | null;
    getStats(callback: (error?: OTError, stats?: SubscriberStats) => void): void;
    restrictFrameRate(value: boolean): void;
    setAudioVolume(volume: number): void;
    setPreferredFrameRate(frameRate: number): void;
    setPreferredResolution(resolution: Dimensions): void;
    setStyle<Style extends keyof SubscriberStyle>(
      style: Style,
      value: SubscriberStyle[Style]
    ): void;
    videoHeight(): number | undefined;
    videoWidth(): number | undefined;
  }
  export function registerScreenSharingExtension(
    kind: string,
    id: string,
    version: number
  ): void;
  export function reportIssue(callback: (error?: OTError, reportId?: string) => void): void;
  export function setLogLevel(level: number): void;
  export function upgradeSystemRequirements(): void;
}
|
Otto-Health/opentok-network-test-js
|
src/NetworkTest/testConnectivity/errors/mapping.ts
|
/**
* @module Errors/Connectivity/Mapping
*/
/**
* Map Connectivity Errors to Failure Types
*/
import { ConnectivityError } from './index';
// Failure categories keyed by error-class name; several names intentionally
// share a category (e.g. both API errors map to 'api').
export enum FailureType {
  APIConnectivityError = 'api',
  ConnectToSessionNetworkError = 'api',
  ConnectToSessionError = 'messaging',
  MediaDeviceError = 'OpenTok.js',
  PublishToSessionError = 'media',
  SubscribeToSessionError = 'media',
  LoggingServerConnectionError = 'logging',
  ConnectivityError = 'OpenTok.js',
}
export type FailureCase = {
  type: FailureType,
  error: ConnectivityError,
};
/**
 * Resolve an error to its failure category by indexing the enum directly.
 * The original eight-way switch had every case return FailureType[error.name],
 * so a membership check plus indexed access is equivalent; unknown names fall
 * back to the generic ConnectivityError category, as the switch's default did.
 */
const mapErrorToCase = (error: ConnectivityError): FailureCase => {
  const type: FailureType = error.name in FailureType
    ? FailureType[error.name as keyof typeof FailureType]
    : FailureType.ConnectivityError;
  return { error, type };
};
/** Map each supplied connectivity error to its failure case. */
export const mapErrors = (...errors: ConnectivityError[]): FailureCase[] => errors.map(mapErrorToCase);
|
Otto-Health/opentok-network-test-js
|
src/types/index.d.ts
|
/**
* @module Types/NetworkTest
*/
/**
* Define global types
*/
// Minimal surface of the OpenTok.js SDK object that NetworkTest requires.
interface OpenTok {
  initSession: (partnerId: string, sessionId: string) => OT.Session;
  initPublisher: (targetElement?: HTMLElement | string, properties?: OT.PublisherProperties, callback?: (error?: OT.OTError) => void) => OT.Publisher;
  getDevices(callback: (error: OT.OTError | undefined, devices?: OT.Device[]) => void): void;
  properties: OT.Properties
  SessionInfo: OT.SessionInfo
}
// Credentials required to join an OpenTok session.
type SessionCredentials = {
  apiKey: string,
  sessionId: string,
  token: string
}
// Node-style completion callback: (error, results).
type CompletionCallback<A> = (error: Error | undefined, results: A | null) => void
// NOTE(review): the type parameter A is unused here — callbacks are typed as
// receiving raw OT.SubscriberStats regardless of A. Presumably this should be
// (stats: A) => void; confirm against callers before changing.
type UpdateCallback<A> = (stats: OT.SubscriberStats) => void
type AV = 'audio' | 'video';
// NOTE(review): appears superseded by QualityTestResults below (which uses
// packetLossRatio rather than packetLoss); kept for compatibility.
type TestQualityResults = {
  mos: number,
  audio: {
    bitrate: number,
    packetLoss: number,
    supported: boolean,
    reason?: string,
  },
  video: {
    bitrate: number,
    packetLoss: number,
    frameRate: number,
    recommendedFrameRate?: string,
    recommendedResolution?: string,
    supported: boolean,
    reason?: string,
  },
}
type InputDeviceType = 'audioInput' | 'videoInput';
/**
 * Quality Test
 */
// Subscriber stats sample tagged with the test phase ('audio-only' | 'audio-video').
type UpdateCallbackStats = OT.SubscriberStats & { phase: string; }
interface HasAudioVideo<A> {
  audio: A;
  video: A;
}
interface QualityTestResults extends HasAudioVideo<AverageStats> {
  mos: number;
}
// Thresholds: bps = minimum bitrate, plr = maximum packet-loss ratio.
interface AudioThreshold { bps: number, plr: number }
interface VideoThreshold extends AudioThreshold { recommendedSetting: string }
type StatsListener = (error?: OT.OTError, stats?: OT.SubscriberStats) => void;
interface Kbps { kbps: number }
interface KbpsMap extends HasAudioVideo<Kbps[]> {}
interface Bandwidth extends HasAudioVideo<number> {}
interface AverageStatsBase {
  bitrate: number;
  packetLossRatio: number;
}
// Aggregated per-track results; video-only fields are optional.
interface AverageStats {
  bitrate?: number;
  packetLossRatio?: number;
  supported?: boolean;
  reason?: string;
  frameRate?: number;
  recommendedFrameRate?: number;
  recommendedResolution?: string;
}
type QualityEvaluationResults = {
  supported: boolean,
  recommendedFrameRate?: number,
  recommendedResolution?: string,
  reason?: string,
};
type QualityStats = {
  averageBitrate: number,
  packetLossRatio: number,
  frameRate?: number,
};
type StreamCreatedEvent = OT.Event<'streamCreated', OT.Publisher> & { stream: OT.Stream };
|
labDAO/applayer-rest-api-hub
|
src/apps/alphafold/docs.ts
|
<reponame>labDAO/applayer-rest-api-hub<gh_stars>1-10
// Mock timestamps derived from module load time: job "starts" 10s and
// "completes" 20s after submission.
const init_date = Date.now()
const submitted_at = new Date(init_date).toISOString()
const started_at = new Date(init_date + 10000).toISOString()
const completed_at = new Date(init_date + 20000).toISOString()
// Static documentation-plus-fixtures object: serves as the /alphafold docs
// response AND supplies the canned responses returned by the mock endpoints.
const docsData = {
  application: {
    name: 'alphafold'
  },
  endpoints: {
    '/alphafold': {
      description: 'return this documentation'
    },
    '/alphafold/submit': {
      description: 'submit a new job',
      method: 'POST',
      payload_example: {
        input: [{
          name: 'gp47_tail',
          sequence: 'MTANHLESPNCDWKNNRMAIVHMVNVTPLRMMEEPRAAVEAAFEGIMEPAVVGDMVEYWNKMISTCCNYYQMGSSRSHLEEKAQMVDRFWFCPCIYYASGKWRNMFLNILHVWGHHHYPRNDLKPCSYLSCKLPDLRIFFNHMQTCCHFVTLLFLTEWPTYMIYNSVDLCPMTIPRRNTCRTMTEVSSWCEPAIPEWWQATVKGGWMSTHTKFCWYPVLDPHHEYAESKMDTYGQCKKGGMVRCYKHKQQVWGNNHNESKAPCDDQPTYLCPPGEVYKGDHISKREAENMTNAWLGEDTHNFMEIMHCTAKMASTHFGSTTIYWAWGGHVRPAATWRVYPMIQEGSHCQC',
          parameters: {
            "max_template_date": "2022-01-01",
            "mode": "monomer_single",
            "weights_download_url": "https://storage.googleapis.com/alphafold/alphafold_params_2021-10-27.tar",
            "db": "full",
            "is_prokaryote": 0
          }
        }]
      },
      // Freshly-submitted job: queued, no start/completion, no output yet.
      response_example: {
        jobId: 12345,
        input: [{
          name: 'gp47_tail',
          sequence: 'MTANHLESPNCDWKNNRMAIVHMVNVTPLRMMEEPRAAVEAAFEGIMEPAVVGDMVEYWNKMISTCCNYYQMGSSRSHLEEKAQMVDRFWFCPCIYYASGKWRNMFLNILHVWGHHHYPRNDLKPCSYLSCKLPDLRIFFNHMQTCCHFVTLLFLTEWPTYMIYNSVDLCPMTIPRRNTCRTMTEVSSWCEPAIPEWWQATVKGGWMSTHTKFCWYPVLDPHHEYAESKMDTYGQCKKGGMVRCYKHKQQVWGNNHNESKAPCDDQPTYLCPPGEVYKGDHISKREAENMTNAWLGEDTHNFMEIMHCTAKMASTHFGSTTIYWAWGGHVRPAATWRVYPMIQEGSHCQC',
          parameters: {
            "max_template_date": "2022-01-01",
            "mode": "monomer_single",
            "weights_download_url": "https://storage.googleapis.com/alphafold/alphafold_params_2021-10-27.tar",
            "db": "full",
            "is_prokaryote": 0
          }
        }],
        accepted: true,
        status: 'queued',
        submitted_at: submitted_at,
        started_at: null,
        completed_at: null,
        output: []
      }
    },
    '/alphafold/status/:jobId': {
      description: 'get the status of a job',
      url: '/alphafold/status/12345',
      method: 'GET',
      // Finished job: completed with the standard AlphaFold output artifacts.
      response_example: {
        jobId: 12345,
        input: [{
          name: 'gp47_tail',
          sequence: 'MTANHLESPNCDWKNNRMAIVHMVNVTPLRMMEEPRAAVEAAFEGIMEPAVVGDMVEYWNKMISTCCNYYQMGSSRSHLEEKAQMVDRFWFCPCIYYASGKWRNMFLNILHVWGHHHYPRNDLKPCSYLSCKLPDLRIFFNHMQTCCHFVTLLFLTEWPTYMIYNSVDLCPMTIPRRNTCRTMTEVSSWCEPAIPEWWQATVKGGWMSTHTKFCWYPVLDPHHEYAESKMDTYGQCKKGGMVRCYKHKQQVWGNNHNESKAPCDDQPTYLCPPGEVYKGDHISKREAENMTNAWLGEDTHNFMEIMHCTAKMASTHFGSTTIYWAWGGHVRPAATWRVYPMIQEGSHCQC',
          parameters: {
            "max_template_date": "2022-01-01",
            "mode": "monomer_single",
            "weights_download_url": "https://storage.googleapis.com/alphafold/alphafold_params_2021-10-27.tar",
            "db": "full",
            "is_prokaryote": 0
          }
        }],
        accepted: true,
        status: 'completed',
        submitted_at: submitted_at,
        started_at: started_at,
        completed_at: completed_at,
        output: ["features.pkl", "msas", "ranked_0.pdb", "ranking_debug.json", "relaxed_model_1.pdb", "result_model_1.pkl", "timings.json", "unrelaxed_model_1.pdb"]
      }
    }
  }
}
export default docsData
|
labDAO/applayer-rest-api-hub
|
src/index.ts
|
import corsify from './corsify'
import handleRequest from './handler'
// Cloudflare Worker entry point: every fetch event is answered by the
// CORS-wrapped router; any unexpected error surfaces as a 500 whose body is
// the error stack.
addEventListener("fetch", (event) => {
  event.respondWith(
    corsify(event.request, handleRequest).catch(
      (err) => new Response(err.stack, {
        status: 500
      })
    )
  )
})
|
labDAO/applayer-rest-api-hub
|
src/declarations.d.ts
|
<filename>src/declarations.d.ts
// Shim: 'cloudflare-worker-rest-api' ships no type definitions, so its
// exports are treated as 'any'.
declare module 'cloudflare-worker-rest-api'
|
labDAO/applayer-rest-api-hub
|
src/apps/alphafold/api.ts
|
<gh_stars>1-10
import docsData from './docs'
/** GET /alphafold — serve the static documentation object. */
export const docs = (_req: any, res: any) => res.send(docsData)

/** POST /alphafold/submit — mock submission; echoes the canned example response. */
export const submit = (_req: any, res: any) =>
  res.send(docsData.endpoints['/alphafold/submit'].response_example)

/** GET /alphafold/status/:jobId — mock status lookup; only job 12345 exists. */
export const status = (req: any, res: any) => {
  const { jobId } = req.params
  const failure =
    !jobId ? "error: jobId not provided" :
    jobId !== '12345' ? "error: jobId does not exist" :
    null
  if (failure) {
    return res.send({ message: failure }, 400)
  }
  return res.send(docsData.endpoints['/alphafold/status/:jobId'].response_example)
}
|
labDAO/applayer-rest-api-hub
|
src/corsify.ts
|
<reponame>labDAO/applayer-rest-api-hub<gh_stars>1-10
// adapted from https://bartsolutions.github.io/2021/10/21/cloud-flare-cors-worker-setup/
// Headers attached to every CORS pre-flight response.
const corsHeaders: HeadersInit = {
  "Access-Control-Allow-Origin": "*",
  "Access-Control-Allow-Methods": "GET,HEAD,POST,OPTIONS",
  "Access-Control-Max-Age": "86400",
}

/**
 * Answer an OPTIONS request. A request carrying Origin plus both
 * Access-Control-Request-* headers is treated as a CORS pre-flight and
 * receives the CORS headers (echoing back the requested headers); any other
 * OPTIONS request gets a plain Allow response.
 */
function handleOptions(request: Request) {
  const headers = request.headers
  const isPreflight =
    headers.get("Origin") !== null &&
    headers.get("Access-Control-Request-Method") !== null &&
    headers.get("Access-Control-Request-Headers") !== null
  if (!isPreflight) {
    // Standard OPTIONS request: just advertise the supported methods.
    return new Response(null, {
      headers: {
        Allow: "GET, HEAD, POST, OPTIONS",
      },
    })
  }
  // CORS pre-flight: allow whatever request headers the browser asked for
  // (e.g. Authorization or X-Client-Name-Version) to come back later.
  return new Response(null, {
    headers: new Headers({
      ...corsHeaders,
      "Access-Control-Allow-Headers": headers.get("Access-Control-Request-Headers") || "",
    }),
  })
}
/**
 * Wrap a request handler with CORS support: OPTIONS requests are answered
 * directly, all other responses are re-created (so their headers are
 * mutable) and stamped with the CORS allow headers.
 */
export default async function corsify(request: Request, handleRequest: { (request: Request): Promise<Response>; }) {
  if (request.method === "OPTIONS") {
    return handleOptions(request)
  }
  const handled = await handleRequest(request)
  // Clone into a fresh Response because the handler's headers may be immutable.
  const response = new Response(handled.body, handled)
  response.headers.set("Access-Control-Allow-Origin", "*")
  response.headers.set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
  return response
}
|
labDAO/applayer-rest-api-hub
|
test/apps/alphafold.test.ts
|
import handleRequest from '../../src/handler'
import makeServiceWorkerEnv from 'service-worker-mock'
import docsData from '../../src/apps/alphafold/docs'
declare var global: any
// Install fresh service-worker globals before each test so no state leaks
// between cases.
const setup = () => {
  Object.assign(global, makeServiceWorkerEnv())
  jest.resetModules()
}
// GET /alphafold returns the full documentation object.
describe('/alphafold', () => {
  beforeEach(setup)
  test('handle GET /', async () => {
    const result = await handleRequest(new Request('/alphafold', { method: 'GET' }))
    expect(result.status).toEqual(200)
    const text = await result.text()
    expect(text).toEqual(JSON.stringify(docsData))
  })
})
// POST /alphafold/submit echoes the canned submission response.
describe('/alphafold/submit', () => {
  beforeEach(setup)
  test('handle POST /submit', async () => {
    const result = await handleRequest(new Request('/alphafold/submit', { method: 'POST', body: JSON.stringify(docsData.endpoints['/alphafold/submit'].payload_example)}))
    expect(result.status).toEqual(200)
    const text = await result.text()
    expect(text).toEqual(JSON.stringify(docsData.endpoints['/alphafold/submit'].response_example))
  })
})
// GET /alphafold/status/12345 returns the canned completed-job response.
describe('/alphafold/status/:jobId', () => {
  beforeEach(setup)
  test('handle GET /status/:jobId', async () => {
    const status = await handleRequest(new Request('/alphafold/status/12345', { method: 'GET' }))
    expect(status.status).toEqual(200)
    const text = await status.text()
    expect(text).toEqual(JSON.stringify(docsData.endpoints['/alphafold/status/:jobId'].response_example))
  })
})
|
labDAO/applayer-rest-api-hub
|
src/handler.ts
|
import App from 'cloudflare-worker-rest-api'
import { docs, submit, status } from './apps/alphafold/api'
// Router for the worker; all routes are registered relative to '/'.
const app = new App('/')
// Health check.
app.get('/', (_req: any, res: { send: (arg0: { status: string }) => any }) => {
  return res.send({ status: 'live' })
})
// Alphafold mock API routes (see src/apps/alphafold/api.ts).
app.get('/alphafold', docs)
app.post('/alphafold/submit', submit)
app.get('/alphafold/status/:jobId', status)
/** Entry point used by both the fetch listener and the tests. */
export default async function handleRequest(request: any): Promise<Response> {
  return app.handleRequest(request)
}
|
sarahM0/cts
|
src/common/runtime/helper/sys.ts
|
<reponame>sarahM0/cts
/* eslint no-process-exit: "off" */
/* eslint @typescript-eslint/no-namespace: "off" */
/** Constructs the platform shim for Node.js: file existence checks, CLI args,
 * working directory, and process exit. */
function node() {
  const { existsSync } = require('fs');
  const shim = {
    type: 'node',
    existsSync,
    args: process.argv.slice(2),
    cwd: process.cwd,
    exit: process.exit,
  };
  return shim;
}
declare global {
  // Ambient declarations for the subset of the Deno runtime API used below.
  // These exist only for type-checking; at runtime they are provided by Deno.
  namespace Deno {
    function readFileSync(path: string): Uint8Array;
    const args: string[];
    const cwd: () => string;
    function exit(code?: number): never;
  }
}
/** Constructs the platform shim when running under Deno. */
function deno() {
  // Deno has no existsSync equivalent here; emulate it by attempting a read.
  const existsSync = (path: string) => {
    try {
      Deno.readFileSync(path);
      return true;
    } catch {
      return false;
    }
  };
  return {
    type: 'deno',
    existsSync,
    args: Deno.args,
    cwd: Deno.cwd,
    exit: Deno.exit,
  };
}
// Pick the shim at module load time: a global `process` object implies
// Node.js; otherwise assume Deno (see the ambient declarations above).
const sys = typeof globalThis.process !== 'undefined' ? node() : deno();
export default sys;
|
sarahM0/cts
|
src/webgpu/idl/exposed.html.ts
|
// WPT-specific test checking that WebGPU is available iff isSecureContext.
import { assert } from '../../common/util/util.js';

// TODO: Test all WebGPU interfaces.
const items = [
  globalThis.navigator.gpu,
  globalThis.GPU,
  globalThis.GPUAdapter,
  globalThis.GPUDevice,
  globalThis.GPUBuffer,
  globalThis.GPUBufferUsage,
  globalThis.GPUCommandEncoder,
  globalThis.GPUCommandBuffer,
  globalThis.GPUComputePassEncoder,
  globalThis.GPURenderPipeline,
  globalThis.GPUDeviceLostInfo,
  globalThis.GPUValidationError,
];

for (const item of items) {
  const exposed = item !== undefined;
  if (globalThis.isSecureContext) {
    assert(exposed, 'Item/interface should be exposed on secure context');
  } else {
    assert(!exposed, 'Item/interface should not be exposed on insecure context');
  }
}
|
sarahM0/cts
|
src/webgpu/api/validation/createSampler.spec.ts
|
export const description = `
createSampler validation tests.
`;

import { makeTestGroup } from '../../../common/framework/test_group.js';

import { ValidationTest } from './validation_test.js';

export const g = makeTestGroup(ValidationTest);

g.test('lodMinAndMaxClamp')
  .desc('test different combinations of min and max clamp values')
  .paramsSubcasesOnly(u =>
    u //
      .combine('lodMinClamp', [-4e-30, -1, 0, 0.5, 1, 10, 4e30])
      .combine('lodMaxClamp', [-4e-30, -1, 0, 0.5, 1, 10, 4e30])
  )
  .fn(async t => {
    const { lodMinClamp, lodMaxClamp } = t.params;
    // Sampler creation must fail when the clamp range is inverted or either
    // bound is negative.
    const shouldError = lodMinClamp > lodMaxClamp || lodMinClamp < 0 || lodMaxClamp < 0;
    t.expectValidationError(() => {
      t.device.createSampler({ lodMinClamp, lodMaxClamp });
    }, shouldError);
  });
g.test('maxAnisotropy')
  .desc('test different maxAnisotropy values and combinations with min/mag/mipmapFilter')
  .params(u =>
    u //
      .beginSubcases()
      // NOTE(review): this spreads the cases produced by `u.combine(...)` and
      // appends three one-off cases that override a single filter each —
      // confirm this is the intended matrix rather than a full cross product.
      .combineWithParams([
        ...u.combine('maxAnisotropy', [-1, undefined, 0, 1, 2, 4, 7, 16, 32, 33, 1024]),
        { minFilter: 'nearest' as const },
        { magFilter: 'nearest' as const },
        { mipmapFilter: 'nearest' as const },
      ])
  )
  .fn(async t => {
    // Defaults mirror the WebGPU sampler descriptor defaults used below.
    const {
      maxAnisotropy = 1,
      minFilter = 'linear',
      magFilter = 'linear',
      mipmapFilter = 'linear',
    } = t.params as {
      maxAnisotropy?: number;
      minFilter?: GPUFilterMode;
      magFilter?: GPUFilterMode;
      mipmapFilter?: GPUFilterMode;
    };
    // Invalid when maxAnisotropy < 1, or when anisotropy > 1 is requested with
    // any non-linear filter.
    t.expectValidationError(() => {
      t.device.createSampler({
        minFilter,
        magFilter,
        mipmapFilter,
        maxAnisotropy,
      });
    }, maxAnisotropy < 1 || (maxAnisotropy > 1 && !(minFilter === 'linear' && magFilter === 'linear' && mipmapFilter === 'linear')));
  });
|
sarahM0/cts
|
src/webgpu/api/operation/memory_sync/buffer/rw_and_wr.spec.ts
|
<reponame>sarahM0/cts
export const description = `
Memory Synchronization Tests for Buffer: read before write and read after write.
- Create a single buffer and initialize it to 0, wait on the fence to ensure the data is initialized.
Write a number (say 1) into the buffer via render pass, compute pass, copy or writeBuffer.
Read the data and use it in render, compute, or copy.
Wait on another fence, then call expectContents to verify the written buffer.
This is a read-after write test but if the write and read operations are reversed, it will be a read-before-write test.
- x= write op: {storage buffer in {compute, render, render-via-bundle}, t2b copy dst, b2b copy dst, writeBuffer}
- x= read op: {index buffer, vertex buffer, indirect buffer, uniform buffer, {readonly, readwrite} storage buffer in {compute, render, render-via-bundle}, b2b copy src, b2t copy src}
- x= read-write sequence: {read then write, write then read}
- if pass type is the same, x= {single pass, separate passes} (note: render has loose guarantees)
- if not single pass, x= writes in {same cmdbuf, separate cmdbufs, separate submits, separate queues}
TODO: Tests with more than one buffer to try to stress implementations a little bit more.
`;

import { makeTestGroup } from '../../../../../common/framework/test_group.js';

import { BufferSyncTest } from './buffer_sync_test.js';

// No tests are registered in this file yet; the group exists so cases
// following the matrix described above can be added against BufferSyncTest.
export const g = makeTestGroup(BufferSyncTest);
|
sarahM0/cts
|
src/stress/queries/resolve.spec.ts
|
<gh_stars>10-100
export const description = `
Stress tests for query resolution.
`;

import { makeTestGroup } from '../../common/framework/test_group.js';
import { GPUTest } from '../../webgpu/gpu_test.js';

export const g = makeTestGroup(GPUTest);

// Stub: the test name is registered and described, but the body is not yet
// implemented.
g.test('many_large_sets')
  .desc(
    `Tests a huge number of resolveQuerySet operations on a huge number of
query sets between render passes.`
  )
  .unimplemented();
|
sarahM0/cts
|
src/common/internal/util.ts
|
<filename>src/common/internal/util.ts
/**
 * An Error whose stack trace has been removed. `tool/` scripts throw this to
 * exit fatally with a clean, user-friendly message (and no confusing stack).
 */
export class StacklessError extends Error {
  constructor(message: string) {
    super(message);
    // Discard the captured stack so only the message is reported.
    this.stack = undefined;
  }
}
|
sarahM0/cts
|
src/common/internal/query/validQueryPart.ts
|
<reponame>sarahM0/cts<filename>src/common/internal/query/validQueryPart.ts
/**
 * Pattern a single query part must match: one or more ASCII word characters
 * (letters, digits, underscore), with nothing else allowed.
 * Applies to group parts, test parts, params keys.
 */
export const validQueryPart = /^[a-zA-Z0-9_]+$/;
|
sarahM0/cts
|
src/common/runtime/helper/options.ts
|
<gh_stars>10-100
/** Lazily-parsed URL of the current page (browser only). */
let windowURL: URL | undefined = undefined;

/** Parses and caches `window.location` on first use. */
function getWindowURL() {
  windowURL ??= new URL(window.location.toString());
  return windowURL;
}

/**
 * Returns whether a boolean-ish option is enabled in the query string.
 * An option counts as enabled when present with any value other than '0'
 * (including an empty value).
 */
export function optionEnabled(
  opt: string,
  searchParams: URLSearchParams = getWindowURL().searchParams
): boolean {
  const value = searchParams.get(opt);
  if (value === null) {
    return false;
  }
  return value !== '0';
}
|
sarahM0/cts
|
src/common/internal/stack.ts
|
/**
 * Returns the stack trace of an Error for logging, with the redundant
 * "Error: message" header stripped and everything below the deepest `.spec.`
 * frame removed (e.g. RunCaseSpecific, processTicksAndRejections, etc.).
 */
export function extractImportantStackTrace(e: Error): string {
  if (!e.stack) {
    return '';
  }
  let stack = e.stack;
  const header = 'Error: ' + e.message + '\n';
  if (stack.startsWith(header)) {
    stack = stack.slice(header.length);
  }
  // Scan from the bottom: keep frames up to and including the last line that
  // mentions `.spec.`; everything below it is framework/runtime boilerplate.
  const lines = stack.split('\n');
  for (let i = lines.length - 1; i >= 0; --i) {
    if (lines[i].includes('.spec.')) {
      return lines.slice(0, i + 1).join('\n');
    }
  }
  return stack;
}
// *** Examples ***
//
// Node fail()
// > Error:
// > at CaseRecorder.fail (/Users/kainino/src/cts/src/common/framework/logger.ts:99:30)
// > at RunCaseSpecific.exports.g.test.t [as fn] (/Users/kainino/src/cts/src/unittests/logger.spec.ts:80:7)
// x at RunCaseSpecific.run (/Users/kainino/src/cts/src/common/framework/test_group.ts:121:18)
// x at processTicksAndRejections (internal/process/task_queues.js:86:5)
//
// Node throw
// > Error: hello
// > at RunCaseSpecific.g.test.t [as fn] (/Users/kainino/src/cts/src/unittests/test_group.spec.ts:51:11)
// x at RunCaseSpecific.run (/Users/kainino/src/cts/src/common/framework/test_group.ts:121:18)
// x at processTicksAndRejections (internal/process/task_queues.js:86:5)
//
// Firefox fail()
// > fail@http://localhost:8080/out/framework/logger.js:104:30
// > expect@http://localhost:8080/out/framework/default_fixture.js:59:16
// > @http://localhost:8080/out/unittests/util.spec.js:35:5
// x run@http://localhost:8080/out/framework/test_group.js:119:18
//
// Firefox throw
// > @http://localhost:8080/out/unittests/test_group.spec.js:48:11
// x run@http://localhost:8080/out/framework/test_group.js:119:18
//
// Safari fail()
// > fail@http://localhost:8080/out/framework/logger.js:104:39
// > expect@http://localhost:8080/out/framework/default_fixture.js:59:20
// > http://localhost:8080/out/unittests/util.spec.js:35:11
// x http://localhost:8080/out/framework/test_group.js:119:20
// x asyncFunctionResume@[native code]
// x [native code]
// x promiseReactionJob@[native code]
//
// Safari throw
// > http://localhost:8080/out/unittests/test_group.spec.js:48:20
// x http://localhost:8080/out/framework/test_group.js:119:20
// x asyncFunctionResume@[native code]
// x [native code]
// x promiseReactionJob@[native code]
//
// Chrome fail()
// x Error
// x at CaseRecorder.fail (http://localhost:8080/out/framework/logger.js:104:30)
// x at DefaultFixture.expect (http://localhost:8080/out/framework/default_fixture.js:59:16)
// > at RunCaseSpecific.fn (http://localhost:8080/out/unittests/util.spec.js:35:5)
// x at RunCaseSpecific.run (http://localhost:8080/out/framework/test_group.js:119:18)
// x at async runCase (http://localhost:8080/out/runtime/standalone.js:37:17)
// x at async http://localhost:8080/out/runtime/standalone.js:102:7
//
// Chrome throw
// x Error: hello
// > at RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:48:11)
// x at RunCaseSpecific.run (http://localhost:8080/out/framework/test_group.js:119:18)"
// x at async Promise.all (index 0)
// x at async TestGroupTest.run (http://localhost:8080/out/unittests/test_group_test.js:6:5)
// x at async RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:53:15)
// x at async RunCaseSpecific.run (http://localhost:8080/out/framework/test_group.js:119:7)
// x at async runCase (http://localhost:8080/out/runtime/standalone.js:37:17)
// x at async http://localhost:8080/out/runtime/standalone.js:102:7
|
sarahM0/cts
|
src/webgpu/api/operation/buffers/map_oom.spec.ts
|
<gh_stars>10-100
export const description =
  'Test out-of-memory conditions creating large mappable/mappedAtCreation buffers.';

import { kUnitCaseParamsBuilder } from '../../../../common/framework/params_builder.js';
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { kBufferUsages } from '../../../capability_info.js';
import { GPUTest } from '../../../gpu_test.js';
import { kMaxSafeMultipleOf8 } from '../../../util/math.js';

// Shared parameter matrix: when `oom` is true, sizes are chosen to exhaust
// allocation; otherwise a small 16-byte buffer serves as the control case.
const oomAndSizeParams = kUnitCaseParamsBuilder
  .combine('oom', [false, true])
  .expand('size', ({ oom }) => {
    return oom
      ? [
          kMaxSafeMultipleOf8,
          0x20_0000_0000, // 128 GB
        ]
      : [16];
  });

export const g = makeTestGroup(GPUTest);
g.test('mapAsync')
  .desc(
    `Test creating a large mappable buffer should produce an out-of-memory error if allocation fails.
- The resulting buffer is an error buffer, so mapAsync rejects and produces a validation error.
- Calling getMappedRange should throw an OperationError because the buffer is not in the mapped state.
- unmap() throws an OperationError if mapping failed, and otherwise should detach the ArrayBuffer.
`
  )
  .params(
    oomAndSizeParams //
      .beginSubcases()
      .combine('write', [false, true])
  )
  .fn(async t => {
    const { oom, write, size } = t.params;
    // In the OOM case buffer creation itself must raise out-of-memory.
    const buffer = t.expectGPUError(
      'out-of-memory',
      () =>
        t.device.createBuffer({
          size,
          usage: write ? GPUBufferUsage.MAP_WRITE : GPUBufferUsage.MAP_READ,
        }),
      oom
    );
    const promise = t.expectGPUError(
      'validation', // Should be a validation error since the buffer is invalid.
      () => buffer.mapAsync(write ? GPUMapMode.WRITE : GPUMapMode.READ),
      oom
    );
    if (oom) {
      // Should also reject in addition to the validation error.
      t.shouldReject('OperationError', promise);
      // Should throw an OperationError because the buffer is not mapped.
      // Note: not a RangeError because the state of the buffer is checked first.
      t.shouldThrow('OperationError', () => {
        buffer.getMappedRange();
      });
      // Should be a validation error since the buffer failed to be mapped.
      t.expectGPUError('validation', () => buffer.unmap());
    } else {
      // Control path: mapping succeeds and unmap detaches the ArrayBuffer.
      await promise;
      const arraybuffer = buffer.getMappedRange();
      t.expect(arraybuffer.byteLength === size);
      buffer.unmap();
      t.expect(arraybuffer.byteLength === 0, 'Mapping should be detached');
    }
  });
g.test('mappedAtCreation,full_getMappedRange')
  .desc(
    `Test creating a very large buffer mappedAtCreation buffer should produce
an out-of-memory error if allocation fails.
- Because the buffer can be immediately mapped, getMappedRange throws an OperationError only
because such a large ArrayBuffer cannot be created.
- unmap() should not throw.
`
  )
  .params(
    oomAndSizeParams //
      .beginSubcases()
      .combine('usage', kBufferUsages)
  )
  .fn(async t => {
    const { oom, usage, size } = t.params;
    const buffer = t.expectGPUError(
      'out-of-memory',
      () => t.device.createBuffer({ mappedAtCreation: true, size, usage }),
      oom
    );
    const f = () => buffer.getMappedRange();
    let mapping: ArrayBuffer | undefined = undefined;
    if (oom) {
      // getMappedRange is normally valid on OOM buffers, but this one fails because the
      // (default) range is too large to create the returned ArrayBuffer.
      // NOTE(review): the description above says OperationError but the code
      // asserts RangeError — confirm which one the spec requires here.
      t.shouldThrow('RangeError', f);
    } else {
      mapping = f();
    }
    // Should be valid because buffer is mapped, regardless of OOM.
    buffer.unmap();
    if (mapping !== undefined) {
      t.expect(mapping.byteLength === 0, 'Mapping should be detached');
    }
  });
g.test('mappedAtCreation,smaller_getMappedRange')
  .desc(
    `Test creating a very large mappedAtCreation buffer should produce
an out-of-memory error if allocation fails.
- Because the buffer can be immediately mapped, getMappedRange does not throw an OperationError. Calling it on a small range of the buffer successfully returns an ArrayBuffer.
- unmap() should detach the ArrayBuffer.
`
  )
  .params(
    oomAndSizeParams //
      .beginSubcases()
      .combine('usage', kBufferUsages)
  )
  .fn(async t => {
    const { oom, usage, size } = t.params;
    const buffer = t.expectGPUError(
      'out-of-memory',
      () => t.device.createBuffer({ mappedAtCreation: true, size, usage }),
      oom
    );
    // Note: It is always valid to get mapped ranges of a GPUBuffer that is mapped at creation,
    // even if it is invalid, because the Content timeline might not know it is invalid.
    // Should be valid because mappedAtCreation was set, regardless of OOM.
    // A 16-byte sub-range is small enough to back with a real ArrayBuffer.
    const mapping = buffer.getMappedRange(0, 16);
    t.expect(mapping.byteLength === 16);
    // Should be valid because buffer is mapped, regardless of OOM.
    buffer.unmap();
    t.expect(mapping.byteLength === 0, 'Mapping should be detached');
  });
|
sarahM0/cts
|
src/webgpu/shader/execution/builtin/any.spec.ts
|
export const description = `
Execution Tests for the 'any' builtin function
`;

import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
import { False, True, TypeBool, TypeVec, vec2, vec3, vec4 } from '../../../util/conversion.js';

import { run } from './builtin.js';

export const g = makeTestGroup(GPUTest);

g.test('logical_builtin_functions,vector_any')
  .uniqueId('ac2b3a100379d70f')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#logical-builtin-functions')
  .desc(
    `
vector any:
e: vecN<bool> any(e): bool Returns true if any component of e is true. (OpAny)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u =>
    u
      .combine('storageClass', ['uniform', 'storage_r', 'storage_rw'] as const)
      .combine('overload', ['scalar', 'vec2', 'vec3', 'vec4'] as const)
  )
  .fn(async t => {
    // Exhaustive truth tables per overload: the expected value is the logical
    // OR of the components (scalar `any` is the identity).
    const overloads = {
      scalar: {
        type: TypeBool,
        cases: [
          { input: False, expected: False },
          { input: True, expected: True },
        ],
      },
      vec2: {
        type: TypeVec(2, TypeBool),
        cases: [
          { input: vec2(False, False), expected: False },
          { input: vec2(True, False), expected: True },
          { input: vec2(False, True), expected: True },
          { input: vec2(True, True), expected: True },
        ],
      },
      vec3: {
        type: TypeVec(3, TypeBool),
        cases: [
          { input: vec3(False, False, False), expected: False },
          { input: vec3(True, False, False), expected: True },
          { input: vec3(False, True, False), expected: True },
          { input: vec3(True, True, False), expected: True },
          { input: vec3(False, False, True), expected: True },
          { input: vec3(True, False, True), expected: True },
          { input: vec3(False, True, True), expected: True },
          { input: vec3(True, True, True), expected: True },
        ],
      },
      vec4: {
        type: TypeVec(4, TypeBool),
        cases: [
          { input: vec4(False, False, False, False), expected: False },
          { input: vec4(False, True, False, False), expected: True },
          { input: vec4(False, False, True, False), expected: True },
          { input: vec4(False, True, True, False), expected: True },
          { input: vec4(False, False, False, True), expected: True },
          { input: vec4(False, True, False, True), expected: True },
          { input: vec4(False, False, True, True), expected: True },
          { input: vec4(False, True, True, True), expected: True },
          { input: vec4(True, False, False, False), expected: True },
          { input: vec4(True, False, False, True), expected: True },
          { input: vec4(True, False, True, False), expected: True },
          { input: vec4(True, False, True, True), expected: True },
          { input: vec4(True, True, False, False), expected: True },
          { input: vec4(True, True, False, True), expected: True },
          { input: vec4(True, True, True, False), expected: True },
          { input: vec4(True, True, True, True), expected: True },
        ],
      },
    };
    // Drives the shared builtin-execution harness with the selected overload.
    const overload = overloads[t.params.overload];
    run(t, 'any', [overload.type], TypeBool, t.params, overload.cases);
  });
|
sarahM0/cts
|
src/webgpu/util/unions.ts
|
/**
 * Reifies a `GPUExtent3D` into a `Required<GPUExtent3DDict>`, filling any
 * omitted dimension with its default of 1.
 */
export function reifyExtent3D(
  val: Readonly<GPUExtent3DDict> | Iterable<number>
): Required<GPUExtent3DDict> {
  // TypeScript doesn't seem to want to narrow the types here properly, so hack around it.
  if (typeof (val as Iterable<number>)[Symbol.iterator] === 'function') {
    const [w, h, d] = Array.from(val as Iterable<number>);
    return { width: w ?? 1, height: h ?? 1, depthOrArrayLayers: d ?? 1 };
  }
  const dict = val as Readonly<GPUExtent3DDict>;
  return {
    width: dict.width ?? 1,
    height: dict.height ?? 1,
    depthOrArrayLayers: dict.depthOrArrayLayers ?? 1,
  };
}
|
sarahM0/cts
|
src/unittests/logger.spec.ts
|
export const description = `
Unit tests for namespaced logging system.
Also serves as a larger test of async test functions, and of the logging system.
`;

import { SkipTestCase } from '../common/framework/fixture.js';
import { makeTestGroup } from '../common/framework/test_group.js';
import { Logger } from '../common/internal/logging/logger.js';
import { assert } from '../common/util/util.js';

import { UnitTest } from './unit_test.js';

export const g = makeTestGroup(UnitTest);

// New records are registered under their name and stay 'running' with a
// negative time until finished (asserted below).
g.test('construct').fn(t => {
  const mylog = new Logger({ overrideDebugMode: true });
  const [, res1] = mylog.record('one');
  const [, res2] = mylog.record('two');
  t.expect(mylog.results.get('one') === res1);
  t.expect(mylog.results.get('two') === res2);
  t.expect(res1.logs === undefined);
  t.expect(res1.status === 'running');
  t.expect(res1.timems < 0);
  t.expect(res2.logs === undefined);
  t.expect(res2.status === 'running');
  t.expect(res2.timems < 0);
});

// A record with no log calls finishes as 'pass' with a non-negative time.
g.test('empty').fn(t => {
  const mylog = new Logger({ overrideDebugMode: true });
  const [rec, res] = mylog.record('one');
  rec.start();
  t.expect(res.status === 'running');
  rec.finish();
  t.expect(res.status === 'pass');
  t.expect(res.timems >= 0);
});

// Debug-level logs do not change the result status.
g.test('pass').fn(t => {
  const mylog = new Logger({ overrideDebugMode: true });
  const [rec, res] = mylog.record('one');
  rec.start();
  rec.debug(new Error('hello'));
  t.expect(res.status === 'running');
  rec.finish();
  t.expect(res.status === 'pass');
  t.expect(res.timems >= 0);
});

g.test('skip').fn(t => {
  const mylog = new Logger({ overrideDebugMode: true });
  const [rec, res] = mylog.record('one');
  rec.start();
  rec.skipped(new SkipTestCase());
  rec.debug(new Error('hello'));
  rec.finish();
  t.expect(res.status === 'skip');
  t.expect(res.timems >= 0);
});

// 'warn' outranks 'skip' in the final status.
g.test('warn').fn(t => {
  const mylog = new Logger({ overrideDebugMode: true });
  const [rec, res] = mylog.record('one');
  rec.start();
  rec.warn(new Error('hello'));
  rec.skipped(new SkipTestCase());
  rec.finish();
  t.expect(res.status === 'warn');
  t.expect(res.timems >= 0);
});

// Each of the three failure channels (expectationFailed, validationFailed,
// threw) forces a 'fail' status regardless of later warn/skip calls.
g.test('fail,expectationFailed').fn(t => {
  const mylog = new Logger({ overrideDebugMode: true });
  const [rec, res] = mylog.record('one');
  rec.start();
  rec.expectationFailed(new Error('bye'));
  rec.warn(new Error());
  rec.skipped(new SkipTestCase());
  rec.finish();
  t.expect(res.status === 'fail');
  t.expect(res.timems >= 0);
});

g.test('fail,validationFailed').fn(t => {
  const mylog = new Logger({ overrideDebugMode: true });
  const [rec, res] = mylog.record('one');
  rec.start();
  rec.validationFailed(new Error('bye'));
  rec.warn(new Error());
  rec.skipped(new SkipTestCase());
  rec.finish();
  t.expect(res.status === 'fail');
  t.expect(res.timems >= 0);
});

g.test('fail,threw').fn(t => {
  const mylog = new Logger({ overrideDebugMode: true });
  const [rec, res] = mylog.record('one');
  rec.start();
  rec.threw(new Error('bye'));
  rec.warn(new Error());
  rec.skipped(new SkipTestCase());
  rec.finish();
  t.expect(res.status === 'fail');
  t.expect(res.timems >= 0);
});

// With debug mode off, the two debug() calls are dropped from the log list
// (5 entries vs 3), while the status is unaffected.
g.test('debug')
  .paramsSimple([
    { debug: true, _logsCount: 5 }, //
    { debug: false, _logsCount: 3 },
  ])
  .fn(t => {
    const { debug, _logsCount } = t.params;
    const mylog = new Logger({ overrideDebugMode: debug });
    const [rec, res] = mylog.record('one');
    rec.start();
    rec.debug(new Error('hello'));
    rec.expectationFailed(new Error('bye'));
    rec.warn(new Error());
    rec.skipped(new SkipTestCase());
    rec.debug(new Error('foo'));
    rec.finish();
    t.expect(res.status === 'fail');
    t.expect(res.timems >= 0);
    assert(res.logs !== undefined);
    t.expect(res.logs.length === _logsCount);
  });
|
sarahM0/cts
|
src/webgpu/api/operation/render_pipeline/entry_point_name.spec.ts
|
// Placeholder suite: the description lists planned entry-point-name cases;
// no tests are registered yet.
export const description = `
TODO:
- Test some weird but valid values for entry point name (both module and pipeline creation
should succeed).
- Test using each of many entry points in the module (should succeed).
- Test using an entry point with the wrong stage (should fail).
`;

import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';

export const g = makeTestGroup(GPUTest);
|
sarahM0/cts
|
src/common/internal/query/compare.ts
|
import { TestParams } from '../../framework/fixture.js';
import { assert, objectEquals } from '../../util/util.js';
import { paramKeyIsPublic } from '../params_utils.js';
import { TestQuery } from './query.js';
/** Relative ordering of two queries; used when building the test tree. */
export const enum Ordering {
  Unordered,
  StrictSuperset,
  Equal,
  StrictSubset,
}
/**
 * Compares two queries for their ordering (which is used to build the tree).
 *
 * Levels are compared outside-in: suite, then file path, then test path, then
 * params. The first level that differs — or is a multi-level wildcard on
 * either side — determines the result.
 *
 * See src/unittests/query_compare.spec.ts for examples.
 */
export function compareQueries(a: TestQuery, b: TestQuery): Ordering {
  if (a.suite !== b.suite) {
    return Ordering.Unordered;
  }

  const filePathOrdering = comparePaths(a.filePathParts, b.filePathParts);
  if (filePathOrdering !== Ordering.Equal || a.isMultiFile || b.isMultiFile) {
    return compareOneLevel(filePathOrdering, a.isMultiFile, b.isMultiFile);
  }

  // File paths are equal and neither query is multi-file, so both must have
  // test paths.
  assert('testPathParts' in a && 'testPathParts' in b);
  const testPathOrdering = comparePaths(a.testPathParts, b.testPathParts);
  if (testPathOrdering !== Ordering.Equal || a.isMultiTest || b.isMultiTest) {
    return compareOneLevel(testPathOrdering, a.isMultiTest, b.isMultiTest);
  }

  assert('params' in a && 'params' in b);
  const paramsPathOrdering = comparePublicParamsPaths(a.params, b.params);
  if (paramsPathOrdering !== Ordering.Equal || a.isMultiCase || b.isMultiCase) {
    return compareOneLevel(paramsPathOrdering, a.isMultiCase, b.isMultiCase);
  }
  return Ordering.Equal;
}
/**
 * Compares a single level of a query.
 *
 * "IsBig" means the query is big relative to the level, e.g. for test-level:
 * - Anything >= `suite:a,*` is big
 * - Anything <= `suite:a:*` is small
 */
function compareOneLevel(ordering: Ordering, aIsBig: boolean, bIsBig: boolean): Ordering {
  assert(ordering !== Ordering.Equal || aIsBig || bIsBig);
  if (ordering === Ordering.Unordered) {
    return Ordering.Unordered;
  }
  if (aIsBig === bIsBig) {
    // Both big: the path ordering carries over directly. Both small: the
    // Equal case was already handled by the caller, so they are unordered.
    return aIsBig ? ordering : Ordering.Unordered;
  }
  // Exactly one of (a, b) is big.
  if (aIsBig) {
    return ordering !== Ordering.StrictSubset ? Ordering.StrictSuperset : Ordering.Unordered;
  }
  return ordering !== Ordering.StrictSuperset ? Ordering.StrictSubset : Ordering.Unordered;
}
/**
 * Compares two path arrays element-wise. Any mismatch in the shared prefix is
 * Unordered; otherwise the shorter path (a prefix) is the StrictSuperset.
 */
function comparePaths(a: readonly string[], b: readonly string[]): Ordering {
  const common = Math.min(a.length, b.length);
  for (let i = 0; i < common; ++i) {
    if (a[i] !== b[i]) {
      return Ordering.Unordered;
    }
  }
  if (a.length === b.length) {
    return Ordering.Equal;
  }
  return a.length < b.length ? Ordering.StrictSuperset : Ordering.StrictSubset;
}
/**
 * Compares the public params of two queries. Shared keys must have equal
 * values; the side with no extra keys constrains less and so matches a
 * broader set (StrictSuperset).
 */
export function comparePublicParamsPaths(a: TestParams, b: TestParams): Ordering {
  const aKeys = Object.keys(a).filter(k => paramKeyIsPublic(k));
  const commonKeys = new Set(aKeys.filter(k => k in b));

  // Any disagreement on a shared key makes the queries unordered.
  for (const k of commonKeys) {
    if (!objectEquals(a[k], b[k])) {
      return Ordering.Unordered;
    }
  }

  const bKeys = Object.keys(b).filter(k => paramKeyIsPublic(k));
  const aExtra = aKeys.length - commonKeys.size;
  const bExtra = bKeys.length - commonKeys.size;
  if (aExtra === 0) {
    return bExtra === 0 ? Ordering.Equal : Ordering.StrictSuperset;
  }
  return bExtra === 0 ? Ordering.StrictSubset : Ordering.Unordered;
}
|
sarahM0/cts
|
src/webgpu/api/validation/encoding/programmable/pipeline_bind_group_compat.spec.ts
|
// File-level description shown in test listings; the TODO items below track
// planned coverage for bind group / pipeline layout compatibility validation.
export const description = `
TODO:
- test compatibility between bind groups and pipelines
- the binding resource in bindGroups[i].layout is "group-equivalent" (value-equal) to pipelineLayout.bgls[i].
- in the test fn, test once without the dispatch/draw (should always be valid) and once with
the dispatch/draw, to make sure the validation happens in dispatch/draw.
- x= {dispatch, all draws} (dispatch/draw should be size 0 to make sure validation still happens if no-op)
- x= all relevant stages
TODO: subsume existing test, rewrite fixture as needed.
`;
import { kUnitCaseParamsBuilder } from '../../../../../common/framework/params_builder.js';
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { memcpy, unreachable } from '../../../../../common/util/util.js';
import {
kSamplerBindingTypes,
kShaderStageCombinations,
kBufferBindingTypes,
ValidBindableResource,
} from '../../../../capability_info.js';
import { GPUConst } from '../../../../constants.js';
import {
ProgrammableEncoderType,
kProgrammableEncoderTypes,
} from '../../../../util/command_buffer_maker.js';
import { ValidationTest } from '../../validation_test.js';
// Commands that trigger dispatch/draw-time validation, per encoder kind.
const kComputeCmds = ['dispatch', 'dispatchIndirect'] as const;
type ComputeCmd = typeof kComputeCmds[number];
const kRenderCmds = ['draw', 'drawIndexed', 'drawIndirect', 'drawIndexedIndirect'] as const;
type RenderCmd = typeof kRenderCmds[number];

// Test resource type compatibility in pipeline and bind group
// TODO: Add externalTexture
const kResourceTypes: ValidBindableResource[] = [
  'uniformBuf',
  'filtSamp',
  'sampledTex',
  'storageTex',
];

/** Returns the command set appropriate for the given encoder kind. */
function getTestCmds(
  encoderType: ProgrammableEncoderType
): readonly ComputeCmd[] | readonly RenderCmd[] {
  if (encoderType === 'compute pass') {
    return kComputeCmds;
  }
  return kRenderCmds;
}
// Base matrix shared by the compatibility tests: every encoder kind crossed
// with each of its commands, issued with both zero-sized and non-zero work.
const kCompatTestParams = kUnitCaseParamsBuilder
  .combine('encoderType', kProgrammableEncoderTypes)
  .expand('call', p => getTestCmds(p.encoderType))
  .combine('callWithZero', [true, false]);
class F extends ValidationTest {
getIndexBuffer(): GPUBuffer {
return this.device.createBuffer({
size: 8 * Uint32Array.BYTES_PER_ELEMENT,
usage: GPUBufferUsage.INDEX,
});
}
getIndirectBuffer(indirectParams: Array<number>): GPUBuffer {
const buffer = this.device.createBuffer({
mappedAtCreation: true,
size: indirectParams.length * Uint32Array.BYTES_PER_ELEMENT,
usage: GPUBufferUsage.INDIRECT | GPUBufferUsage.COPY_DST,
});
memcpy({ src: new Uint32Array(indirectParams) }, { dst: buffer.getMappedRange() });
buffer.unmap();
return buffer;
}
getBindingResourceType(entry: GPUBindGroupLayoutEntry): ValidBindableResource {
if (entry.buffer !== undefined) return 'uniformBuf';
if (entry.sampler !== undefined) return 'filtSamp';
if (entry.texture !== undefined) return 'sampledTex';
if (entry.storageTexture !== undefined) return 'storageTex';
unreachable();
}
createRenderPipelineWithLayout(
bindGroups: Array<Array<GPUBindGroupLayoutEntry>>
): GPURenderPipeline {
const shader = `
[[stage(vertex)]] fn vs_main() -> [[builtin(position)]] vec4<f32> {
return vec4<f32>(1.0, 1.0, 0.0, 1.0);
}
[[stage(fragment)]] fn fs_main() -> [[location(0)]] vec4<f32> {
return vec4<f32>(0.0, 1.0, 0.0, 1.0);
}
`;
const module = this.device.createShaderModule({ code: shader });
const pipeline = this.device.createRenderPipeline({
layout: this.device.createPipelineLayout({
bindGroupLayouts: bindGroups.map(entries => this.device.createBindGroupLayout({ entries })),
}),
vertex: {
module,
entryPoint: 'vs_main',
},
fragment: {
module,
entryPoint: 'fs_main',
targets: [{ format: 'rgba8unorm' }],
},
primitive: { topology: 'triangle-list' },
});
return pipeline;
}
createComputePipelineWithLayout(
bindGroups: Array<Array<GPUBindGroupLayoutEntry>>
): GPUComputePipeline {
const shader = `
[[stage(compute), workgroup_size(1, 1, 1)]]
fn main([[builtin(global_invocation_id)]] GlobalInvocationID : vec3<u32>) {
}
`;
const module = this.device.createShaderModule({ code: shader });
const pipeline = this.device.createComputePipeline({
layout: this.device.createPipelineLayout({
bindGroupLayouts: bindGroups.map(entries => this.device.createBindGroupLayout({ entries })),
}),
compute: {
module,
entryPoint: 'main',
},
});
return pipeline;
}
createBindGroupWithLayout(bglEntries: Array<GPUBindGroupLayoutEntry>): GPUBindGroup {
const bgEntries: Array<GPUBindGroupEntry> = [];
for (const entry of bglEntries) {
const resource = this.getBindingResource(this.getBindingResourceType(entry));
bgEntries.push({
binding: entry.binding,
resource,
});
}
return this.device.createBindGroup({
entries: bgEntries,
layout: this.device.createBindGroupLayout({ entries: bglEntries }),
});
}
doCompute(pass: GPUComputePassEncoder, call: ComputeCmd | undefined, callWithZero: boolean) {
const x = callWithZero ? 0 : 1;
switch (call) {
case 'dispatch':
pass.dispatch(x, 1, 1);
break;
case 'dispatchIndirect':
pass.dispatchIndirect(this.getIndirectBuffer([x, 1, 1]), 0);
break;
default:
break;
}
}
doRender(
pass: GPURenderPassEncoder | GPURenderBundleEncoder,
call: RenderCmd | undefined,
callWithZero: boolean
) {
const vertexCount = callWithZero ? 0 : 3;
switch (call) {
case 'draw':
pass.draw(vertexCount, 1, 0, 0);
break;
case 'drawIndexed':
pass.setIndexBuffer(this.getIndexBuffer(), 'uint32');
pass.drawIndexed(vertexCount, 1, 0, 0, 0);
break;
case 'drawIndirect':
pass.drawIndirect(this.getIndirectBuffer([vertexCount, 1, 0, 0, 0]), 0);
break;
case 'drawIndexedIndirect':
pass.setIndexBuffer(this.getIndexBuffer(), 'uint32');
pass.drawIndexedIndirect(this.getIndirectBuffer([vertexCount, 1, 0, 0, 0]), 0);
break;
default:
break;
}
}
createBindGroupLayoutEntry(
encoderType: ProgrammableEncoderType,
resourceType: ValidBindableResource,
useU32Array: boolean
): GPUBindGroupLayoutEntry {
const entry: GPUBindGroupLayoutEntry = {
binding: 0,
visibility: encoderType === 'compute pass' ? GPUShaderStage.COMPUTE : GPUShaderStage.FRAGMENT,
};
switch (resourceType) {
case 'uniformBuf':
entry.buffer = { hasDynamicOffset: useU32Array }; // default type: uniform
break;
case 'filtSamp':
entry.sampler = {}; // default type: filtering
break;
case 'sampledTex':
entry.texture = {}; // default sampleType: float
break;
case 'storageTex':
entry.storageTexture = { access: 'write-only', format: 'rgba8unorm' };
break;
}
return entry;
}
// Encodes a pass of the given type, binds the given bind groups, optionally issues the
// given dispatch/draw call, and expects encoder.finish() to succeed iff `success`.
runTest(
  encoderType: ProgrammableEncoderType,
  pipeline: GPUComputePipeline | GPURenderPipeline,
  bindGroups: Array<GPUBindGroup | undefined>,
  dynamicOffsets: Array<number> | undefined,
  call: ComputeCmd | RenderCmd | undefined,
  callWithZero: boolean,
  success: boolean
) {
  const { encoder, validateFinish } = this.createEncoder(encoderType);
  if (encoder instanceof GPUComputePassEncoder) {
    encoder.setPipeline(pipeline as GPUComputePipeline);
  } else {
    encoder.setPipeline(pipeline as GPURenderPipeline);
  }
  for (let i = 0; i < bindGroups.length; i++) {
    const bindGroup = bindGroups[i];
    if (!bindGroup) {
      // Stop at the first missing bind group; later slots are deliberately left unset.
      break;
    }
    if (dynamicOffsets) {
      // Use the (typed array, start, length) overload of setBindGroup.
      encoder.setBindGroup(
        i,
        bindGroup,
        new Uint32Array(dynamicOffsets),
        0,
        dynamicOffsets.length
      );
    } else {
      encoder.setBindGroup(i, bindGroup);
    }
  }
  // Issue the dispatch/draw (if any); this is where draw/dispatch-time validation happens.
  if (encoder instanceof GPUComputePassEncoder) {
    this.doCompute(encoder, call as ComputeCmd, callWithZero);
  } else {
    this.doRender(encoder, call as RenderCmd, callWithZero);
  }
  validateFinish(success);
}
}
export const g = makeTestGroup(F);

g.test('bind_groups_and_pipeline_layout_mismatch')
  .desc(
    `
Tests the bind groups must match the requirements of the pipeline layout.
- bind groups required by the pipeline layout are required.
- bind groups unused by the pipeline layout can be set or not.
`
  )
  .params(
    kCompatTestParams
      .beginSubcases()
      .combineWithParams([
        { setBindGroup0: true, setBindGroup1: true, setUnusedBindGroup2: true, _success: true },
        { setBindGroup0: true, setBindGroup1: true, setUnusedBindGroup2: false, _success: true },
        { setBindGroup0: true, setBindGroup1: false, setUnusedBindGroup2: true, _success: false },
        { setBindGroup0: false, setBindGroup1: true, setUnusedBindGroup2: true, _success: false },
        { setBindGroup0: false, setBindGroup1: false, setUnusedBindGroup2: false, _success: false },
      ])
      .combine('useU32Array', [false, true])
  )
  .fn(t => {
    const {
      encoderType,
      call,
      callWithZero,
      setBindGroup0,
      setBindGroup1,
      setUnusedBindGroup2,
      _success,
      useU32Array,
    } = t.params;
    // Compute passes use COMPUTE visibility; render passes/bundles use VERTEX.
    const visibility =
      encoderType === 'compute pass' ? GPUShaderStage.COMPUTE : GPUShaderStage.VERTEX;
    const bindGroupLayouts: Array<Array<GPUBindGroupLayoutEntry>> = [
      // bind group layout 0
      [
        {
          binding: 0,
          visibility,
          buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
        },
      ],
      // bind group layout 1
      [
        {
          binding: 0,
          visibility,
          buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
        },
      ],
    ];
    // Create required bind groups
    const bindGroup0 = setBindGroup0 ? t.createBindGroupWithLayout(bindGroupLayouts[0]) : undefined;
    const bindGroup1 = setBindGroup1 ? t.createBindGroupWithLayout(bindGroupLayouts[1]) : undefined;
    // Slot 2 is never referenced by the pipeline layout (which declares only two BGLs).
    const unusedBindGroup2 = setUnusedBindGroup2
      ? t.createBindGroupWithLayout(bindGroupLayouts[1])
      : undefined;
    // Create fixed pipeline
    const pipeline =
      encoderType === 'compute pass'
        ? t.createComputePipelineWithLayout(bindGroupLayouts)
        : t.createRenderPipelineWithLayout(bindGroupLayouts);
    const dynamicOffsets = useU32Array ? [0] : undefined;
    // Test without the dispatch/draw (should always be valid)
    t.runTest(
      encoderType,
      pipeline,
      [bindGroup0, bindGroup1, unusedBindGroup2],
      dynamicOffsets,
      undefined,
      false,
      true
    );
    // Test with the dispatch/draw, to make sure the validation happens in dispatch/draw.
    t.runTest(
      encoderType,
      pipeline,
      [bindGroup0, bindGroup1, unusedBindGroup2],
      dynamicOffsets,
      call,
      callWithZero,
      _success
    );
  });
g.test('buffer_binding,render_pipeline')
  .desc(
    `
The GPUBufferBindingLayout binding configuration should be exactly the
same in the PipelineLayout and the bind group.
- TODO: test more draw functions, e.g. indirect
- TODO: test more visibilities, e.g. vertex
- TODO: bind group should be created with different layout
`
  )
  .params(u => u.combine('type', kBufferBindingTypes))
  // Note: callback made synchronous — the body contains no awaits.
  .fn(t => {
    const { type } = t.params;

    // Create a fixed bind group using the default (uniform) buffer binding type.
    const uniformBuffer = t.getUniformBuffer();
    const bindGroup = t.device.createBindGroup({
      entries: [
        {
          binding: 0,
          resource: {
            buffer: uniformBuffer,
          },
        },
      ],
      layout: t.device.createBindGroupLayout({
        entries: [
          {
            binding: 0,
            visibility: GPUShaderStage.FRAGMENT,
            buffer: {}, // default type: uniform
          },
        ],
      }),
    });

    // Create a pipeline whose layout declares `type` for the same binding.
    const pipeline = t.createRenderPipelineWithLayout([
      [
        {
          binding: 0,
          visibility: GPUShaderStage.FRAGMENT,
          buffer: {
            type,
          },
        },
      ],
    ]);

    const { encoder, validateFinish } = t.createEncoder('render pass');
    encoder.setPipeline(pipeline);
    encoder.setBindGroup(0, bindGroup);
    encoder.draw(3);
    // Only a matching (or defaulted) 'uniform' type is compatible with the bind group.
    validateFinish(type === undefined || type === 'uniform');
  });
g.test('sampler_binding,render_pipeline')
  .desc(
    `
The GPUSamplerBindingLayout binding configuration should be exactly the
same in the PipelineLayout and the bind group.
- TODO: test more draw functions, e.g. indirect
- TODO: test more visibilities, e.g. vertex
`
  )
  .params(u =>
    u //
      .combine('bglType', kSamplerBindingTypes)
      .combine('bgType', kSamplerBindingTypes)
  )
  // Note: callback made synchronous — the body contains no awaits.
  .fn(t => {
    const { bglType, bgType } = t.params;

    // Create a bind group whose sampler matches bgType
    // ('comparison' requires a sampler with a compare function).
    const bindGroup = t.device.createBindGroup({
      entries: [
        {
          binding: 0,
          resource:
            bgType === 'comparison'
              ? t.device.createSampler({ compare: 'always' })
              : t.device.createSampler(),
        },
      ],
      layout: t.device.createBindGroupLayout({
        entries: [
          {
            binding: 0,
            visibility: GPUShaderStage.FRAGMENT,
            sampler: { type: bgType },
          },
        ],
      }),
    });

    // Create a pipeline whose layout declares bglType for the same binding.
    const pipeline = t.createRenderPipelineWithLayout([
      [
        {
          binding: 0,
          visibility: GPUShaderStage.FRAGMENT,
          sampler: {
            type: bglType,
          },
        },
      ],
    ]);

    const { encoder, validateFinish } = t.createEncoder('render pass');
    encoder.setPipeline(pipeline);
    encoder.setBindGroup(0, bindGroup);
    encoder.draw(3);
    // Sampler binding types must match exactly between bind group and pipeline layout.
    validateFinish(bglType === bgType);
  });
g.test('bgl_binding_mismatch')
  .desc(
    'Tests the binding number must exist or not exist in both bindGroups[i].layout and pipelineLayout.bgls[i]'
  )
  .params(
    kCompatTestParams
      .beginSubcases()
      .combineWithParams([
        { bgBindings: [0, 1, 2], plBindings: [0, 1, 2], _success: true },
        { bgBindings: [0, 1, 2], plBindings: [0, 1, 3], _success: false },
        { bgBindings: [0, 2], plBindings: [0, 2], _success: true },
        { bgBindings: [0, 2], plBindings: [2, 0], _success: true },
        { bgBindings: [0, 1, 2], plBindings: [0, 1], _success: false },
        { bgBindings: [0, 1], plBindings: [0, 1, 2], _success: false },
      ])
      .combine('useU32Array', [false, true])
  )
  .fn(t => {
    const {
      encoderType,
      call,
      callWithZero,
      bgBindings,
      plBindings,
      _success,
      useU32Array,
    } = t.params;
    const visibility =
      encoderType === 'compute pass' ? GPUShaderStage.COMPUTE : GPUShaderStage.VERTEX;
    // Build the bind group's layout from bgBindings.
    const bglEntries: Array<GPUBindGroupLayoutEntry> = [];
    for (const binding of bgBindings) {
      bglEntries.push({
        binding,
        visibility,
        buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
      });
    }
    const bindGroup = t.createBindGroupWithLayout(bglEntries);
    // Build the pipeline layout's BGL 0 from plBindings (may differ from the bind group's).
    const plEntries: Array<Array<GPUBindGroupLayoutEntry>> = [[]];
    for (const binding of plBindings) {
      plEntries[0].push({
        binding,
        visibility,
        buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
      });
    }
    const pipeline =
      encoderType === 'compute pass'
        ? t.createComputePipelineWithLayout(plEntries)
        : t.createRenderPipelineWithLayout(plEntries);
    // One dynamic offset per dynamic-offset binding in the bind group.
    const dynamicOffsets = useU32Array ? new Array(bgBindings.length).fill(0) : undefined;
    // Test without the dispatch/draw (should always be valid)
    t.runTest(encoderType, pipeline, [bindGroup], dynamicOffsets, undefined, false, true);
    // Test with the dispatch/draw, to make sure the validation happens in dispatch/draw.
    t.runTest(encoderType, pipeline, [bindGroup], dynamicOffsets, call, callWithZero, _success);
  });
g.test('bgl_visibility_mismatch')
  .desc('Tests the visibility in bindGroups[i].layout and pipelineLayout.bgls[i] must be matched')
  .params(
    kCompatTestParams
      .beginSubcases()
      .combine('bgVisibility', kShaderStageCombinations)
      // The pipeline layout's visibility must be valid for the encoder type under test.
      .expand('plVisibility', p =>
        p.encoderType === 'compute pass'
          ? ([GPUConst.ShaderStage.COMPUTE] as const)
          : ([
              GPUConst.ShaderStage.VERTEX,
              GPUConst.ShaderStage.FRAGMENT,
              GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT,
            ] as const)
      )
      .combine('useU32Array', [false, true])
  )
  .fn(t => {
    const { encoderType, call, callWithZero, bgVisibility, plVisibility, useU32Array } = t.params;
    // Bind group layout using the (possibly mismatched) bind-group visibility.
    const bglEntries: Array<GPUBindGroupLayoutEntry> = [
      {
        binding: 0,
        visibility: bgVisibility,
        buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
      },
    ];
    const bindGroup = t.createBindGroupWithLayout(bglEntries);
    // Pipeline layout using the pipeline-side visibility.
    const plEntries: Array<Array<GPUBindGroupLayoutEntry>> = [
      [
        {
          binding: 0,
          visibility: plVisibility,
          buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
        },
      ],
    ];
    const pipeline =
      encoderType === 'compute pass'
        ? t.createComputePipelineWithLayout(plEntries)
        : t.createRenderPipelineWithLayout(plEntries);
    const dynamicOffsets = useU32Array ? [0] : undefined;
    // Test without the dispatch/draw (should always be valid)
    t.runTest(encoderType, pipeline, [bindGroup], dynamicOffsets, undefined, false, true);
    // Test with the dispatch/draw, to make sure the validation happens in dispatch/draw.
    t.runTest(
      encoderType,
      pipeline,
      [bindGroup],
      dynamicOffsets,
      call,
      callWithZero,
      bgVisibility === plVisibility
    );
  });
g.test('bgl_resource_type_mismatch')
  .desc(
    `
Tests the binding resource type in bindGroups[i].layout and pipelineLayout.bgls[i] must be matched
- TODO: Test externalTexture
`
  )
  .params(
    kCompatTestParams
      .beginSubcases()
      .combine('bgResourceType', kResourceTypes)
      .combine('plResourceType', kResourceTypes)
      // Dynamic offsets only apply to buffer bindings.
      .expand('useU32Array', p => (p.bgResourceType === 'uniformBuf' ? [true, false] : [false]))
  )
  .fn(t => {
    const {
      encoderType,
      call,
      callWithZero,
      bgResourceType,
      plResourceType,
      useU32Array,
    } = t.params;
    // Bind group layout using the bind-group-side resource type.
    const bglEntries: Array<GPUBindGroupLayoutEntry> = [
      t.createBindGroupLayoutEntry(encoderType, bgResourceType, useU32Array),
    ];
    const bindGroup = t.createBindGroupWithLayout(bglEntries);
    // Pipeline layout using the (possibly different) pipeline-side resource type.
    const plEntries: Array<Array<GPUBindGroupLayoutEntry>> = [
      [t.createBindGroupLayoutEntry(encoderType, plResourceType, useU32Array)],
    ];
    const pipeline =
      encoderType === 'compute pass'
        ? t.createComputePipelineWithLayout(plEntries)
        : t.createRenderPipelineWithLayout(plEntries);
    const dynamicOffsets = useU32Array ? [0] : undefined;
    // Test without the dispatch/draw (should always be valid)
    t.runTest(encoderType, pipeline, [bindGroup], dynamicOffsets, undefined, false, true);
    // Test with the dispatch/draw, to make sure the validation happens in dispatch/draw.
    t.runTest(
      encoderType,
      pipeline,
      [bindGroup],
      dynamicOffsets,
      call,
      callWithZero,
      bgResourceType === plResourceType
    );
  });
|
sarahM0/cts
|
src/webgpu/util/texture/subresource.ts
|
<filename>src/webgpu/util/texture/subresource.ts
/** A range of indices expressed as { begin, count }. */
export interface BeginCountRange {
  begin: number;
  count: number;
}

/** A range of indices, expressed as { begin, end }. */
export interface BeginEndRange {
  begin: number;
  end: number;
}

/** Returns the exclusive end index of either range representation. */
function endOfRange(r: BeginEndRange | BeginCountRange): number {
  if ('count' in r) {
    return r.begin + r.count;
  }
  return r.end;
}

/** Yields every index in the range, in increasing order. */
function* rangeAsIterator(r: BeginEndRange | BeginCountRange): Generator<number> {
  for (let index = r.begin; index < endOfRange(r); ++index) {
    yield index;
  }
}
/**
 * Represents a range of subresources of a single-plane texture:
 * a min/max mip level and min/max array layer.
 */
export class SubresourceRange {
  readonly mipRange: BeginEndRange;
  readonly layerRange: BeginEndRange;

  constructor(subresources: {
    mipRange: BeginEndRange | BeginCountRange;
    layerRange: BeginEndRange | BeginCountRange;
  }) {
    // Normalize both ranges to the { begin, end } representation up front.
    const { mipRange, layerRange } = subresources;
    this.mipRange = { begin: mipRange.begin, end: endOfRange(mipRange) };
    this.layerRange = { begin: layerRange.begin, end: endOfRange(layerRange) };
  }

  /**
   * Iterates over the "rectangle" of { mip level, array layer } pairs represented by the range.
   */
  *each(): Generator<{ level: number; layer: number }> {
    const { mipRange, layerRange } = this;
    for (let level = mipRange.begin; level < mipRange.end; ++level) {
      for (let layer = layerRange.begin; layer < layerRange.end; ++layer) {
        yield { level, layer };
      }
    }
  }

  /**
   * Iterates over the mip levels represented by the range, each level including an iterator
   * over the array layers at that level.
   */
  *mipLevels(): Generator<{ level: number; layers: Generator<number> }> {
    const { mipRange, layerRange } = this;
    for (let level = mipRange.begin; level < mipRange.end; ++level) {
      // A fresh layer iterator is produced for each level.
      yield { level, layers: rangeAsIterator(layerRange) };
    }
  }
}
|
sarahM0/cts
|
src/webgpu/api/validation/encoding/cmds/debug.spec.ts
|
export const description = `
API validation test for debug groups and markers
Test Coverage:
- For each encoder type (GPUCommandEncoder, GPUComputeEncoder, GPURenderPassEncoder,
GPURenderBundleEncoder):
- Test that all pushDebugGroup must have a corresponding popDebugGroup
- Push and pop counts of 0, 1, and 2 will be used.
- An error must be generated for non matching counts.
- Test calling pushDebugGroup with empty and non-empty strings.
- Test inserting a debug marker with empty and non-empty strings.
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { kEncoderTypes } from '../../../../util/command_buffer_maker.js';
import { ValidationTest } from '../../validation_test.js';
export const g = makeTestGroup(ValidationTest);

g.test('debug_group_balanced')
  .params(u =>
    u
      .combine('encoderType', kEncoderTypes)
      .beginSubcases()
      .combine('pushCount', [0, 1, 2])
      .combine('popCount', [0, 1, 2])
  )
  .fn(t => {
    const { encoderType, pushCount, popCount } = t.params;
    const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
    // Push then pop the requested number of debug groups, labeled by index.
    for (let depth = 0; depth < pushCount; ++depth) {
      encoder.pushDebugGroup(`${depth}`);
    }
    for (let depth = 0; depth < popCount; ++depth) {
      encoder.popDebugGroup();
    }
    // finish() is valid only when every pushDebugGroup has a matching popDebugGroup.
    validateFinishAndSubmit(pushCount === popCount, true);
  });
g.test('debug_group')
  .params(u =>
    u //
      .combine('encoderType', kEncoderTypes)
      .beginSubcases()
      .combine('label', ['', 'group'])
  )
  .fn(t => {
    // A balanced push/pop pair is valid regardless of label (including the empty string).
    const { encoderType, label } = t.params;
    const { encoder, validateFinishAndSubmit: validate } = t.createEncoder(encoderType);
    encoder.pushDebugGroup(label);
    encoder.popDebugGroup();
    validate(true, true);
  });
g.test('debug_marker')
  .params(u =>
    u //
      .combine('encoderType', kEncoderTypes)
      .beginSubcases()
      .combine('label', ['', 'marker'])
  )
  .fn(t => {
    // Inserting a debug marker is always valid, with any label string.
    const { encoderType, label } = t.params;
    const { encoder, validateFinishAndSubmit: validate } = t.createEncoder(encoderType);
    encoder.insertDebugMarker(label);
    validate(true, true);
  });
|
sarahM0/cts
|
src/unittests/test_group_test.ts
|
<filename>src/unittests/test_group_test.ts
import { Logger, LogResults } from '../common/internal/logging/logger.js';
import { TestQuerySingleCase } from '../common/internal/query/query.js';
import { IterableTestGroup, TestCaseID } from '../common/internal/test_group.js';
import { objectEquals } from '../common/util/util.js';
import { UnitTest } from './unit_test.js';
export class TestGroupTest extends UnitTest {
  // Runs every case of every test in the group under a fresh Logger and returns the results.
  async run(g: IterableTestGroup): Promise<LogResults> {
    const logger = new Logger({ overrideDebugMode: true });
    for (const t of g.iterate()) {
      for (const rc of t.iterate()) {
        // 'xx'/'yy' are placeholder suite/file names; only the test path and params matter here.
        const query = new TestQuerySingleCase('xx', ['yy'], rc.id.test, rc.id.params);
        const [rec] = logger.record(query.toString());
        await rc.run(rec, query, []);
      }
    }
    return logger.results;
  }

  // Expects the group to contain exactly the given case IDs, in order.
  expectCases(g: IterableTestGroup, cases: TestCaseID[]): void {
    const gcases = [];
    for (const t of g.iterate()) {
      gcases.push(...Array.from(t.iterate(), c => c.id));
    }
    this.expect(objectEquals(gcases, cases));
  }
}
|
sarahM0/cts
|
src/unittests/query_compare.spec.ts
|
export const description = `
Tests for TestQuery comparison
`;
import { makeTestGroup } from '../common/framework/test_group.js';
import { compareQueries, Ordering } from '../common/internal/query/compare.js';
import {
TestQuery,
TestQuerySingleCase,
TestQueryMultiFile,
TestQueryMultiTest,
TestQueryMultiCase,
} from '../common/internal/query/query.js';
import { UnitTest } from './unit_test.js';
class F extends UnitTest {
  /**
   * Checks that compareQueries(a, b) yields the ordering implied by `exp`
   * ('<' strict subset, '=' equal, '>' strict superset, '!' unordered), and
   * that comparing in the reverse direction yields the inverse ordering.
   */
  expectQ(a: TestQuery, exp: '<' | '=' | '>' | '!', b: TestQuery) {
    const [expOrdering, expInvOrdering] =
      exp === '<'
        ? [Ordering.StrictSubset, Ordering.StrictSuperset]
        : exp === '='
        ? [Ordering.Equal, Ordering.Equal]
        : exp === '>'
        ? [Ordering.StrictSuperset, Ordering.StrictSubset]
        : [Ordering.Unordered, Ordering.Unordered];
    {
      const act = compareQueries(a, b);
      this.expect(act === expOrdering, `${a} ${b} got ${act}, exp ${expOrdering}`);
    }
    {
      // BUG FIX: this previously re-ran compareQueries(a, b) and compared against
      // expOrdering again, so the inverse direction was never actually checked
      // (the failure message already referred to (b, a) and expInvOrdering).
      const act = compareQueries(b, a);
      this.expect(act === expInvOrdering, `${b} ${a} got ${act}, exp ${expInvOrdering}`);
    }
  }

  /** Expects each query to equal itself and be a strict superset of every later query. */
  expectWellOrdered(...qs: TestQuery[]) {
    for (let i = 0; i < qs.length; ++i) {
      this.expectQ(qs[i], '=', qs[i]);
      for (let j = i + 1; j < qs.length; ++j) {
        this.expectQ(qs[i], '>', qs[j]);
      }
    }
  }

  /** Expects each query to equal itself and be unordered with every other query. */
  expectUnordered(...qs: TestQuery[]) {
    for (let i = 0; i < qs.length; ++i) {
      this.expectQ(qs[i], '=', qs[i]);
      for (let j = i + 1; j < qs.length; ++j) {
        this.expectQ(qs[i], '!', qs[j]);
      }
    }
  }
}
export const g = makeTestGroup(F);

// Expected ordering chains (each earlier query is a strict superset of each later one):
// suite:* > suite:a,* > suite:a,b,* > suite:a,b:*
// suite:a,b:* > suite:a,b:c,* > suite:a,b:c,d,* > suite:a,b:c,d:*
// suite:a,b:c,d:* > suite:a,b:c,d:x=1;* > suite:a,b:c,d:x=1;y=2;* > suite:a,b:c,d:x=1;y=2
// suite:a;* (unordered) suite:b;*
g.test('well_ordered').fn(t => {
  // Full chain: file hierarchy -> test hierarchy -> params added one at a time -> single case.
  t.expectWellOrdered(
    new TestQueryMultiFile('suite', []),
    new TestQueryMultiFile('suite', ['a']),
    new TestQueryMultiFile('suite', ['a', 'b']),
    new TestQueryMultiTest('suite', ['a', 'b'], []),
    new TestQueryMultiTest('suite', ['a', 'b'], ['c']),
    new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'd']),
    new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], {}),
    new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 1 }),
    new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 }),
    new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 })
  );
  // Same chain but ending in a single case with an empty param set.
  t.expectWellOrdered(
    new TestQueryMultiFile('suite', []),
    new TestQueryMultiFile('suite', ['a']),
    new TestQueryMultiFile('suite', ['a', 'b']),
    new TestQueryMultiTest('suite', ['a', 'b'], []),
    new TestQueryMultiTest('suite', ['a', 'b'], ['c']),
    new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'd']),
    new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], {}),
    new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], {})
  );
});
g.test('unordered').fn(t => {
  // Queries that diverge in a file/test path element or in a param value are unordered.
  t.expectUnordered(
    new TestQueryMultiFile('suite', ['a']), //
    new TestQueryMultiFile('suite', ['x'])
  );
  t.expectUnordered(
    new TestQueryMultiFile('suite', ['a', 'b']),
    new TestQueryMultiFile('suite', ['a', 'x'])
  );
  t.expectUnordered(
    new TestQueryMultiTest('suite', ['a', 'b'], ['c']),
    new TestQueryMultiTest('suite', ['a', 'b'], ['x']),
    new TestQueryMultiTest('suite', ['a'], []),
    new TestQueryMultiTest('suite', ['a', 'x'], [])
  );
  t.expectUnordered(
    new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'd']),
    new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'x']),
    new TestQueryMultiTest('suite', ['a'], []),
    new TestQueryMultiTest('suite', ['a', 'x'], [])
  );
  t.expectUnordered(
    new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'd']),
    new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'x']),
    new TestQueryMultiTest('suite', ['a'], []),
    new TestQueryMultiTest('suite', ['a', 'x'], ['c'])
  );
  // Differing param values (or differing test paths with params) are unordered.
  t.expectUnordered(
    new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 1 }),
    new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 9 }),
    new TestQueryMultiCase('suite', ['a', 'b'], ['c'], { x: 9 })
  );
  t.expectUnordered(
    new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 }),
    new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 8 }),
    new TestQueryMultiCase('suite', ['a', 'b'], ['c'], { x: 1, y: 8 })
  );
  t.expectUnordered(
    new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 }),
    new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 8 }),
    new TestQuerySingleCase('suite', ['a', 'b'], ['c'], { x: 1, y: 8 })
  );
  // Diverging at a file-path element makes even case-vs-multitest queries unordered.
  t.expectUnordered(
    new TestQuerySingleCase('suite1', ['bar', 'buzz', 'buzz'], ['zap'], {}),
    new TestQueryMultiTest('suite1', ['bar'], [])
  );
});
|
sarahM0/cts
|
src/webgpu/api/operation/buffers/map_detach.spec.ts
|
<reponame>sarahM0/cts
export const description = `
Tests that TypedArrays created when mapping a GPUBuffer are detached when the
buffer is unmapped or destroyed.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUConst } from '../../../constants.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);

g.test('while_mapped')
  .desc(
    `
Test that mapped buffers are able to properly detach.
- Tests {mappable, unmappable mapAtCreation, mappable mapAtCreation}
- Tests while {mapped, mapped at creation, mapped at creation then unmapped and mapped again}`
  )
  .paramsSubcasesOnly(u =>
    u
      .combine('mappedAtCreation', [false, true])
      .combineWithParams([
        { usage: GPUConst.BufferUsage.COPY_SRC },
        { usage: GPUConst.BufferUsage.MAP_WRITE | GPUConst.BufferUsage.COPY_SRC },
        { usage: GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.MAP_READ },
        {
          usage: GPUConst.BufferUsage.MAP_WRITE | GPUConst.BufferUsage.COPY_SRC,
          mapMode: GPUConst.MapMode.WRITE,
        },
        {
          usage: GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.MAP_READ,
          mapMode: GPUConst.MapMode.READ,
        },
      ])
      .combineWithParams([
        { unmap: true, destroy: false },
        { unmap: false, destroy: true },
        { unmap: true, destroy: true },
      ])
      // A non-mappable buffer can only be mapped at creation time.
      .unless(p => p.mappedAtCreation === false && p.mapMode === undefined)
  )
  .fn(async t => {
    const { usage, mapMode, mappedAtCreation, unmap, destroy } = t.params;
    const buffer = t.device.createBuffer({
      size: 4,
      usage,
      mappedAtCreation,
    });

    if (mapMode !== undefined) {
      if (mappedAtCreation) {
        // Exercise "mapped at creation, then unmapped and mapped again".
        buffer.unmap();
      }
      await buffer.mapAsync(mapMode);
    }
    // While mapped, the range and views over it are live and full-sized.
    const arrayBuffer = buffer.getMappedRange();
    const view = new Uint8Array(arrayBuffer);
    t.expect(arrayBuffer.byteLength === 4);
    t.expect(view.length === 4);

    if (unmap) buffer.unmap();
    if (destroy) buffer.destroy();

    // Unmapping or destroying the buffer must detach all mappings.
    t.expect(arrayBuffer.byteLength === 0, 'ArrayBuffer should be detached');
    t.expect(view.byteLength === 0, 'ArrayBufferView should be detached');
  });
|
sarahM0/cts
|
src/webgpu/api/validation/encoding/cmds/render/draw.spec.ts
|
<reponame>sarahM0/cts<filename>src/webgpu/api/validation/encoding/cmds/render/draw.spec.ts
export const description = `
Here we test the validation for draw functions, mainly the buffer access validation. All four types
of draw calls are tested, and test that validation errors do / don't occur for certain call type
and parameters as expect.
`;
import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { ValidationTest } from '../../../validation_test.js';
// Fixture for the draw-call buffer-access validation tests below (helpers still to be written).
class F extends ValidationTest {
  // TODO: Implement the helper functions
}
export const g = makeTestGroup(F);

g.test(`unused_buffer_bound`)
  .desc(
    `
In this test we test that a small buffer bound to unused buffer slot won't cause validation error.
- All draw commands,
- An unused {index , vertex} buffer with uselessly small range is bound (immediately before draw
call)
`
  )
  .unimplemented();

g.test(`index_buffer_OOB`)
  .desc(
    `
In this test we test that index buffer OOB is caught as a validation error in drawIndexed, but not
in drawIndexedIndirect as it is GPU-validated.
- Issue an indexed draw call, with the following index buffer states, for {all index formats}:
- range and GPUBuffer are exactly the required size for the draw call
- range is too small but GPUBuffer is still large enough
- range and GPUBuffer are both too small
`
  )
  .unimplemented();

g.test(`vertex_buffer_OOB`)
  .desc(
    `
In this test we test the vertex buffer OOB validation in draw calls. Specifically, only vertex step
mode buffer OOB in draw and instance step mode buffer OOB in draw and drawIndexed are CPU-validated.
Other cases are handled by robust access and no validation error occurs.
- Test that:
- Draw call needs to read {=, >} any bound vertex buffer range, with GPUBuffer that is {large
enough, exactly the size of bound range}
- Binding size = 0 (ensure it's not treated as a special case)
- x= weird offset values
- x= weird arrayStride values
- x= {render pass, render bundle}
- For vertex step mode vertex buffer,
- Test that:
- vertexCount largeish
- firstVertex {=, >} 0
- arrayStride is 0 and bound buffer size too small
- Validation error occurs in:
- draw
- drawIndexed with a zero array stride vertex step mode buffer OOB
- Otherwise no validation error in drawIndexed, drawIndirect and drawIndexedIndirect
- For instance step mode vertex buffer,
- Test with draw and drawIndexed:
- instanceCount largeish
- firstInstance {=, >} 0
- arrayStride is 0 and bound buffer size too small
- Validation error occurs in draw and drawIndexed
- No validation error in drawIndirect and drawIndexedIndirect
In this test, we use a render pipeline requiring one vertex step mode and one instance step mode
vertex buffer. Then for a given drawing parameter set (e.g., vertexCount, instanceCount, firstVertex,
indexCount), we calculate the exactly required size for vertex step mode vertex buffer, instance
step mode vertex buffer and index buffer. Then, we generate buffer parameters (i.e. GPU buffer size,
binding offset and binding size) for all three buffers, covering both (bound size == required size)
and (bound size == required size - 1), and test that draw and drawIndexed will succeed/error as
expected. Such a set of buffer parameters should include cases like weird offset values.
`
  )
  .unimplemented();

g.test(`last_buffer_setting_take_account`)
  .desc(
    `
In this test we test that only the last setting for a buffer slot takes effect.
- All (non/indexed, in/direct) draw commands
- setPl, setVB, setIB, draw, {setPl,setVB,setIB,nothing (control)}, then a larger draw that
wouldn't have been valid before that
`
  )
  .unimplemented();

g.test(`buffer_binding_overlap`)
  .desc(
    `
In this test we test that binding one GPU buffer to multiple vertex buffer slot or both vertex
buffer slot and index buffer will cause no validation error, with completely/partial overlap.
- x= all draw types
`
  )
  .unimplemented();
|
sarahM0/cts
|
src/webgpu/util/create_elements.ts
|
import { Fixture } from '../../common/framework/fixture.js';
import { unreachable } from '../../common/util/util.js';
export const allCanvasTypes = ['onscreen', 'offscreen'] as const;
// Derive the type from the value so the two can never drift apart.
export type canvasTypes = typeof allCanvasTypes[number];

/**
 * Create an HTMLCanvasElement or OffscreenCanvas of the requested type.
 * Skips the test when the requested canvas kind is unavailable in this environment.
 */
export function createCanvas(
  test: Fixture,
  canvasType: canvasTypes, // use the exported alias rather than repeating the union
  width: number,
  height: number
): HTMLCanvasElement | OffscreenCanvas {
  let canvas: HTMLCanvasElement | OffscreenCanvas;
  if (canvasType === 'onscreen') {
    if (typeof document !== 'undefined') {
      canvas = createOnscreenCanvas(test, width, height);
    } else {
      // NOTE(review): assumes Fixture.skip() never returns (throws a skip exception),
      // so `canvas` can never be read unassigned below — confirm against Fixture.
      test.skip('Cannot create HTMLCanvasElement');
    }
  } else if (canvasType === 'offscreen') {
    canvas = createOffscreenCanvas(test, width, height);
  } else {
    unreachable();
  }
  return canvas;
}
/** Create HTMLCanvasElement of the given size; skips the test when there is no DOM. */
export function createOnscreenCanvas(
  test: Fixture,
  width: number,
  height: number
): HTMLCanvasElement {
  let canvas: HTMLCanvasElement;
  if (typeof document !== 'undefined') {
    canvas = document.createElement('canvas');
    canvas.width = width;
    canvas.height = height;
  } else {
    // Non-DOM environment (e.g. a worker): no document is available.
    // NOTE(review): assumes test.skip() never returns, so the potentially
    // unassigned `canvas` below is unreachable — confirm against Fixture.skip.
    test.skip('Cannot create HTMLCanvasElement');
  }
  return canvas;
}
/** Create an OffscreenCanvas of the given size; skips the test when unsupported. */
export function createOffscreenCanvas(
  test: Fixture,
  width: number,
  height: number
): OffscreenCanvas {
  const supported = typeof OffscreenCanvas !== 'undefined';
  if (!supported) {
    test.skip('OffscreenCanvas is not supported');
  }
  return new OffscreenCanvas(width, height);
}
|
sarahM0/cts
|
src/webgpu/shader/validation/shader_io/invariant.spec.ts
|
<reponame>sarahM0/cts
export const description = `Validation tests for the invariant attribute`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { ShaderValidationTest } from '../shader_validation_test.js';
import { kBuiltins } from './builtins.spec.js';
import { generateShader } from './util.js';
export const g = makeTestGroup(ShaderValidationTest);

g.test('valid_only_with_vertex_position_builtin')
  .desc(`Test that the invariant attribute is only accepted with the vertex position builtin`)
  .params(u =>
    u
      .combineWithParams(kBuiltins)
      .combine('use_struct', [true, false] as const)
      .beginSubcases()
  )
  .fn(t => {
    const code = generateShader({
      attribute: `[[builtin(${t.params.name}), invariant]]`,
      type: t.params.type,
      stage: t.params.stage,
      io: t.params.io,
      use_struct: t.params.use_struct,
    });
    // Only the 'position' builtin may carry the invariant attribute.
    t.expectCompileResult(t.params.name === 'position', code);
  });

g.test('not_valid_on_user_defined_io')
  .desc(`Test that the invariant attribute is not accepted on user-defined IO attributes.`)
  .params(u => u.combine('use_invariant', [true, false] as const).beginSubcases())
  .fn(t => {
    const invariant = t.params.use_invariant ? '[[invariant]]' : '';
    const code = `
struct VertexOut {
[[location(0)]] ${invariant} loc0 : vec4<f32>;
[[builtin(position)]] position : vec4<f32>;
};
[[stage(vertex)]]
fn main() -> VertexOut {
return VertexOut();
}
`;
    // The shader must fail to compile exactly when invariant is applied to user-defined IO.
    t.expectCompileResult(!t.params.use_invariant, code);
  });

g.test('invalid_use_of_parameters')
  .desc(`Test that no parameters are accepted for the invariant attribute`)
  .params(u => u.combine('suffix', ['', '()', '(0)'] as const).beginSubcases())
  .fn(t => {
    const code = `
struct VertexOut {
[[builtin(position), invariant${t.params.suffix}]] position : vec4<f32>;
};
[[stage(vertex)]]
fn main() -> VertexOut {
return VertexOut();
}
`;
    // Only the bare attribute (no parameter list) is valid.
    t.expectCompileResult(t.params.suffix === '', code);
  });
|
sarahM0/cts
|
src/unittests/conversion.spec.ts
|
export const description = `Unit tests for conversion`;
import { makeTestGroup } from '../common/internal/test_group.js';
import { float16BitsToFloat32, float32ToFloat16Bits } from '../webgpu/util/conversion.js';
import { UnitTest } from './unit_test.js';
export const g = makeTestGroup(UnitTest);

// Each entry is [float16 bit pattern, expected float32 value].
// Bit layout (underscore-separated): 1 sign bit _ 5 exponent bits _ 10 mantissa bits.
const cases = [
  [0b0_01111_0000000000, 1],
  [0b0_00001_0000000000, 0.00006103515625], // smallest positive normal float16
  [0b0_01101_0101010101, 0.33325195],
  [0b0_11110_1111111111, 65504], // largest finite float16
  [0b0_00000_0000000000, 0],
  [0b0_01110_0000000000, 0.5],
  [0b0_01100_1001100110, 0.1999512],
  [0b0_01111_0000000001, 1.00097656],
  [0b0_10101_1001000000, 100],
  [0b1_01100_1001100110, -0.1999512],
  [0b1_10101_1001000000, -100],
];

g.test('conversion,float16BitsToFloat32').fn(t => {
  cases.forEach(value => {
    // some loose check
    t.expect(Math.abs(float16BitsToFloat32(value[0]) - value[1]) <= 0.00001, value[0].toString(2));
  });
});

g.test('conversion,float32ToFloat16Bits').fn(t => {
  cases.forEach(value => {
    // some loose check
    // Does not handle clamping, underflow, overflow, or denormalized numbers.
    t.expect(Math.abs(float32ToFloat16Bits(value[1]) - value[0]) <= 1, value[1].toString());
  });
});
|
sarahM0/cts
|
src/webgpu/api/operation/render_pipeline/sample_mask.spec.ts
|
<gh_stars>10-100
export const description = `
TODO:
- for sampleCount = { 1, 4 } and various combinations of:
- rasterization mask = { 0, 1, 2, 3, 15 }
- sample mask = { 0, 1, 2, 3, 15, 30 }
- fragment shader output mask (SV_Coverage) = { 0, 1, 2, 3, 15, 30 }
- test that final sample mask is the logical AND of all the
relevant masks -- meaning that the samples not included in the final mask are discarded
for all the { color outputs, depth tests, stencil operations } on any attachments.
- [choosing 30 = 2 + 4 + 8 + 16 because the 5th bit should be ignored]
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
// No tests yet — the planned coverage is listed in `description` above.
export const g = makeTestGroup(GPUTest);
|
sarahM0/cts
|
src/webgpu/shader/validation/shader_validation_test.ts
|
<reponame>sarahM0/cts<filename>src/webgpu/shader/validation/shader_validation_test.ts<gh_stars>10-100
import { ErrorWithExtra } from '../../../common/util/util.js';
import { GPUTest } from '../../gpu_test.js';
/**
 * Base fixture for WGSL shader validation tests.
 */
export class ShaderValidationTest extends GPUTest {
  /**
   * Add a test expectation for whether a createShaderModule call succeeds or not.
   *
   * @example
   * ```ts
   * t.expectCompileResult(true, `wgsl code`); // Expect success
   * t.expectCompileResult(false, `wgsl code`); // Expect validation error with any error string
   * t.expectCompileResult('substr', `wgsl code`); // Expect validation error containing 'substr'
   * ```
   *
   * TODO(gpuweb/gpuweb#1813): Remove the "string" overload if there are no standard error codes.
   */
  expectCompileResult(expectedResult: boolean | string, code: string) {
    let shaderModule: GPUShaderModule;
    // A validation error is expected unless compilation is expected to fully succeed.
    this.expectGPUError(
      'validation',
      () => {
        shaderModule = this.device.createShaderModule({ code });
      },
      expectedResult !== true
    );
    // Carries the shader module as extra debug info on any logged result.
    const error = new ErrorWithExtra('', () => ({ shaderModule }));
    this.eventualAsyncExpectation(async () => {
      const compilationInfo = await shaderModule!.compilationInfo();
      // TODO: Pretty-print error messages with source context.
      const messagesLog = compilationInfo.messages
        .map(m => `${m.lineNum}:${m.linePos}: ${m.type}: ${m.message}`)
        .join('\n');
      error.extra.compilationInfo = compilationInfo;
      if (typeof expectedResult === 'string') {
        // Substring overload: look for an error message containing the expected text.
        for (const msg of compilationInfo.messages) {
          if (msg.type === 'error' && msg.message.indexOf(expectedResult) !== -1) {
            error.message =
              `Found expected compilationInfo message substring «${expectedResult}».\n` +
              messagesLog;
            this.rec.debug(error);
            return;
          }
        }
        // Here, no error message was found, but one was expected.
        error.message = `Missing expected substring «${expectedResult}».\n` + messagesLog;
        this.rec.validationFailed(error);
        return;
      }
      // Boolean overload: check presence/absence of any 'error' message against expectation.
      if (compilationInfo.messages.some(m => m.type === 'error')) {
        if (expectedResult) {
          error.message = `Unexpected compilationInfo 'error' message.\n` + messagesLog;
          this.rec.validationFailed(error);
        } else {
          error.message = `Found expected compilationInfo 'error' message.\n` + messagesLog;
          this.rec.debug(error);
        }
      } else {
        if (!expectedResult) {
          error.message = `Missing expected compilationInfo 'error' message.\n` + messagesLog;
          this.rec.validationFailed(error);
        } else {
          error.message = `No compilationInfo 'error' messages, as expected.\n` + messagesLog;
          this.rec.debug(error);
        }
      }
    });
  }
}
|
sarahM0/cts
|
src/common/tools/version.ts
|
// Version string produced by `git describe`: the nearest tag (or, with --always,
// the abbreviated commit hash), with a "-dirty" suffix when the working tree has
// uncommitted changes.
export const version = require('child_process')
  .execSync('git describe --always --abbrev=0 --dirty')
  .toString()
  .trim();
|
sarahM0/cts
|
src/webgpu/api/operation/render_pipeline/pipeline_output_targets.spec.ts
|
export const description = `
- Test pipeline outputs with different color target formats.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { unreachable } from '../../../../common/util/util.js';
import { kRenderableColorTextureFormats, kTextureFormatInfo } from '../../../capability_info.js';
import { GPUTest } from '../../../gpu_test.js';
import { kTexelRepresentationInfo } from '../../../util/texture/texel_data.js';
class F extends GPUTest {
  /**
   * Builds a WGSL fragment shader whose single [[location(0)]] output has
   * `componentCount` components of the scalar type matching `sampleType`, and
   * which returns the first `componentCount` entries of `output` as constants.
   */
  getFragmentShaderCode(
    output: readonly number[],
    sampleType: GPUTextureSampleType,
    componentCount: number
  ): string {
    // Scalar WGSL type, literal suffix, and number of fraction digits printed
    // for each supported sample type.
    let fragColorType;
    let suffix;
    let fractionDigits = 0;
    if (sampleType === 'sint') {
      fragColorType = 'i32';
      suffix = '';
    } else if (sampleType === 'uint') {
      fragColorType = 'u32';
      suffix = 'u';
    } else if (sampleType === 'float') {
      fragColorType = 'f32';
      suffix = '';
      fractionDigits = 4;
    } else {
      unreachable();
    }
    if (componentCount < 1 || componentCount > 4) {
      unreachable();
    }
    // One WGSL literal per requested component, e.g. "1.0000" or "255u".
    const literals = output
      .slice(0, componentCount)
      .map(n => `${n.toFixed(fractionDigits)}${suffix}`);
    // Scalar for a single component, otherwise vecN<T>.
    const outputType =
      componentCount === 1 ? fragColorType : `vec${componentCount}<${fragColorType}>`;
    const result =
      componentCount === 1 ? literals[0] : `${outputType}(${literals.join(', ')})`;
    return `
    [[stage(fragment)]] fn main() -> [[location(0)]] ${outputType} {
      return ${result};
    }`;
  }
}
export const g = makeTestGroup(F);
g.test('color,component_count')
  .desc(
    `Test that extra components of the output (e.g. f32, vec2<f32>, vec3<f32>, vec4<f32>) are discarded.`
  )
  .params(u =>
    u
      .combine('format', kRenderableColorTextureFormats)
      .beginSubcases()
      .combine('componentCount', [1, 2, 3, 4])
      // The shader must write at least as many components as the format has channels.
      .filter(x => x.componentCount >= kTexelRepresentationInfo[x.format].componentOrder.length)
  )
  .fn(async t => {
    const { format, componentCount } = t.params;
    const info = kTextureFormatInfo[format];
    await t.selectDeviceOrSkipTestCase(info.feature);
    // expected RGBA values
    // extra channels are discarded
    const result = [0, 1, 0, 1];
    const renderTarget = t.device.createTexture({
      format,
      size: { width: 1, height: 1, depthOrArrayLayers: 1 },
      usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
    });
    const pipeline = t.device.createRenderPipeline({
      vertex: {
        module: t.device.createShaderModule({
          // Full-screen triangle; the single pixel of the 1x1 target is covered.
          code: `
        [[stage(vertex)]] fn main(
          [[builtin(vertex_index)]] VertexIndex : u32
          ) -> [[builtin(position)]] vec4<f32> {
            var pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
                vec2<f32>(-1.0, -3.0),
                vec2<f32>(3.0, 1.0),
                vec2<f32>(-1.0, 1.0));
            return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
          }
          `,
        }),
        entryPoint: 'main',
      },
      fragment: {
        module: t.device.createShaderModule({
          code: t.getFragmentShaderCode(result, info.sampleType, componentCount),
        }),
        entryPoint: 'main',
        targets: [{ format }],
      },
      primitive: { topology: 'triangle-list' },
    });
    const encoder = t.device.createCommandEncoder();
    const pass = encoder.beginRenderPass({
      colorAttachments: [
        {
          view: renderTarget.createView(),
          storeOp: 'store',
          loadValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
        },
      ],
    });
    pass.setPipeline(pipeline);
    pass.draw(3);
    pass.endPass();
    t.device.queue.submit([encoder.finish()]);
    // Channels not present in `format` are dropped by expectSingleColor.
    t.expectSingleColor(renderTarget, format, {
      size: [1, 1, 1],
      exp: { R: result[0], G: result[1], B: result[2], A: result[3] },
    });
  });
g.test('color,component_count,blend')
  .desc(
    `Test that blending behaves correctly when:
- fragment output has no alpha, but the src alpha is not used for the blend operation indicated by blend factors
- attachment format has no alpha, and the dst alpha should be assumed as 1

The attachment has a load value of [1, 0, 0, 1]
`
  )
  .params(u =>
    u
      .combine('format', ['r8unorm', 'rg8unorm', 'rgba8unorm', 'bgra8unorm'] as const)
      .beginSubcases()
      // _result is expected values in the color attachment (extra channels are discarded)
      // output is the fragment shader output vector
      // 0.498 -> 0x7f, 0.502 -> 0x80
      .combineWithParams([
        // fragment output has no alpha
        {
          _result: [0, 0, 0, 0],
          output: [0],
          colorSrcFactor: 'one',
          colorDstFactor: 'zero',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'zero',
        },
        {
          _result: [0, 0, 0, 0],
          output: [0],
          colorSrcFactor: 'dst-alpha',
          colorDstFactor: 'zero',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'zero',
        },
        {
          _result: [1, 0, 0, 0],
          output: [0],
          colorSrcFactor: 'one-minus-dst-alpha',
          colorDstFactor: 'dst-alpha',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'one',
        },
        {
          _result: [0.498, 0, 0, 0],
          output: [0.498],
          colorSrcFactor: 'dst-alpha',
          colorDstFactor: 'zero',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'one',
        },
        {
          _result: [0, 1, 0, 0],
          output: [0, 1],
          colorSrcFactor: 'one',
          colorDstFactor: 'zero',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'zero',
        },
        {
          _result: [0, 1, 0, 0],
          output: [0, 1],
          colorSrcFactor: 'dst-alpha',
          colorDstFactor: 'zero',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'zero',
        },
        {
          _result: [1, 0, 0, 0],
          output: [0, 1],
          colorSrcFactor: 'one-minus-dst-alpha',
          colorDstFactor: 'dst-alpha',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'one',
        },
        {
          _result: [0, 1, 0, 0],
          output: [0, 1, 0],
          colorSrcFactor: 'one',
          colorDstFactor: 'zero',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'zero',
        },
        {
          _result: [0, 1, 0, 0],
          output: [0, 1, 0],
          colorSrcFactor: 'dst-alpha',
          colorDstFactor: 'zero',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'zero',
        },
        {
          _result: [1, 0, 0, 0],
          output: [0, 1, 0],
          colorSrcFactor: 'one-minus-dst-alpha',
          colorDstFactor: 'dst-alpha',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'one',
        },
        // fragment output has alpha
        {
          _result: [0.502, 1, 0, 0.498],
          output: [0, 1, 0, 0.498],
          colorSrcFactor: 'one',
          colorDstFactor: 'one-minus-src-alpha',
          alphaSrcFactor: 'one',
          alphaDstFactor: 'zero',
        },
        {
          _result: [0.502, 0.498, 0, 0.498],
          output: [0, 1, 0, 0.498],
          colorSrcFactor: 'src-alpha',
          colorDstFactor: 'one-minus-src-alpha',
          alphaSrcFactor: 'one',
          alphaDstFactor: 'zero',
        },
        {
          _result: [0, 1, 0, 0.498],
          output: [0, 1, 0, 0.498],
          colorSrcFactor: 'dst-alpha',
          colorDstFactor: 'zero',
          alphaSrcFactor: 'one',
          alphaDstFactor: 'zero',
        },
        {
          _result: [0, 1, 0, 0.498],
          output: [0, 1, 0, 0.498],
          colorSrcFactor: 'dst-alpha',
          colorDstFactor: 'zero',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'src',
        },
        {
          _result: [1, 0, 0, 1],
          output: [0, 1, 0, 0.498],
          colorSrcFactor: 'one-minus-dst-alpha',
          colorDstFactor: 'dst-alpha',
          alphaSrcFactor: 'zero',
          alphaDstFactor: 'dst-alpha',
        },
      ] as const)
      // The shader must write at least as many components as the format has channels.
      .filter(x => x.output.length >= kTexelRepresentationInfo[x.format].componentOrder.length)
  )
  .fn(async t => {
    const {
      format,
      _result,
      output,
      colorSrcFactor,
      colorDstFactor,
      alphaSrcFactor,
      alphaDstFactor,
    } = t.params;
    const componentCount = output.length;
    const info = kTextureFormatInfo[format];
    await t.selectDeviceOrSkipTestCase(info.feature);
    const renderTarget = t.device.createTexture({
      format,
      size: { width: 1, height: 1, depthOrArrayLayers: 1 },
      usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
    });
    const pipeline = t.device.createRenderPipeline({
      vertex: {
        module: t.device.createShaderModule({
          // Full-screen triangle; the single pixel of the 1x1 target is covered.
          code: `
        [[stage(vertex)]] fn main(
          [[builtin(vertex_index)]] VertexIndex : u32
          ) -> [[builtin(position)]] vec4<f32> {
            var pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
                vec2<f32>(-1.0, -3.0),
                vec2<f32>(3.0, 1.0),
                vec2<f32>(-1.0, 1.0));
            return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
          }
          `,
        }),
        entryPoint: 'main',
      },
      fragment: {
        module: t.device.createShaderModule({
          code: t.getFragmentShaderCode(output, info.sampleType, componentCount),
        }),
        entryPoint: 'main',
        targets: [
          {
            format,
            blend: {
              color: {
                srcFactor: colorSrcFactor,
                dstFactor: colorDstFactor,
                operation: 'add',
              },
              alpha: {
                srcFactor: alphaSrcFactor,
                dstFactor: alphaDstFactor,
                operation: 'add',
              },
            },
          },
        ],
      },
      primitive: { topology: 'triangle-list' },
    });
    const encoder = t.device.createCommandEncoder();
    const pass = encoder.beginRenderPass({
      colorAttachments: [
        {
          view: renderTarget.createView(),
          storeOp: 'store',
          // Destination color the blend factors above operate against.
          loadValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
        },
      ],
    });
    pass.setPipeline(pipeline);
    pass.draw(3);
    pass.endPass();
    t.device.queue.submit([encoder.finish()]);
    t.expectSingleColor(renderTarget, format, {
      size: [1, 1, 1],
      exp: { R: _result[0], G: _result[1], B: _result[2], A: _result[3] },
    });
  });
|
sarahM0/cts
|
src/webgpu/web_platform/reftests/gpu_ref_test.ts
|
<gh_stars>10-100
import { assert } from '../../../common/util/util.js';
// Implemented by the surrounding reftest harness page (declared only here).
declare function takeScreenshotDelayed(ms: number): void;
interface GPURefTest {
  readonly device: GPUDevice;
  readonly queue: GPUQueue;
}
/**
 * Runs a WebGPU reference-test body with a freshly requested device/queue,
 * then asks the harness to take the comparison screenshot.
 */
export async function runRefTest(fn: (t: GPURefTest) => Promise<void>): Promise<void> {
  assert(
    typeof navigator !== 'undefined' && navigator.gpu !== undefined,
    'No WebGPU implementation found'
  );
  const adapter = await navigator.gpu.requestAdapter();
  assert(adapter !== null);
  const device = await adapter.requestDevice();
  assert(device !== null);
  const queue = device.queue;
  await fn({ device, queue });
  // NOTE(review): the 50ms delay presumably lets the canvas present before the
  // screenshot is captured — confirm against the harness implementation.
  takeScreenshotDelayed(50);
}
|
sarahM0/cts
|
src/webgpu/web_platform/canvas/configure.spec.ts
|
export const description = `
Tests for GPUCanvasContext.configure.

TODO: Test all options of configure.
`;
// Placeholder suite: no cases implemented yet.
import { Fixture } from '../../../common/framework/fixture.js';
import { makeTestGroup } from '../../../common/framework/test_group.js';
export const g = makeTestGroup(Fixture);
|
sarahM0/cts
|
src/webgpu/api/validation/queue/destroyed/texture.spec.ts
|
<gh_stars>10-100
export const description = `
Tests using a destroyed texture on a queue.

- used in {writeTexture,
    setBindGroup, copyT2T {src,dst}, copyB2T, copyT2B, copyExternalImageToTexture,
    color attachment {0,>0}, {D,S,DS} attachment, ..?}
- x= if applicable, {in pass, in bundle}
- x= {destroyed, not destroyed (control case)}

TODO: implement. (Search for other places some of these cases may have already been tested.)
Consider whether these tests should be distributed throughout the suite, instead of centralized.
`;
// Placeholder suite: no cases implemented yet.
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { ValidationTest } from '../../validation_test.js';
export const g = makeTestGroup(ValidationTest);
|
sarahM0/cts
|
src/webgpu/shader/validation/shader_io/locations.spec.ts
|
<reponame>sarahM0/cts<gh_stars>10-100
export const description = `Validation tests for entry point user-defined IO`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { ShaderValidationTest } from '../shader_validation_test.js';
import { generateShader } from './util.js';
export const g = makeTestGroup(ShaderValidationTest);
// List of types to test against.
// _valid: whether the type is allowed as user-defined entry-point IO
// (only numeric scalars and numeric vectors are).
const kTestTypes = [
  { type: 'bool', _valid: false },
  { type: 'u32', _valid: true },
  { type: 'i32', _valid: true },
  { type: 'f32', _valid: true },
  { type: 'vec2<bool>', _valid: false },
  { type: 'vec2<u32>', _valid: true },
  { type: 'vec2<i32>', _valid: true },
  { type: 'vec2<f32>', _valid: true },
  { type: 'vec3<bool>', _valid: false },
  { type: 'vec3<u32>', _valid: true },
  { type: 'vec3<i32>', _valid: true },
  { type: 'vec3<f32>', _valid: true },
  { type: 'vec4<bool>', _valid: false },
  { type: 'vec4<u32>', _valid: true },
  { type: 'vec4<i32>', _valid: true },
  { type: 'vec4<f32>', _valid: true },
  { type: 'mat2x2<f32>', _valid: false },
  { type: 'mat2x3<f32>', _valid: false },
  { type: 'mat2x4<f32>', _valid: false },
  { type: 'mat3x2<f32>', _valid: false },
  { type: 'mat3x3<f32>', _valid: false },
  { type: 'mat3x4<f32>', _valid: false },
  { type: 'mat4x2<f32>', _valid: false },
  { type: 'mat4x3<f32>', _valid: false },
  { type: 'mat4x4<f32>', _valid: false },
  { type: 'atomic<u32>', _valid: false },
  { type: 'atomic<i32>', _valid: false },
  { type: 'array<bool,4>', _valid: false },
  { type: 'array<u32,4>', _valid: false },
  { type: 'array<i32,4>', _valid: false },
  { type: 'array<f32,4>', _valid: false },
  { type: 'MyStruct', _valid: false },
] as const;
g.test('stage_inout')
  .desc(`Test validation of user-defined IO stage and in/out usage`)
  .params(u =>
    u
      .combine('use_struct', [true, false] as const)
      .combine('target_stage', ['vertex', 'fragment', 'compute'] as const)
      .combine('target_io', ['in', 'out'] as const)
      .beginSubcases()
  )
  .fn(t => {
    const code = generateShader({
      attribute: '[[location(0)]]',
      type: 'f32',
      stage: t.params.target_stage,
      io: t.params.target_io,
      use_struct: t.params.use_struct,
    });
    // Expect to fail for compute shaders or when used as a non-struct vertex output (since the
    // position built-in must also be specified).
    const expectation =
      t.params.target_stage === 'fragment' ||
      (t.params.target_stage === 'vertex' && (t.params.target_io === 'in' || t.params.use_struct));
    t.expectCompileResult(expectation, code);
  });
g.test('type')
  .desc(`Test validation of user-defined IO types`)
  .params(u =>
    u
      .combine('use_struct', [true, false] as const)
      .combineWithParams(kTestTypes)
      .beginSubcases()
  )
  .fn(t => {
    let code = '';
    if (t.params.type === 'MyStruct') {
      // Generate a struct that contains a valid type.
      // (The struct itself is still invalid as a [[location]]-decorated IO type.)
      code += 'struct MyStruct {\n';
      code += `  value : f32;\n`;
      code += '};\n\n';
    }
    code += generateShader({
      attribute: '[[location(0)]]',
      type: t.params.type,
      stage: 'fragment',
      io: 'in',
      use_struct: t.params.use_struct,
    });
    // Expect to pass iff a valid type is used.
    t.expectCompileResult(t.params._valid, code);
  });
g.test('nesting')
  .desc(`Test validation of nested user-defined IO`)
  .params(u =>
    u
      // '' means the struct is only declared, not used as entry-point IO.
      .combine('target_stage', ['vertex', 'fragment', ''] as const)
      .combine('target_io', ['in', 'out'] as const)
      .beginSubcases()
  )
  .fn(t => {
    let code = '';
    // Generate a struct that contains a valid type.
    code += 'struct Inner {\n';
    code += `  [[location(0)]] value : f32;\n`;
    code += '};\n\n';
    code += 'struct Outer {\n';
    code += `  inner : Inner;\n`;
    code += '};\n\n';
    code += generateShader({
      attribute: '',
      type: 'Outer',
      stage: t.params.target_stage,
      io: t.params.target_io,
      use_struct: false,
    });
    // Expect to pass only if the struct is not used for entry point IO.
    t.expectCompileResult(t.params.target_stage === '', code);
  });
g.test('duplicates')
  .desc(`Test that duplicated user-defined IO attributes are validated.`)
  .params(u =>
    u
      // Place two [[location(0)]] attributes onto the entry point function.
      // The function:
      // - has two non-struct parameters (`p1` and `p2`)
      // - has two struct parameters each with two members (`s1{a,b}` and `s2{a,b}`)
      // - returns a struct with two members (`ra` and `rb`)
      // By default, all of these user-defined IO variables will have unique location attributes.
      .combine('first', ['p1', 's1a', 's2a', 'ra'] as const)
      .combine('second', ['p2', 's1b', 's2b', 'rb'] as const)
      .beginSubcases()
  )
  .fn(t => {
    // The selected pair gets location 0; everything else keeps a unique location.
    const p1 = t.params.first === 'p1' ? '0' : '1';
    const p2 = t.params.second === 'p2' ? '0' : '2';
    const s1a = t.params.first === 's1a' ? '0' : '3';
    const s1b = t.params.second === 's1b' ? '0' : '4';
    const s2a = t.params.first === 's2a' ? '0' : '5';
    const s2b = t.params.second === 's2b' ? '0' : '6';
    const ra = t.params.first === 'ra' ? '0' : '1';
    const rb = t.params.second === 'rb' ? '0' : '2';
    const code = `
    struct S1 {
      [[location(${s1a})]] a : f32;
      [[location(${s1b})]] b : f32;
    };
    struct S2 {
      [[location(${s2a})]] a : f32;
      [[location(${s2b})]] b : f32;
    };
    struct R {
      [[location(${ra})]] a : f32;
      [[location(${rb})]] b : f32;
    };
    [[stage(fragment)]]
    fn main([[location(${p1})]] p1 : f32,
            [[location(${p2})]] p2 : f32,
            s1 : S1,
            s2 : S2,
            ) -> R {
      return R();
    }
    `;
    // The test should fail if both [[location(0)]] attributes are on the input parameters or
    // structures, or if they are both on the output struct. Otherwise it should pass.
    const firstIsRet = t.params.first === 'ra';
    const secondIsRet = t.params.second === 'rb';
    const expectation = firstIsRet !== secondIsRet;
    t.expectCompileResult(expectation, code);
  });
|
sarahM0/cts
|
src/webgpu/api/operation/command_buffer/render/state_tracking.spec.ts
|
<filename>src/webgpu/api/operation/command_buffer/render/state_tracking.spec.ts<gh_stars>10-100
export const description = `
Ensure state is set correctly. Tries to stress state caching (setting different states multiple
times in different orders) for setIndexBuffer and setVertexBuffer.
Equivalent tests for setBindGroup and setPipeline are in programmable/state_tracking.spec.ts.
Equivalent tests for viewport/scissor/blend/reference are in render/dynamic_state.spec.ts

TODO: plan and implement
- try setting states multiple times in different orders, check state is correct in a draw call.
    - setIndexBuffer: specifically test changing the format, offset, size, without changing the buffer
    - setVertexBuffer: specifically test changing the offset, size, without changing the buffer
- try changing the pipeline {before,after} the vertex/index buffers.
  (In D3D12, the vertex buffer stride is part of SetVertexBuffer instead of the pipeline.)
- Test that drawing after having set vertex buffer slots not used by the pipeline.
- Test that setting / not setting the index buffer does not impact a non-indexed draw.
`;
// Placeholder suite: no cases implemented yet.
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
|
sarahM0/cts
|
src/webgpu/api/validation/buffer/mapping.spec.ts
|
<filename>src/webgpu/api/validation/buffer/mapping.spec.ts
export const description = `
Validation tests for GPUBuffer.mapAsync, GPUBuffer.unmap and GPUBuffer.getMappedRange.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { attemptGarbageCollection } from '../../../../common/util/collect_garbage.js';
import { assert, unreachable } from '../../../../common/util/util.js';
import { kBufferUsages } from '../../../capability_info.js';
import { GPUConst } from '../../../constants.js';
import { ValidationTest } from '../validation_test.js';
class F extends ValidationTest {
  /**
   * Calls buffer.mapAsync and checks the outcome. On expected failure, the call
   * must produce a validation error and the returned promise must reject with an
   * error whose name is `rejectName` (pass null when it should not reject).
   */
  async testMapAsyncCall(
    success: boolean,
    rejectName: string | null,
    buffer: GPUBuffer,
    mode: GPUMapModeFlags,
    offset?: number,
    size?: number
  ) {
    if (success) {
      const p = buffer.mapAsync(mode, offset, size);
      await p;
    } else {
      let p: Promise<void>;
      this.expectValidationError(() => {
        p = buffer.mapAsync(mode, offset, size);
      });
      try {
        await p!;
        assert(rejectName === null, 'mapAsync unexpectedly passed');
      } catch (ex) {
        assert(rejectName === ex.name, `mapAsync rejected unexpectedly with: ${ex}`);
      }
    }
  }
  /**
   * Calls buffer.getMappedRange and checks that it either returns an ArrayBuffer
   * of the requested size, or throws an OperationError when `success` is false.
   */
  testGetMappedRangeCall(success: boolean, buffer: GPUBuffer, offset?: number, size?: number) {
    if (success) {
      const data = buffer.getMappedRange(offset, size);
      this.expect(data instanceof ArrayBuffer);
      if (size !== undefined) {
        this.expect(data.byteLength === size);
      }
    } else {
      this.shouldThrow('OperationError', () => {
        buffer.getMappedRange(offset, size);
      });
    }
  }
  // Creates a buffer whose usage matches the requested map mode (READ or WRITE).
  createMappableBuffer(type: GPUMapModeFlags, size: number): GPUBuffer {
    switch (type) {
      case GPUMapMode.READ:
        return this.device.createBuffer({
          size,
          usage: GPUBufferUsage.MAP_READ,
        });
      case GPUMapMode.WRITE:
        return this.device.createBuffer({
          size,
          usage: GPUBufferUsage.MAP_WRITE,
        });
      default:
        unreachable();
    }
  }
}
export const g = makeTestGroup(F);
const kMapModeOptions = [GPUConst.MapMode.READ, GPUConst.MapMode.WRITE];
// Required alignments for mapAsync/getMappedRange offset and size.
const kOffsetAlignment = 8;
const kSizeAlignment = 4;
g.test('mapAsync,usage')
  .desc(
    `Test the usage validation for mapAsync.

  For each buffer usage:
  For GPUMapMode.READ, GPUMapMode.WRITE, and 0:
    Test that the mapAsync call is valid iff the mapping usage is not 0 and the buffer usage
    the mapMode flag.`
  )
  .paramsSubcasesOnly(u =>
    u //
      .combineWithParams([
        { mapMode: GPUConst.MapMode.READ, validUsage: GPUConst.BufferUsage.MAP_READ },
        { mapMode: GPUConst.MapMode.WRITE, validUsage: GPUConst.BufferUsage.MAP_WRITE },
        // Using mapMode 0 is never valid, so there is no validUsage.
        { mapMode: 0, validUsage: null },
      ])
      .combine('usage', kBufferUsages)
  )
  .fn(async t => {
    const { mapMode, validUsage, usage } = t.params;
    const buffer = t.device.createBuffer({
      size: 16,
      usage,
    });
    const success = usage === validUsage;
    await t.testMapAsyncCall(success, 'OperationError', buffer, mapMode);
  });
g.test('mapAsync,invalidBuffer')
  .desc('Test that mapAsync is an error when called on an invalid buffer.')
  .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
  .fn(async t => {
    const { mapMode } = t.params;
    const buffer = t.getErrorBuffer();
    await t.testMapAsyncCall(false, 'OperationError', buffer, mapMode);
  });
g.test('mapAsync,state,destroyed')
  .desc('Test that mapAsync is an error when called on a destroyed buffer.')
  .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
  .fn(async t => {
    const { mapMode } = t.params;
    const buffer = t.createMappableBuffer(mapMode, 16);
    buffer.destroy();
    await t.testMapAsyncCall(false, 'OperationError', buffer, mapMode);
  });
g.test('mapAsync,state,mappedAtCreation')
  .desc(
    `Test that mapAsync is an error when called on a buffer mapped at creation,
  but succeeds after unmapping it.`
  )
  .paramsSubcasesOnly([
    { mapMode: GPUConst.MapMode.READ, validUsage: GPUConst.BufferUsage.MAP_READ },
    { mapMode: GPUConst.MapMode.WRITE, validUsage: GPUConst.BufferUsage.MAP_WRITE },
  ])
  .fn(async t => {
    const { mapMode, validUsage } = t.params;
    const buffer = t.device.createBuffer({
      size: 16,
      usage: validUsage,
      mappedAtCreation: true,
    });
    // While still mapped at creation, mapAsync must reject with an OperationError.
    await t.testMapAsyncCall(false, 'OperationError', buffer, mapMode);
    buffer.unmap();
    // Fix: this call was previously not awaited, so the test body could complete
    // before the expected-success mapping was actually verified.
    await t.testMapAsyncCall(true, null, buffer, mapMode);
  });
g.test('mapAsync,state,mapped')
  .desc(
    `Test that mapAsync is an error when called on a mapped buffer, but succeeds
  after unmapping it.`
  )
  .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
  .fn(async t => {
    const { mapMode } = t.params;
    const buffer = t.createMappableBuffer(mapMode, 16);
    await t.testMapAsyncCall(true, null, buffer, mapMode);
    // Mapping an already-mapped buffer must fail.
    await t.testMapAsyncCall(false, 'OperationError', buffer, mapMode);
    buffer.unmap();
    await t.testMapAsyncCall(true, null, buffer, mapMode);
  });
g.test('mapAsync,state,mappingPending')
  .desc(
    `Test that mapAsync is an error when called on a buffer that is being mapped,
  but succeeds after the previous mapping request is cancelled.`
  )
  .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
  .fn(async t => {
    const { mapMode } = t.params;
    const buffer = t.createMappableBuffer(mapMode, 16);
    // Start mapping the buffer, we are going to unmap it before it resolves so it will reject
    // the mapping promise with an AbortError.
    t.shouldReject('AbortError', buffer.mapAsync(mapMode));
    // Do the test of mapAsync while [[state]] is mapping pending. It has to be synchronous so
    // that we can unmap the previous mapping in the same stack frame and check this one doesn't
    // get canceled, but instead is treated as a real error.
    t.expectValidationError(() => {
      t.shouldReject('OperationError', buffer.mapAsync(mapMode));
    });
    // Unmap the first mapping. It should now be possible to successfully call mapAsync
    buffer.unmap();
    await t.testMapAsyncCall(true, null, buffer, mapMode);
  });
g.test('mapAsync,sizeUnspecifiedOOB')
  .desc(
    `Test that mapAsync with size unspecified rejects if offset > buffer.[[size]],
  with various cases at the limits of the buffer size or with a misaligned offset.
  Also test for an empty buffer.`
  )
  .paramsSubcasesOnly(u =>
    u //
      .combine('mapMode', kMapModeOptions)
      .combineWithParams([
        // 0 size buffer.
        { bufferSize: 0, offset: 0 },
        { bufferSize: 0, offset: 1 },
        { bufferSize: 0, offset: kOffsetAlignment },
        // Test with a buffer that's not empty.
        { bufferSize: 16, offset: 0 },
        { bufferSize: 16, offset: kOffsetAlignment },
        { bufferSize: 16, offset: 16 },
        { bufferSize: 16, offset: 17 },
        { bufferSize: 16, offset: 16 + kOffsetAlignment },
      ])
  )
  .fn(async t => {
    const { mapMode, bufferSize, offset } = t.params;
    const buffer = t.createMappableBuffer(mapMode, bufferSize);
    // With size unspecified, the mapped range is [offset, bufferSize), so only
    // offset <= bufferSize is valid.
    const success = offset <= bufferSize;
    await t.testMapAsyncCall(success, 'OperationError', buffer, mapMode, offset);
  });
g.test('mapAsync,offsetAndSizeAlignment')
  .desc("Test that mapAsync fails if the alignment of offset and size isn't correct.")
  .paramsSubcasesOnly(u =>
    u
      .combine('mapMode', kMapModeOptions)
      .combine('offset', [0, kOffsetAlignment, kOffsetAlignment / 2])
      .combine('size', [0, kSizeAlignment, kSizeAlignment / 2])
  )
  .fn(async t => {
    const { mapMode, offset, size } = t.params;
    const buffer = t.createMappableBuffer(mapMode, 16);
    const success = offset % kOffsetAlignment === 0 && size % kSizeAlignment === 0;
    await t.testMapAsyncCall(success, 'OperationError', buffer, mapMode, offset, size);
  });
g.test('mapAsync,offsetAndSizeOOB')
  .desc('Test that mapAsync fails if offset + size is larger than the buffer size.')
  .paramsSubcasesOnly(u =>
    u //
      .combine('mapMode', kMapModeOptions)
      .combineWithParams([
        // For a 0 size buffer
        { bufferSize: 0, offset: 0, size: 0 },
        { bufferSize: 0, offset: 0, size: 4 },
        { bufferSize: 0, offset: 8, size: 0 },
        // For a small buffer
        { bufferSize: 16, offset: 0, size: 16 },
        { bufferSize: 16, offset: kOffsetAlignment, size: 16 },
        { bufferSize: 16, offset: 16, size: 0 },
        { bufferSize: 16, offset: 16, size: kSizeAlignment },
        { bufferSize: 16, offset: 8, size: 0 },
        { bufferSize: 16, offset: 8, size: 8 },
        { bufferSize: 16, offset: 8, size: 8 + kSizeAlignment },
        // For a larger buffer
        { bufferSize: 1024, offset: 0, size: 1024 },
        { bufferSize: 1024, offset: kOffsetAlignment, size: 1024 },
        { bufferSize: 1024, offset: 1024, size: 0 },
        { bufferSize: 1024, offset: 1024, size: kSizeAlignment },
        { bufferSize: 1024, offset: 512, size: 0 },
        { bufferSize: 1024, offset: 512, size: 512 },
        { bufferSize: 1024, offset: 512, size: 512 + kSizeAlignment },
      ])
  )
  .fn(async t => {
    const { mapMode, bufferSize, size, offset } = t.params;
    const buffer = t.createMappableBuffer(mapMode, bufferSize);
    const success = offset + size <= bufferSize;
    await t.testMapAsyncCall(success, 'OperationError', buffer, mapMode, offset, size);
  });
g.test('getMappedRange,state,mapped')
  .desc('Test that it is valid to call getMappedRange in the mapped state')
  .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
  .fn(async t => {
    const { mapMode } = t.params;
    const bufferSize = 16;
    const buffer = t.createMappableBuffer(mapMode, bufferSize);
    await buffer.mapAsync(mapMode);
    const data = buffer.getMappedRange();
    t.expect(data instanceof ArrayBuffer);
    t.expect(data.byteLength === bufferSize);
    t.expectValidationError(() => {
      // map on already mapped buffer should be rejected
      const mapping = buffer.mapAsync(mapMode);
      // The failed mapAsync must not detach the existing mapping.
      t.expect(data.byteLength === bufferSize);
      t.shouldReject('OperationError', mapping);
    });
    t.expect(data.byteLength === bufferSize);
    buffer.unmap();
    // Unmapping detaches the previously returned ArrayBuffer.
    t.expect(data.byteLength === 0);
  });
g.test('getMappedRange,state,mappedAtCreation')
  .desc(
    `Test that, in the mapped-at-creation state, it is valid to call getMappedRange, for all buffer usages,
  and invalid to call mapAsync, for all map modes.`
  )
  .paramsSubcasesOnly(u =>
    u.combine('bufferUsage', kBufferUsages).combine('mapMode', kMapModeOptions)
  )
  .fn(async t => {
    const { bufferUsage, mapMode } = t.params;
    const bufferSize = 16;
    const buffer = t.device.createBuffer({
      usage: bufferUsage,
      size: bufferSize,
      mappedAtCreation: true,
    });
    const data = buffer.getMappedRange();
    t.expect(data instanceof ArrayBuffer);
    t.expect(data.byteLength === bufferSize);
    t.expectValidationError(() => {
      // map on already mapped buffer should be rejected
      const mapping = buffer.mapAsync(mapMode);
      // The failed mapAsync must not detach the existing mapping.
      t.expect(data.byteLength === bufferSize);
      t.shouldReject('OperationError', mapping);
    });
    t.expect(data.byteLength === bufferSize);
    buffer.unmap();
    // Unmapping detaches the previously returned ArrayBuffer.
    t.expect(data.byteLength === 0);
  });
g.test('getMappedRange,state,invalid_mappedAtCreation')
  .desc(
    `mappedAtCreation should return a mapped buffer, even if the buffer is invalid.
Like VRAM allocation (see map_oom), validation can be performed asynchronously (in the GPU process)
so the Content process doesn't necessarily know the buffer is invalid.`
  )
  .fn(async t => {
    const buffer = t.expectGPUError('validation', () =>
      t.device.createBuffer({
        mappedAtCreation: true,
        size: 16,
        usage: 0xffff_ffff, // Invalid usage
      })
    );
    // Should still be valid.
    buffer.getMappedRange();
  });
g.test('getMappedRange,state,mappedAgain')
  .desc(
    'Test that it is valid to call getMappedRange in the mapped state, even if there is a duplicate mapAsync before'
  )
  .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
  .fn(async t => {
    const { mapMode } = t.params;
    const buffer = t.createMappableBuffer(mapMode, 16);
    await buffer.mapAsync(mapMode);
    // call mapAsync again on already mapped buffer should fail
    await t.testMapAsyncCall(false, 'OperationError', buffer, mapMode);
    // getMappedRange should still succeed
    t.testGetMappedRangeCall(true, buffer);
  });
g.test('getMappedRange,state,unmapped')
  .desc(
    `Test that it is invalid to call getMappedRange in the unmapped state.
Test for various cases of being unmapped: at creation, after a mapAsync call or after being created mapped.`
  )
  .fn(async t => {
    // It is invalid to call getMappedRange when the buffer starts unmapped when created.
    {
      const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
      t.testGetMappedRangeCall(false, buffer);
    }
    // It is invalid to call getMappedRange when the buffer is unmapped after mapAsync.
    {
      const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
      await buffer.mapAsync(GPUMapMode.READ);
      buffer.unmap();
      t.testGetMappedRangeCall(false, buffer);
    }
    // It is invalid to call getMappedRange when the buffer is unmapped after mappedAtCreation.
    {
      const buffer = t.device.createBuffer({
        usage: GPUBufferUsage.MAP_READ,
        size: 16,
        mappedAtCreation: true,
      });
      buffer.unmap();
      t.testGetMappedRangeCall(false, buffer);
    }
  });
g.test('getMappedRange,subrange,mapped')
  .desc(
    `Test that old getMappedRange returned arraybuffer does not exist after unmap, and newly returned
  arraybuffer after new map has correct subrange`
  )
  .params(u => u.combine('mapMode', kMapModeOptions))
  .fn(async t => {
    const { mapMode } = t.params;
    const bufferSize = 16;
    const offset = 8;
    const subrangeSize = bufferSize - offset;
    const buffer = t.createMappableBuffer(mapMode, bufferSize);
    await buffer.mapAsync(mapMode);
    // Full-range mapping: the returned ArrayBuffer covers the whole buffer.
    const data0 = buffer.getMappedRange();
    t.expect(data0 instanceof ArrayBuffer);
    t.expect(data0.byteLength === bufferSize);
    buffer.unmap();
    // Unmapping detaches the previously returned ArrayBuffer.
    t.expect(data0.byteLength === 0);
    // Remap only the tail [offset, bufferSize) of the buffer.
    await buffer.mapAsync(mapMode, offset);
    // Consistency fix: was a hard-coded `8`; keep in sync with `offset`.
    const data1 = buffer.getMappedRange(offset);
    t.expect(data0.byteLength === 0);
    t.expect(data1.byteLength === subrangeSize);
  });
g.test('getMappedRange,subrange,mappedAtCreation')
  .desc(
    `Test that old getMappedRange returned arraybuffer does not exist after unmap and newly returned
arraybuffer after new map has correct subrange`
  )
  .fn(async t => {
    const bufferSize = 16;
    const offset = 8;
    const subrangeSize = bufferSize - offset;
    const buffer = t.device.createBuffer({
      size: bufferSize,
      usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
      mappedAtCreation: true,
    });

    // The mappedAtCreation mapping covers the whole buffer.
    const data0 = buffer.getMappedRange();
    t.expect(data0 instanceof ArrayBuffer);
    t.expect(data0.byteLength === bufferSize);

    // Unmapping must detach the ArrayBuffer returned by getMappedRange.
    buffer.unmap();
    t.expect(data0.byteLength === 0);

    // Remap only the tail of the buffer: the old ArrayBuffer stays detached and
    // the newly returned one covers exactly the mapped subrange.
    // (Use the `offset` constant rather than a duplicated magic literal.)
    await buffer.mapAsync(GPUMapMode.READ, offset);
    const data1 = buffer.getMappedRange(offset);
    t.expect(data0.byteLength === 0);
    t.expect(data1.byteLength === subrangeSize);
  });
g.test('getMappedRange,state,destroyed')
  .desc(
    `Test that it is invalid to call getMappedRange in the destroyed state.
Test for various cases of being destroyed: at creation, after a mapAsync call or after being created mapped.`
  )
  .fn(async t => {
    // Case 1: it is invalid to call getMappedRange when the buffer is destroyed when unmapped.
    {
      const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
      buffer.destroy();
      t.testGetMappedRangeCall(false, buffer);
    }

    // Case 2: it is invalid to call getMappedRange when the buffer is destroyed when mapped.
    {
      const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
      await buffer.mapAsync(GPUMapMode.READ);
      buffer.destroy();
      t.testGetMappedRangeCall(false, buffer);
    }

    // Case 3: it is invalid to call getMappedRange when the buffer is destroyed when mapped at creation.
    {
      const buffer = t.device.createBuffer({
        usage: GPUBufferUsage.MAP_READ,
        size: 16,
        mappedAtCreation: true,
      });
      buffer.destroy();
      t.testGetMappedRangeCall(false, buffer);
    }
  });
g.test('getMappedRange,state,mappingPending')
  .desc('Test that it is invalid to call getMappedRange in the mappingPending state.')
  .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
  .fn(async t => {
    const { mapMode } = t.params;
    const buffer = t.createMappableBuffer(mapMode, 16);

    // Intentionally not awaited yet, so the buffer stays in the mappingPending state below.
    /* noawait */ const mapping0 = buffer.mapAsync(mapMode);
    t.expectValidationError(() => {
      // A second mapping attempt while one is pending should be rejected.
      t.shouldReject('OperationError', buffer.mapAsync(mapMode));
    });

    // getMappedRange is invalid while the first mapAsync is still pending.
    t.testGetMappedRangeCall(false, buffer);

    await mapping0;
    // Once the buffer is actually mapped, getMappedRange becomes valid.
    t.testGetMappedRangeCall(true, buffer);
  });
g.test('getMappedRange,offsetAndSizeAlignment,mapped')
  .desc(`Test that getMappedRange fails if the alignment of offset and size isn't correct.`)
  .params(u =>
    u
      .combine('mapMode', kMapModeOptions)
      .beginSubcases()
      // The half-alignment values produce the misaligned (failing) subcases.
      .combine('mapOffset', [0, kOffsetAlignment])
      .combine('offset', [0, kOffsetAlignment, kOffsetAlignment / 2])
      .combine('size', [0, kSizeAlignment, kSizeAlignment / 2])
  )
  .fn(async t => {
    const { mapMode, mapOffset, offset, size } = t.params;
    const buffer = t.createMappableBuffer(mapMode, 32);
    await buffer.mapAsync(mapMode, mapOffset);

    // getMappedRange is valid only when both offset and size are multiples of
    // their respective alignment constants.
    const success = offset % kOffsetAlignment === 0 && size % kSizeAlignment === 0;
    t.testGetMappedRangeCall(success, buffer, offset + mapOffset, size);
  });
g.test('getMappedRange,offsetAndSizeAlignment,mappedAtCreation')
  .desc(`Test that getMappedRange fails if the alignment of offset and size isn't correct.`)
  .paramsSubcasesOnly(u =>
    u
      // The half-alignment values produce the misaligned (failing) subcases.
      .combine('offset', [0, kOffsetAlignment, kOffsetAlignment / 2])
      .combine('size', [0, kSizeAlignment, kSizeAlignment / 2])
  )
  .fn(async t => {
    const { offset, size } = t.params;
    const buffer = t.device.createBuffer({
      size: 16,
      usage: GPUBufferUsage.COPY_DST,
      mappedAtCreation: true,
    });

    // getMappedRange is valid only when both offset and size are multiples of
    // their respective alignment constants.
    const success = offset % kOffsetAlignment === 0 && size % kSizeAlignment === 0;
    t.testGetMappedRangeCall(success, buffer, offset, size);
  });
g.test('getMappedRange,sizeAndOffsetOOB,mappedAtCreation')
  .desc(
    `Test that getMappedRange size + offset must be less than the buffer size for a
buffer mapped at creation. (and offset has not constraints on its own)`
  )
  .paramsSubcasesOnly([
    // Tests for a zero-sized buffer, with and without a size defined.
    { bufferSize: 0, offset: undefined, size: undefined },
    { bufferSize: 0, offset: undefined, size: 0 },
    { bufferSize: 0, offset: undefined, size: kSizeAlignment },
    { bufferSize: 0, offset: 0, size: undefined },
    { bufferSize: 0, offset: 0, size: 0 },
    { bufferSize: 0, offset: kOffsetAlignment, size: undefined },
    { bufferSize: 0, offset: kOffsetAlignment, size: 0 },
    // Tests for a non-empty buffer, with an undefined offset.
    { bufferSize: 80, offset: undefined, size: 80 },
    { bufferSize: 80, offset: undefined, size: 80 + kSizeAlignment },
    // Tests for a non-empty buffer, with an undefined size.
    { bufferSize: 80, offset: undefined, size: undefined },
    { bufferSize: 80, offset: 0, size: undefined },
    { bufferSize: 80, offset: kOffsetAlignment, size: undefined },
    { bufferSize: 80, offset: 80, size: undefined },
    { bufferSize: 80, offset: 80 + kOffsetAlignment, size: undefined },
    // Tests for a non-empty buffer with a size defined.
    { bufferSize: 80, offset: 0, size: 80 },
    { bufferSize: 80, offset: 0, size: 80 + kSizeAlignment },
    { bufferSize: 80, offset: kOffsetAlignment, size: 80 },
    { bufferSize: 80, offset: 40, size: 40 },
    { bufferSize: 80, offset: 40 + kOffsetAlignment, size: 40 },
    { bufferSize: 80, offset: 40, size: 40 + kSizeAlignment },
  ])
  .fn(t => {
    const { bufferSize, offset, size } = t.params;
    const buffer = t.device.createBuffer({
      size: bufferSize,
      usage: GPUBufferUsage.COPY_DST,
      mappedAtCreation: true,
    });

    // Reify the defaults: offset -> 0, size -> rest of the buffer.
    const actualOffset = offset ?? 0;
    const actualSize = size ?? bufferSize - actualOffset;

    // Valid iff the requested range lies entirely within the buffer.
    const success = actualOffset <= bufferSize && actualOffset + actualSize <= bufferSize;
    t.testGetMappedRangeCall(success, buffer, offset, size);
  });
g.test('getMappedRange,sizeAndOffsetOOB,mapped')
  .desc('Test that getMappedRange size + offset must be less than the mapAsync range.')
  .paramsSubcasesOnly(u =>
    u //
      .combine('mapMode', kMapModeOptions)
      .combineWithParams([
        // Tests for an empty buffer, and implicit mapAsync size.
        { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: undefined, size: undefined },
        { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: undefined, size: 0 },
        { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: undefined, size: kSizeAlignment },
        { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: 0, size: undefined },
        { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: 0, size: 0 },
        { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: kOffsetAlignment, size: undefined },
        { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: kOffsetAlignment, size: 0 },
        // Tests for an empty buffer, and explicit mapAsync size.
        { bufferSize: 0, mapOffset: 0, mapSize: 0, offset: undefined, size: undefined },
        { bufferSize: 0, mapOffset: 0, mapSize: 0, offset: 0, size: undefined },
        { bufferSize: 0, mapOffset: 0, mapSize: 0, offset: 0, size: 0 },
        { bufferSize: 0, mapOffset: 0, mapSize: 0, offset: kOffsetAlignment, size: undefined },
        { bufferSize: 0, mapOffset: 0, mapSize: 0, offset: kOffsetAlignment, size: 0 },
        // Test for a fully implicit mapAsync call
        { bufferSize: 80, mapOffset: undefined, mapSize: undefined, offset: 0, size: 80 },
        { bufferSize: 80, mapOffset: undefined, mapSize: undefined, offset: 0, size: 80 + kSizeAlignment },
        { bufferSize: 80, mapOffset: undefined, mapSize: undefined, offset: kOffsetAlignment, size: 80 },
        // Test for a mapAsync call with an implicit size
        { bufferSize: 80, mapOffset: 24, mapSize: undefined, offset: 24, size: 80 - 24 },
        { bufferSize: 80, mapOffset: 24, mapSize: undefined, offset: 0, size: 80 - 24 + kSizeAlignment },
        { bufferSize: 80, mapOffset: 24, mapSize: undefined, offset: kOffsetAlignment, size: 80 - 24 },
        // Test for a non-empty buffer fully mapped.
        { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: 0, size: 80 },
        { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: kOffsetAlignment, size: 80 },
        { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: 0, size: 80 + kSizeAlignment },
        { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: 40, size: 40 },
        { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: 40 + kOffsetAlignment, size: 40 },
        { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: 40, size: 40 + kSizeAlignment },
        // Test for a buffer partially mapped.
        { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 24, size: 40 },
        { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 24 - kOffsetAlignment, size: 40 },
        { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 24 + kOffsetAlignment, size: 40 },
        { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 24, size: 40 + kSizeAlignment },
        // Test for a partially mapped buffer with implicit size and offset for getMappedRange.
        // - Buffer partially mapped in the middle
        { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: undefined, size: undefined },
        { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 0, size: undefined },
        { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 24, size: undefined },
        // - Buffer partially mapped to the end
        { bufferSize: 80, mapOffset: 24, mapSize: undefined, offset: 24, size: undefined },
        { bufferSize: 80, mapOffset: 24, mapSize: undefined, offset: 80, size: undefined },
        // - Buffer partially mapped from the start
        { bufferSize: 80, mapOffset: 0, mapSize: 64, offset: undefined, size: undefined },
        { bufferSize: 80, mapOffset: 0, mapSize: 64, offset: undefined, size: 64 },
      ])
  )
  .fn(async t => {
    const { mapMode, bufferSize, mapOffset, mapSize, offset, size } = t.params;
    const buffer = t.createMappableBuffer(mapMode, bufferSize);
    await buffer.mapAsync(mapMode, mapOffset, mapSize);

    // Reify implicit (undefined) bounds: both mapAsync and getMappedRange
    // default offset to 0 and size to the rest of the buffer.
    const actualMapOffset = mapOffset ?? 0;
    const actualMapSize = mapSize ?? bufferSize - actualMapOffset;
    const actualOffset = offset ?? 0;
    const actualSize = size ?? bufferSize - actualOffset;

    // Valid only when the requested range lies entirely inside the mapped region.
    const success =
      actualOffset >= actualMapOffset &&
      actualOffset <= bufferSize &&
      actualOffset + actualSize <= actualMapOffset + actualMapSize;
    t.testGetMappedRangeCall(success, buffer, offset, size);
  });
g.test('getMappedRange,disjointRanges')
  .desc('Test that the ranges asked through getMappedRange must be disjoint.')
  .paramsSubcasesOnly(u =>
    u //
      .combine('remapBetweenCalls', [false, true])
      .combineWithParams([
        // Disjoint ranges with one that's empty.
        { offset1: 8, size1: 0, offset2: 8, size2: 8 },
        { offset1: 16, size1: 0, offset2: 8, size2: 8 },
        { offset1: 8, size1: 8, offset2: 8, size2: 0 },
        { offset1: 8, size1: 8, offset2: 16, size2: 0 },
        // Disjoint ranges with both non-empty.
        { offset1: 0, size1: 8, offset2: 8, size2: 8 },
        { offset1: 16, size1: 8, offset2: 8, size2: 8 },
        { offset1: 8, size1: 8, offset2: 0, size2: 8 },
        { offset1: 8, size1: 8, offset2: 16, size2: 8 },
        // Empty range contained inside another one.
        { offset1: 16, size1: 20, offset2: 24, size2: 0 },
        { offset1: 24, size1: 0, offset2: 16, size2: 20 },
        // Ranges that overlap only partially.
        { offset1: 16, size1: 20, offset2: 8, size2: 20 },
        { offset1: 16, size1: 20, offset2: 32, size2: 20 },
        // Ranges that include one another.
        { offset1: 0, size1: 80, offset2: 16, size2: 20 },
        { offset1: 16, size1: 20, offset2: 0, size2: 80 },
      ])
  )
  .fn(async t => {
    const { offset1, size1, offset2, size2, remapBetweenCalls } = t.params;
    const buffer = t.device.createBuffer({ size: 80, usage: GPUBufferUsage.MAP_READ });
    await buffer.mapAsync(GPUMapMode.READ);

    // The first getMappedRange call on a fresh mapping is always valid.
    t.testGetMappedRangeCall(true, buffer, offset1, size1);

    // Optionally unmap and remap in between; after a remap any range may be
    // requested again, even one overlapping the first.
    if (remapBetweenCalls) {
      buffer.unmap();
      await buffer.mapAsync(GPUMapMode.READ);
    }

    // Two ranges are disjoint iff one starts at or after the end of the other.
    const range1StartsAfter2 = offset1 >= offset2 + size2;
    const range2StartsAfter1 = offset2 >= offset1 + size1;
    const disjoint = range1StartsAfter2 || range2StartsAfter1;
    const success = disjoint || remapBetweenCalls;
    t.testGetMappedRangeCall(success, buffer, offset2, size2);
  });
// NOTE(review): 'disjoinRanges_many' (sic) is missing a 't'; the string is the
// test's query name, so fixing the typo would rename the test — left unchanged.
g.test('getMappedRange,disjoinRanges_many')
  .desc('Test getting a lot of small ranges, and that the disjoint check checks them all.')
  .fn(async t => {
    const kStride = 256;
    const kNumStrides = 256;
    const buffer = t.device.createBuffer({
      size: kStride * kNumStrides,
      usage: GPUBufferUsage.MAP_READ,
    });
    await buffer.mapAsync(GPUMapMode.READ);

    // Get a lot of small mapped ranges: 8 bytes at the start of each stride.
    for (let stride = 0; stride < kNumStrides; stride++) {
      t.testGetMappedRangeCall(true, buffer, stride * kStride, 8);
    }

    // Check for each range it is invalid to get a range that overlaps it and check that it is valid
    // to get ranges for the rest of the buffer.
    for (let stride = 0; stride < kNumStrides; stride++) {
      t.testGetMappedRangeCall(false, buffer, stride * kStride, kStride);
      t.testGetMappedRangeCall(true, buffer, stride * kStride + 8, kStride - 8);
    }
  });
g.test('unmap,state,unmapped')
  .desc(
    `Test it is invalid to call unmap on a buffer that is unmapped (at creation, or after
mappedAtCreation or mapAsync)`
  )
  .fn(async t => {
    // Case 1: it is invalid to call unmap after creation of an unmapped buffer.
    {
      const buffer = t.device.createBuffer({ size: 16, usage: GPUBufferUsage.MAP_READ });
      t.expectValidationError(() => {
        buffer.unmap();
      });
    }

    // Case 2: it is invalid to call unmap after unmapping a mapAsynced buffer.
    {
      const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
      await buffer.mapAsync(GPUMapMode.READ);
      buffer.unmap();
      t.expectValidationError(() => {
        buffer.unmap();
      });
    }

    // Case 3: it is invalid to call unmap after unmapping a mappedAtCreation buffer.
    {
      const buffer = t.device.createBuffer({
        usage: GPUBufferUsage.MAP_READ,
        size: 16,
        mappedAtCreation: true,
      });
      buffer.unmap();
      t.expectValidationError(() => {
        buffer.unmap();
      });
    }
  });
g.test('unmap,state,destroyed')
  .desc(
    `Test it is invalid to call unmap on a buffer that is destroyed (at creation, or after
mappedAtCreation or mapAsync)`
  )
  .fn(async t => {
    // Case 1: it is invalid to call unmap after destruction of an unmapped buffer.
    {
      const buffer = t.device.createBuffer({ size: 16, usage: GPUBufferUsage.MAP_READ });
      buffer.destroy();
      t.expectValidationError(() => {
        buffer.unmap();
      });
    }

    // Case 2: it is invalid to call unmap after destroying a mapAsynced buffer.
    {
      const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
      await buffer.mapAsync(GPUMapMode.READ);
      buffer.destroy();
      t.expectValidationError(() => {
        buffer.unmap();
      });
    }

    // Case 3: it is invalid to call unmap after destroying a mappedAtCreation buffer.
    {
      const buffer = t.device.createBuffer({
        usage: GPUBufferUsage.MAP_READ,
        size: 16,
        mappedAtCreation: true,
      });
      buffer.destroy();
      t.expectValidationError(() => {
        buffer.unmap();
      });
    }
  });
g.test('unmap,state,mappedAtCreation')
  .desc('Test it is valid to call unmap on a buffer mapped at creation, for various usages')
  .paramsSubcasesOnly(u =>
    u //
      .combine('bufferUsage', kBufferUsages)
  )
  .fn(t => {
    const { bufferUsage } = t.params;
    const buffer = t.device.createBuffer({ size: 16, usage: bufferUsage, mappedAtCreation: true });
    // No explicit expectation: unmapping a mappedAtCreation buffer must not error.
    buffer.unmap();
  });
g.test('unmap,state,mapped')
  .desc("Test it is valid to call unmap on a buffer that's mapped")
  .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
  .fn(async t => {
    const { mapMode } = t.params;
    const buffer = t.createMappableBuffer(mapMode, 16);
    await buffer.mapAsync(mapMode);
    // No explicit expectation: unmapping a successfully mapped buffer must not error.
    buffer.unmap();
  });
g.test('unmap,state,mappingPending')
  .desc("Test it is valid to call unmap on a buffer that's being mapped")
  .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
  .fn(t => {
    const { mapMode } = t.params;
    const buffer = t.createMappableBuffer(mapMode, 16);

    // Kick off a mapAsync without awaiting it, so the buffer is mappingPending.
    const mapping = buffer.mapAsync(mapMode);
    // Unmapping while the map is still pending aborts it with an AbortError.
    t.shouldReject('AbortError', mapping);
    buffer.unmap();
  });
g.test('gc_behavior,mappedAtCreation')
  .desc(
    "Test that GCing the buffer while mappings are handed out doesn't invalidate them - mappedAtCreation case"
  )
  .fn(async t => {
    // `buffer` is deliberately nullable so the only reference can be dropped below.
    let buffer = null;
    buffer = t.device.createBuffer({
      size: 256,
      usage: GPUBufferUsage.COPY_DST,
      mappedAtCreation: true,
    });

    // Write some non-zero data to the buffer.
    const contents = new Uint32Array(buffer.getMappedRange());
    for (let i = 0; i < contents.length; i++) {
      contents[i] = i;
    }

    // Trigger garbage collection that should collect the buffer (or as if it collected it)
    // NOTE: This won't fail unless the browser immediately starts reusing the memory, or gives it
    // back to the OS. One good option for browsers to check their logic is good is to zero-out the
    // memory on GPUBuffer (or internal gpu::Buffer-like object) destruction.
    buffer = null;
    await attemptGarbageCollection();

    // Use the mapping again both for read and write, it should work.
    for (let i = 0; i < contents.length; i++) {
      t.expect(contents[i] === i);
      contents[i] = i + 1;
    }
  });
g.test('gc_behavior,mapAsync')
  .desc(
    "Test that GCing the buffer while mappings are handed out doesn't invalidate them - mapAsync case"
  )
  .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
  .fn(async t => {
    const { mapMode } = t.params;

    // `buffer` is deliberately nullable so the only reference can be dropped below.
    let buffer = null;
    buffer = t.createMappableBuffer(mapMode, 256);
    await buffer.mapAsync(mapMode);

    // Write some non-zero data to the buffer.
    const contents = new Uint32Array(buffer.getMappedRange());
    for (let i = 0; i < contents.length; i++) {
      contents[i] = i;
    }

    // Trigger garbage collection that should collect the buffer (or as if it collected it)
    // NOTE: This won't fail unless the browser immediately starts reusing the memory, or gives it
    // back to the OS. One good option for browsers to check their logic is good is to zero-out the
    // memory on GPUBuffer (or internal gpu::Buffer-like object) destruction.
    buffer = null;
    await attemptGarbageCollection();

    // Use the mapping again both for read and write, it should work.
    for (let i = 0; i < contents.length; i++) {
      t.expect(contents[i] === i);
      contents[i] = i + 1;
    }
  });
|
sarahM0/cts
|
src/unittests/unit_test.ts
|
<filename>src/unittests/unit_test.ts
import { Fixture } from '../common/framework/fixture.js';
/** Fixture subclass used by the `src/unittests` suite; currently adds nothing beyond `Fixture`. */
export class UnitTest extends Fixture {}
|
sarahM0/cts
|
src/demo/json.spec.ts
|
<filename>src/demo/json.spec.ts
export const description = 'Description for a.spec.ts';
import { makeTestGroup } from '../common/framework/test_group.js';
import { UnitTest } from '../unittests/unit_test.js';
export const g = makeTestGroup(UnitTest);
// Demo test whose parameter is a nested object; the body is intentionally a no-op.
g.test('json')
  .paramsSimple([{ p: { x: 1, y: 'two' } }])
  .fn(() => {});
|
sarahM0/cts
|
src/webgpu/api/validation/encoding/cmds/render/setPipeline.spec.ts
|
export const description = `
Validation tests for setPipeline on render pass and render bundle.
`;
import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
import { kRenderEncodeTypes } from '../../../../../util/command_buffer_maker.js';
import { ValidationTest } from '../../../validation_test.js';
import { kRenderEncodeTypeParams } from './render.js';
export const g = makeTestGroup(ValidationTest);
g.test('invalid_pipeline')
  .desc(
    `
Tests setPipeline should generate an error iff using an 'invalid' pipeline.
`
  )
  .paramsSubcasesOnly(u =>
    u.combine('encoderType', kRenderEncodeTypes).combine('state', ['valid', 'invalid'] as const)
  )
  .fn(t => {
    const { encoderType, state } = t.params;
    const pipeline = t.createRenderPipelineWithState(state);
    const { encoder, validateFinish } = t.createEncoder(encoderType);
    encoder.setPipeline(pipeline);
    // Finishing the encoder must fail exactly when the pipeline was invalid.
    validateFinish(state !== 'invalid');
  });
// Not yet implemented; .unimplemented() registers the test as a stub.
g.test('pipeline,device_mismatch')
  .desc('Tests setPipeline cannot be called with a render pipeline created from another device')
  .paramsSubcasesOnly(kRenderEncodeTypeParams.combine('mismatched', [true, false]))
  .unimplemented();
|
sarahM0/cts
|
src/webgpu/util/math.ts
|
<reponame>sarahM0/cts
import { assert } from '../../common/util/util.js';
import { kBit } from '../shader/execution/builtin/builtin.js';
import { f32Bits, Scalar } from './conversion.js';
/**
 * A multiple of 8 guaranteed to be way too large to allocate (just under 8 pebibytes).
 * This is a "safe" integer (ULP <= 1.0) very close to MAX_SAFE_INTEGER: its value is 2**53 - 8.
 *
 * Note: allocations of this size are likely to exceed limitations other than just the system's
 * physical memory, so test cases are also needed to try to trigger "true" OOM.
 */
export const kMaxSafeMultipleOf8 = Number.MAX_SAFE_INTEGER - 7;
/**
 * Round `n` up to the next multiple of `alignment` (inclusive: a multiple stays unchanged).
 * @param n - non-negative integer to round.
 * @param alignment - positive integer step.
 * @returns the smallest multiple of `alignment` that is `>= n`.
 */
// TODO: Rename to `roundUp`
export function align(n: number, alignment: number): number {
  assert(Number.isInteger(n) && n >= 0, 'n must be a non-negative integer');
  assert(Number.isInteger(alignment) && alignment > 0, 'alignment must be a positive integer');
  // Use pure integer arithmetic instead of Math.ceil(n / alignment) * alignment:
  // the FP division can lose precision for very large `n` (this file defines
  // kMaxSafeMultipleOf8 near 2**53), while `%` on safe integers is exact.
  const remainder = n % alignment;
  return remainder === 0 ? n : n + (alignment - remainder);
}
/**
 * Round `n` down to the next multiple of `alignment` (inclusive: a multiple stays unchanged).
 * @param n - non-negative integer to round.
 * @param alignment - positive integer step.
 * @returns the largest multiple of `alignment` that is `<= n`.
 */
export function roundDown(n: number, alignment: number): number {
  assert(Number.isInteger(n) && n >= 0, 'n must be a non-negative integer');
  assert(Number.isInteger(alignment) && alignment > 0, 'alignment must be a positive integer');
  // Pure integer arithmetic: exact even for very large `n`, unlike
  // Math.floor(n / alignment) * alignment which goes through FP division.
  return n - (n % alignment);
}
/** Clamp a number into the inclusive range [`min`, `max`] (requires `max >= min`). */
export function clamp(n: number, { min, max }: { min: number; max: number }): number {
  assert(max >= min);
  // Raise to the lower bound first, then cap at the upper bound.
  const atLeastMin = Math.max(n, min);
  return Math.min(atLeastMin, max);
}
/**
 * @returns the Units of Last Place difference between the numbers a and b.
 * If either `a` or `b` are not finite numbers, then diffULP() returns Infinity.
 */
export function diffULP(a: number, b: number): number {
  if (!Number.isFinite(a) || !Number.isFinite(b)) {
    return Infinity;
  }

  // Reinterpret both values as their raw f32 bit patterns.
  const bits = new Uint32Array(new Float32Array([a, b]).buffer);

  // Convert a raw bit pattern to an ordinal counted in ULP steps from zero.
  // Subnormals (and zeros) are flushed to ordinal 0, and normal numbers are
  // shifted down so the subnormal range contributes no steps.
  const toOrdinal = (u32: number): number => {
    const isSubnormalOrZero = (u32 & 0x7f800000) === 0;
    return isSubnormalOrZero ? 0 : (u32 & 0x7fffffff) - 0x7fffff;
  };

  const ordA = toOrdinal(bits[0]);
  const ordB = toOrdinal(bits[1]);

  // Same sign: the distance is the ordinal gap. Opposite signs: the path
  // crosses zero, so the distances from zero add up.
  const sameSign = ((bits[0] ^ bits[1]) & 0x80000000) === 0;
  return sameSign ? Math.abs(ordA - ordB) : ordA + ordB;
}
/**
 * @returns the next single precision floating point value after |val|,
 * towards +inf if |dir| is true, otherwise towards -inf.
 * For -/+0 the nextAfter will be the closest subnormal in the correct
 * direction, since -0 === +0.
 */
export function nextAfter(val: number, dir: boolean = true): Scalar {
  // NaN stays NaN regardless of direction.
  if (Number.isNaN(val)) {
    return f32Bits(kBit.f32.nan.positive.s);
  }
  // Infinities are saturating: stepping from them yields the same infinity.
  if (val === Number.POSITIVE_INFINITY) {
    return f32Bits(kBit.f32.infinity.positive);
  }
  if (val === Number.NEGATIVE_INFINITY) {
    return f32Bits(kBit.f32.infinity.negative);
  }

  // Reinterpret the value as its raw f32 bit pattern.
  const converted = new Uint32Array(new Float32Array([val]).buffer)[0];

  // Both zeros step to the nearest subnormal in the requested direction.
  if (converted === kBit.f32.positive.zero || converted === kBit.f32.negative.zero) {
    return dir
      ? f32Bits(kBit.f32.subnormal.positive.min)
      : f32Bits(kBit.f32.subnormal.negative.max);
  }

  // In the sign-magnitude f32 encoding, moving away from zero increments the
  // bit pattern and moving toward zero decrements it.
  const movingAwayFromZero = ((converted & 0x80000000) === 0) === dir;
  const stepped = movingAwayFromZero ? converted + 1 : converted - 1;

  // Stepping past the largest finite magnitude lands on an infinity exponent.
  if ((stepped & 0x7f800000) === 0x7f800000) {
    return dir ? f32Bits(kBit.f32.infinity.positive) : f32Bits(kBit.f32.infinity.negative);
  }
  return f32Bits(stepped);
}
|
sarahM0/cts
|
src/webgpu/api/operation/vertex_state/basic.spec.ts
|
export const description = `
- Baseline tests checking vertex/instance IDs, with:
- No vertexState at all (i.e. no vertex buffers)
- One vertex buffer with no attributes
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
|
sarahM0/cts
|
src/webgpu/web_platform/canvas/getPreferredFormat.spec.ts
|
<filename>src/webgpu/web_platform/canvas/getPreferredFormat.spec.ts
export const description = `
Tests for GPUCanvasContext.getPreferredFormat.
`;
import { Fixture } from '../../../common/framework/fixture.js';
import { makeTestGroup } from '../../../common/framework/test_group.js';
export const g = makeTestGroup(Fixture);
g.test('value').desc(`Ensure getPreferredFormat returns one of the valid values.`).unimplemented();
|
sarahM0/cts
|
src/webgpu/api/operation/sampling/lod_clamp.spec.ts
|
export const description = `
Tests the behavior of LOD clamping (lodMinClamp, lodMaxClamp).
TODO:
- Write a test that can test the exact clamping behavior
- Test a bunch of values, including very large/small ones.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
|
sarahM0/cts
|
src/webgpu/api/validation/create_pipeline.spec.ts
|
export const description = `
TODO:
For {createRenderPipeline, createComputePipeline}, start with a valid descriptor (control case),
then for each stage {{vertex, fragment}, compute}, make exactly one of the following errors:
- one stage's module is an invalid object
- one stage's entryPoint doesn't exist
- {different name, empty string, name that's almost the same but differs in some subtle unicode way}
`;
import { makeTestGroup } from '../../../common/framework/test_group.js';
import { ValidationTest } from './validation_test.js';
export const g = makeTestGroup(ValidationTest);
|
sarahM0/cts
|
src/webgpu/api/operation/buffers/map.spec.ts
|
export const description = `
Test the operation of buffer mapping, specifically the data contents written via
map-write/mappedAtCreation, and the contents of buffers returned by getMappedRange on
buffers which are mapped-read/mapped-write/mappedAtCreation.
range: used for getMappedRange
mapRegion: used for mapAsync
mapRegionBoundModes is used to get mapRegion from range:
- default-expand: expand mapRegion to buffer bound by setting offset/size to undefined
- explicit-expand: expand mapRegion to buffer bound by explicitly calculating offset/size
- minimal: make mapRegion to be the same as range which is the minimal range to make getMappedRange input valid
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { assert, memcpy } from '../../../../common/util/util.js';
import { checkElementsEqual } from '../../../util/check_contents.js';
import { MappingTest } from './mapping_test.js';
export const g = makeTestGroup(MappingTest);
// Buffer-size / `range` combinations shared by the map-read and map-write tests
// below. `range` is spread into getMappedRange as [offset?, size?]; `undefined`
// (or omitted) entries exercise the defaults (offset -> 0, size -> rest of buffer).
const kSubcases = [
  { size: 0, range: [] },
  { size: 0, range: [undefined] },
  { size: 0, range: [undefined, undefined] },
  { size: 0, range: [0] },
  { size: 0, range: [0, undefined] },
  { size: 0, range: [0, 0] },
  { size: 12, range: [] },
  { size: 12, range: [undefined] },
  { size: 12, range: [undefined, undefined] },
  { size: 12, range: [0] },
  { size: 12, range: [0, undefined] },
  { size: 12, range: [0, 12] },
  { size: 12, range: [0, 0] },
  { size: 12, range: [8] },
  { size: 12, range: [8, undefined] },
  { size: 12, range: [8, 4] },
  { size: 28, range: [8, 8] },
  { size: 28, range: [8, 12] },
  { size: 512 * 1024, range: [] },
] as const;
/**
 * Expand a possibly-partial `[offset?, size?]` getMappedRange argument list into
 * a concrete `[offset, size]` pair for a buffer of `bufferSize` bytes
 * (offset defaults to 0, size defaults to the rest of the buffer).
 */
function reifyMapRange(bufferSize: number, range: readonly [number?, number?]): [number, number] {
  const [maybeOffset, maybeSize] = range;
  const offset = maybeOffset ?? 0;
  const size = maybeSize ?? bufferSize - offset;
  return [offset, size];
}
const mapRegionBoundModes = ['default-expand', 'explicit-expand', 'minimal'] as const;
type MapRegionBoundMode = typeof mapRegionBoundModes[number];

/**
 * Derive the `[offset?, size?]` arguments for mapAsync from a concrete
 * getMappedRange `range`, widening each side according to its bound mode:
 * - 'minimal': bound exactly at the range edge,
 * - 'explicit-expand': bound explicitly at the buffer edge,
 * - 'default-expand': bound at the buffer edge implicitly (via `undefined`).
 */
function getRegionForMap(
  bufferSize: number,
  range: [number, number],
  {
    mapAsyncRegionLeft,
    mapAsyncRegionRight,
  }: {
    mapAsyncRegionLeft: MapRegionBoundMode;
    mapAsyncRegionRight: MapRegionBoundMode;
  }
) {
  const [rangeOffset, rangeSize] = range;
  // Left bound: either exactly the range start ('minimal') or the buffer start.
  const regionStart = mapAsyncRegionLeft === 'minimal' ? rangeOffset : 0;
  // Right bound: either exactly the range end ('minimal') or the buffer end.
  const regionEnd = mapAsyncRegionRight === 'minimal' ? rangeOffset + rangeSize : bufferSize;
  // 'default-expand' passes `undefined` so mapAsync applies its own defaults.
  const offsetArg = mapAsyncRegionLeft === 'default-expand' ? undefined : regionStart;
  const sizeArg = mapAsyncRegionRight === 'default-expand' ? undefined : regionEnd - regionStart;
  return [offsetArg, sizeArg] as const;
}
g.test('mapAsync,write')
  .desc(
    `Use map-write to write to various ranges of variously-sized buffers, then expectContents
(which does copyBufferToBuffer + map-read) to ensure the contents were written.`
  )
  .params(u =>
    u
      .combine('mapAsyncRegionLeft', mapRegionBoundModes)
      .combine('mapAsyncRegionRight', mapRegionBoundModes)
      .beginSubcases()
      .combineWithParams(kSubcases)
  )
  .fn(async t => {
    const { size, range } = t.params;
    const [rangeOffset, rangeSize] = reifyMapRange(size, range);

    const buffer = t.device.createBuffer({
      size,
      usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
    });

    // Map a region at least covering `range`, then get exactly `range`.
    const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
    await buffer.mapAsync(GPUMapMode.WRITE, ...mapRegion);
    const arrayBuffer = buffer.getMappedRange(...range);
    // Write + verification are delegated to the MappingTest helper.
    t.checkMapWrite(buffer, rangeOffset, arrayBuffer, rangeSize);
  });
g.test('mapAsync,write,unchanged_ranges_preserved')
  .desc(
    `Use mappedAtCreation or mapAsync to write to various ranges of variously-sized buffers, then
use mapAsync to map a different range and zero it out. Finally use expectGPUBufferValuesEqual
(which does copyBufferToBuffer + map-read) to verify that contents originally written outside the
second mapped range were not altered.`
  )
  .params(u =>
    u
      .beginSubcases()
      .combine('mappedAtCreation', [false, true])
      .combineWithParams([
        { size: 12, range1: [], range2: [8] },
        { size: 12, range1: [], range2: [0, 8] },
        { size: 12, range1: [0, 8], range2: [8] },
        { size: 12, range1: [8], range2: [0, 8] },
        { size: 28, range1: [], range2: [8, 8] },
        { size: 28, range1: [8, 16], range2: [16, 8] },
        { size: 32, range1: [16, 12], range2: [8, 16] },
        { size: 32, range1: [8, 8], range2: [24, 4] },
      ] as const)
  )
  .fn(async t => {
    const { size, range1, range2, mappedAtCreation } = t.params;
    const [rangeOffset1, rangeSize1] = reifyMapRange(size, range1);
    const [rangeOffset2, rangeSize2] = reifyMapRange(size, range2);

    const buffer = t.device.createBuffer({
      mappedAtCreation,
      size,
      usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
    });

    // If the buffer is not mappedAtCreation map it now.
    if (!mappedAtCreation) {
      await buffer.mapAsync(GPUMapMode.WRITE);
    }

    // Set the initial contents of the buffer: a 1-based counting pattern over
    // range1, mirrored into `expectedBuffer` for the final comparison.
    const init = buffer.getMappedRange(...range1);

    assert(init.byteLength === rangeSize1);
    const expectedBuffer = new ArrayBuffer(size);
    const expected = new Uint32Array(
      expectedBuffer,
      rangeOffset1,
      rangeSize1 / Uint32Array.BYTES_PER_ELEMENT
    );
    const data = new Uint32Array(init);
    for (let i = 0; i < data.length; ++i) {
      data[i] = expected[i] = i + 1;
    }
    buffer.unmap();

    // Write to a second range of the buffer, zeroing it both in the buffer and
    // in the mirrored expectation.
    await buffer.mapAsync(GPUMapMode.WRITE, ...range2);
    const init2 = buffer.getMappedRange(...range2);

    assert(init2.byteLength === rangeSize2);
    const expected2 = new Uint32Array(
      expectedBuffer,
      rangeOffset2,
      rangeSize2 / Uint32Array.BYTES_PER_ELEMENT
    );
    const data2 = new Uint32Array(init2);
    for (let i = 0; i < data2.length; ++i) {
      data2[i] = expected2[i] = 0;
    }
    buffer.unmap();

    // Verify that the range of the buffer which was not overwritten was preserved.
    t.expectGPUBufferValuesEqual(buffer, expected, rangeOffset1);
  });
g.test('mapAsync,read')
  .desc(
    `Use mappedAtCreation to initialize various ranges of variously-sized buffers, then
map-read and check the read-back result.`
  )
  .params(u =>
    u
      .combine('mapAsyncRegionLeft', mapRegionBoundModes)
      .combine('mapAsyncRegionRight', mapRegionBoundModes)
      .beginSubcases()
      .combineWithParams(kSubcases)
  )
  .fn(async t => {
    const { size, range } = t.params;
    const [rangeOffset, rangeSize] = reifyMapRange(size, range);

    const buffer = t.device.createBuffer({
      mappedAtCreation: true,
      size,
      usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
    });

    // Initialize the target range with a 1-based counting pattern, mirrored
    // into `expected` for the later comparison.
    const init = buffer.getMappedRange(...range);

    assert(init.byteLength === rangeSize);
    const expected = new Uint32Array(new ArrayBuffer(rangeSize));
    const data = new Uint32Array(init);
    for (let i = 0; i < data.length; ++i) {
      data[i] = expected[i] = i + 1;
    }
    buffer.unmap();

    // Map for reading (region at least covering `range`) and verify the bytes.
    const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
    await buffer.mapAsync(GPUMapMode.READ, ...mapRegion);
    const actual = new Uint8Array(buffer.getMappedRange(...range));
    t.expectOK(checkElementsEqual(actual, new Uint8Array(expected.buffer)));
  });
// Verify that every TypedArray flavor can read back correct values from a mapped buffer.
g.test('mapAsync,read,typedArrayAccess')
  .desc(`Use various TypedArray types to read back from a mapped buffer`)
  .params(u =>
    u
      .combine('mapAsyncRegionLeft', mapRegionBoundModes)
      .combine('mapAsyncRegionRight', mapRegionBoundModes)
      .beginSubcases()
      .combineWithParams([
        { size: 80, range: [] },
        { size: 160, range: [] },
        { size: 160, range: [0, 80] },
        { size: 160, range: [80] },
        { size: 160, range: [40, 120] },
        { size: 160, range: [40] },
      ] as const)
  )
  .fn(async t => {
    const { size, range } = t.params;
    const [rangeOffset, rangeSize] = reifyMapRange(size, range);
    // Fill an array buffer with a variety of values of different types.
    // Byte layout of the 80-byte expected buffer:
    //   [0,2) uint8   [2,4) int8    [4,8) uint16   [8,12) int16
    //   [12,20) uint32  [20,28) int32  [28,40) float32  [40,80) float64
    const expectedArrayBuffer = new ArrayBuffer(80);
    const uint8Expected = new Uint8Array(expectedArrayBuffer, 0, 2);
    uint8Expected[0] = 1;
    uint8Expected[1] = 255;
    const int8Expected = new Int8Array(expectedArrayBuffer, 2, 2);
    int8Expected[0] = -1;
    int8Expected[1] = 127;
    const uint16Expected = new Uint16Array(expectedArrayBuffer, 4, 2);
    uint16Expected[0] = 1;
    uint16Expected[1] = 65535;
    const int16Expected = new Int16Array(expectedArrayBuffer, 8, 2);
    int16Expected[0] = -1;
    int16Expected[1] = 32767;
    const uint32Expected = new Uint32Array(expectedArrayBuffer, 12, 2);
    uint32Expected[0] = 1;
    uint32Expected[1] = 4294967295;
    const int32Expected = new Int32Array(expectedArrayBuffer, 20, 2);
    // BUGFIX: these assignments previously targeted indices 2 and 3, which are out
    // of bounds for this 2-element view. Out-of-range typed-array writes are
    // silently ignored, so the int32 lane stayed all-zero on both sides of the
    // comparison and was never actually exercised. Use indices 0 and 1.
    int32Expected[0] = -1;
    int32Expected[1] = 2147483647;
    const float32Expected = new Float32Array(expectedArrayBuffer, 28, 3);
    float32Expected[0] = 1;
    float32Expected[1] = -1;
    float32Expected[2] = 12345.6789;
    const float64Expected = new Float64Array(expectedArrayBuffer, 40, 5);
    float64Expected[0] = 1;
    float64Expected[1] = -1;
    float64Expected[2] = 12345.6789;
    float64Expected[3] = Number.MAX_VALUE;
    float64Expected[4] = Number.MIN_VALUE;
    const buffer = t.device.createBuffer({
      mappedAtCreation: true,
      size,
      usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
    });
    const init = buffer.getMappedRange(...range);
    // Copy the expected values into the mapped range.
    assert(init.byteLength === rangeSize);
    memcpy({ src: expectedArrayBuffer }, { dst: init });
    buffer.unmap();
    // Map for reading and check each typed view against its expected counterpart.
    const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
    await buffer.mapAsync(GPUMapMode.READ, ...mapRegion);
    const mappedArrayBuffer = buffer.getMappedRange(...range);
    t.expectOK(checkElementsEqual(new Uint8Array(mappedArrayBuffer, 0, 2), uint8Expected));
    t.expectOK(checkElementsEqual(new Int8Array(mappedArrayBuffer, 2, 2), int8Expected));
    t.expectOK(checkElementsEqual(new Uint16Array(mappedArrayBuffer, 4, 2), uint16Expected));
    t.expectOK(checkElementsEqual(new Int16Array(mappedArrayBuffer, 8, 2), int16Expected));
    t.expectOK(checkElementsEqual(new Uint32Array(mappedArrayBuffer, 12, 2), uint32Expected));
    t.expectOK(checkElementsEqual(new Int32Array(mappedArrayBuffer, 20, 2), int32Expected));
    t.expectOK(checkElementsEqual(new Float32Array(mappedArrayBuffer, 28, 3), float32Expected));
    t.expectOK(checkElementsEqual(new Float64Array(mappedArrayBuffer, 40, 5), float64Expected));
  });
// Verify that data written through the mappedAtCreation mapping lands in the buffer,
// for both mappable and non-mappable buffers (the upload path may differ).
g.test('mappedAtCreation')
  .desc(
    `Use mappedAtCreation to write to various ranges of variously-sized buffers created either
with or without the MAP_WRITE usage (since this could affect the mappedAtCreation upload path),
then expectContents (which does copyBufferToBuffer + map-read) to ensure the contents were written.`
  )
  .params(u =>
    u //
      .combine('mappable', [false, true])
      .beginSubcases()
      .combineWithParams(kSubcases)
  )
  .fn(async t => {
    const { size, range, mappable } = t.params;
    const [, rangeSize] = reifyMapRange(size, range);
    const buffer = t.device.createBuffer({
      mappedAtCreation: true,
      size,
      usage: GPUBufferUsage.COPY_SRC | (mappable ? GPUBufferUsage.MAP_WRITE : 0),
    });
    const arrayBuffer = buffer.getMappedRange(...range);
    // `range[0] ?? 0` handles the implicit-offset form of `range`.
    t.checkMapWrite(buffer, range[0] ?? 0, arrayBuffer, rangeSize);
  });
// Verify that values written through one WRITE mapping survive an unmap/remap cycle.
g.test('remapped_for_write')
  .desc(
    `Use mappedAtCreation or mapAsync to write to various ranges of variously-sized buffers created
with the MAP_WRITE usage, then mapAsync again and ensure that the previously written values are
still present in the mapped buffer.`
  )
  .params(u =>
    u //
      .combine('mapAsyncRegionLeft', mapRegionBoundModes)
      .combine('mapAsyncRegionRight', mapRegionBoundModes)
      .beginSubcases()
      .combine('mappedAtCreation', [false, true])
      .combineWithParams(kSubcases)
  )
  .fn(async t => {
    const { size, range, mappedAtCreation } = t.params;
    const [rangeOffset, rangeSize] = reifyMapRange(size, range);
    const buffer = t.device.createBuffer({
      mappedAtCreation,
      size,
      usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
    });
    // If the buffer is not mappedAtCreation map it now.
    if (!mappedAtCreation) {
      await buffer.mapAsync(GPUMapMode.WRITE);
    }
    // Set the initial contents of the buffer (pattern 1, 2, 3, ...).
    const init = buffer.getMappedRange(...range);
    assert(init.byteLength === rangeSize);
    const expected = new Uint32Array(new ArrayBuffer(rangeSize));
    const data = new Uint32Array(init);
    for (let i = 0; i < data.length; ++i) {
      data[i] = expected[i] = i + 1;
    }
    buffer.unmap();
    // Check that upon remapping the for WRITE the values in the buffer are
    // still the same.
    const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
    await buffer.mapAsync(GPUMapMode.WRITE, ...mapRegion);
    const actual = new Uint8Array(buffer.getMappedRange(...range));
    t.expectOK(checkElementsEqual(actual, new Uint8Array(expected.buffer)));
  });
|
sarahM0/cts
|
src/webgpu/api/validation/encoding/cmds/render/render.ts
|
import { kUnitCaseParamsBuilder } from '../../../../../../common/framework/params_builder.js';
import { kRenderEncodeTypes } from '../../../../../util/command_buffer_maker.js';
/** Case-params builder pre-populated with every render encode type (see kRenderEncodeTypes). */
export const kRenderEncodeTypeParams = kUnitCaseParamsBuilder.combine(
  'encoderType',
  kRenderEncodeTypes
);
/**
 * Builds test params covering in-bounds and out-of-bounds (offset, size) pairs for a
 * buffer binding of `bufferSize` bytes whose offset must be aligned to `minAlignment`.
 * `_valid` records whether each combination is expected to pass validation.
 * A `size` of `undefined` exercises the implicit size, i.e. buffer.size - offset.
 */
export function buildBufferOffsetAndSizeOOBTestParams(minAlignment: number, bufferSize: number) {
  return kRenderEncodeTypeParams.combineWithParams([
    // Explicit size
    { offset: 0, size: 0, _valid: true },
    { offset: 0, size: 1, _valid: true },
    { offset: 0, size: 4, _valid: true },
    { offset: 0, size: 5, _valid: true },
    { offset: 0, size: bufferSize, _valid: true },
    { offset: 0, size: bufferSize + 4, _valid: false },
    { offset: minAlignment, size: bufferSize, _valid: false },
    { offset: minAlignment, size: bufferSize - minAlignment, _valid: true },
    { offset: bufferSize - minAlignment, size: minAlignment, _valid: true },
    { offset: bufferSize, size: 1, _valid: false },
    // Implicit size: buffer.size - offset
    { offset: 0, size: undefined, _valid: true },
    { offset: minAlignment, size: undefined, _valid: true },
    { offset: bufferSize - minAlignment, size: undefined, _valid: true },
    { offset: bufferSize, size: undefined, _valid: true },
    { offset: bufferSize + minAlignment, size: undefined, _valid: false },
  ]);
}
|
sarahM0/cts
|
src/webgpu/shader/execution/robust_access_vertex.spec.ts
|
// Human-readable test-plan description surfaced by the CTS runner; the string
// content is runtime data and is kept verbatim.
export const description = `
Test vertex attributes behave correctly (no crash / data leak) when accessed out of bounds
Test coverage:
The following is parameterized (all combinations tested):
1) Draw call type? (drawIndexed, drawIndirect, drawIndexedIndirect)
- Run the draw call using an index buffer and/or an indirect buffer.
- Doesn't test direct draw, as vertex buffer OOB are CPU validated and treated as validation errors.
- Also the instance step mode vertex buffer OOB are CPU validated for drawIndexed, so we only test
robustness access for vertex step mode vertex buffers.
2) Draw call parameter (vertexCount, firstVertex, indexCount, firstIndex, baseVertex, instanceCount,
vertexCountInIndexBuffer)
- The parameter which goes out of bounds. Filtered depending on the draw call type.
- vertexCount, firstVertex: used for drawIndirect only, test for vertex step mode buffer OOB
- instanceCount: used for both drawIndirect and drawIndexedIndirect, test for instance step mode buffer OOB
- baseVertex, vertexCountInIndexBuffer: used for both drawIndexed and drawIndexedIndirect, test
for vertex step mode buffer OOB. vertexCountInIndexBuffer indicates how many vertices are used
within the index buffer, i.e. [0, 1, ..., vertexCountInIndexBuffer-1].
- indexCount, firstIndex: used for drawIndexedIndirect only, validate the vertex buffer access
when the vertex itself is OOB in index buffer. This never happens in drawIndexed as we have index
buffer OOB CPU validation for it.
3) Attribute type (float32, float32x2, float32x3, float32x4)
- The input attribute type in the vertex shader
4) Error scale (0, 1, 4, 10^2, 10^4, 10^6)
- Offset to add to the correct draw call parameter
- 0 For control case
5) Additional vertex buffers (0, +4)
- Tests that no OOB occurs if more vertex buffers are used
6) Partial last number and offset vertex buffer (false, true)
- Tricky cases that make vertex buffer OOB.
- With partial last number enabled, vertex buffer size will be 1 byte less than enough, making the
last vertex OOB with 1 byte.
- Offset vertex buffer will bind the vertex buffer to render pass with 4 bytes offset, causing OOB
- For drawIndexed, these two flags are suppressed for instance step mode vertex buffer to make sure
it pass the CPU validation.
The tests have one instance step mode vertex buffer bound for instanced attributes, to make sure
instanceCount / firstInstance are tested.
The tests include multiple attributes per vertex buffer.
The vertex buffers are filled by repeating a few values randomly chosen for each test until the
end of the buffer.
The tests run a render pipeline which verifies the following:
1) All vertex attribute values occur in the buffer or are 0 (for control case it can't be 0)
2) All gl_VertexIndex values are within the index buffer or 0
TODO:
Currently firstInstance is not tested, as for drawIndexed it is CPU validated, and for drawIndirect
and drawIndexedIndirect it should always be 0. Once there is an extension to allow making them non-zero,
it should be added into drawCallTestParameter list.
`;
import { makeTestGroup } from '../../../common/framework/test_group.js';
import { assert } from '../../../common/util/util.js';
import { GPUTest } from '../../gpu_test.js';
// Encapsulates a draw call (either indexed or non-indexed).
// Public fields are the draw-call parameters; tests mutate one of them (by an
// "error scale") before inserting the call into a render pass.
class DrawCall {
  private test: GPUTest;
  // One GPUBuffer per entry of the |vertexArrays| passed to the constructor.
  private vertexBuffers: GPUBuffer[];
  // Add a float offset when binding vertex buffer
  private offsetVertexBuffer: boolean;
  // Keep instance step mode vertex buffer in range, in order to test vertex step
  // mode buffer OOB in drawIndexed. Setting true will suppress partialLastNumber
  // and offsetVertexBuffer for instance step mode vertex buffer.
  private keepInstanceStepModeBufferInRange: boolean;

  // Draw
  public vertexCount: number;
  public firstVertex: number;

  // DrawIndexed
  public vertexCountInIndexBuffer: number; // For generating index buffer in drawIndexed and drawIndexedIndirect
  public indexCount: number; // For accessing index buffer in drawIndexed and drawIndexedIndirect
  public firstIndex: number;
  public baseVertex: number;

  // Both Draw and DrawIndexed
  public instanceCount: number;
  public firstInstance: number;

  constructor({
    test,
    vertexArrays,
    vertexCount,
    partialLastNumber,
    offsetVertexBuffer,
    keepInstanceStepModeBufferInRange,
  }: {
    test: GPUTest;
    vertexArrays: Float32Array[];
    vertexCount: number;
    partialLastNumber: boolean;
    offsetVertexBuffer: boolean;
    keepInstanceStepModeBufferInRange: boolean;
  }) {
    this.test = test;

    // Default arguments (valid call)
    this.vertexCount = vertexCount;
    this.firstVertex = 0;
    this.vertexCountInIndexBuffer = vertexCount;
    this.indexCount = vertexCount;
    this.firstIndex = 0;
    this.baseVertex = 0;
    this.instanceCount = vertexCount;
    this.firstInstance = 0;

    this.offsetVertexBuffer = offsetVertexBuffer;
    this.keepInstanceStepModeBufferInRange = keepInstanceStepModeBufferInRange;

    // Since vertexInIndexBuffer is mutable, generation of the index buffer should be deferred to right before calling draw
    // Generate vertex buffer
    this.vertexBuffers = vertexArrays.map((v, i) => {
      if (i === 0 && keepInstanceStepModeBufferInRange) {
        // Suppress partialLastNumber for the first vertex buffer, aka the instance step mode buffer
        return this.generateVertexBuffer(v, false);
      } else {
        return this.generateVertexBuffer(v, partialLastNumber);
      }
    });
  }

  // Insert a draw call into |pass| with specified type
  public insertInto(pass: GPURenderPassEncoder, indexed: boolean, indirect: boolean) {
    if (indexed) {
      if (indirect) {
        this.drawIndexedIndirect(pass);
      } else {
        this.drawIndexed(pass);
      }
    } else {
      if (indirect) {
        this.drawIndirect(pass);
      } else {
        this.draw(pass);
      }
    }
  }

  // Insert a draw call into |pass|
  public draw(pass: GPURenderPassEncoder) {
    this.bindVertexBuffers(pass);
    pass.draw(this.vertexCount, this.instanceCount, this.firstVertex, this.firstInstance);
  }

  // Insert an indexed draw call into |pass|
  public drawIndexed(pass: GPURenderPassEncoder) {
    // Generate index buffer containing [0, 1, ..., vertexCountInIndexBuffer-1]
    const indexArray = new Uint32Array(this.vertexCountInIndexBuffer).map((_, i) => i);
    const indexBuffer = this.test.makeBufferWithContents(indexArray, GPUBufferUsage.INDEX);
    this.bindVertexBuffers(pass);
    pass.setIndexBuffer(indexBuffer, 'uint32');
    pass.drawIndexed(
      this.indexCount,
      this.instanceCount,
      this.firstIndex,
      this.baseVertex,
      this.firstInstance
    );
  }

  // Insert an indirect draw call into |pass|
  public drawIndirect(pass: GPURenderPassEncoder) {
    this.bindVertexBuffers(pass);
    pass.drawIndirect(this.generateIndirectBuffer(), 0);
  }

  // Insert an indexed indirect draw call into |pass|
  public drawIndexedIndirect(pass: GPURenderPassEncoder) {
    // Generate index buffer containing [0, 1, ..., vertexCountInIndexBuffer-1]
    const indexArray = new Uint32Array(this.vertexCountInIndexBuffer).map((_, i) => i);
    const indexBuffer = this.test.makeBufferWithContents(indexArray, GPUBufferUsage.INDEX);
    this.bindVertexBuffers(pass);
    pass.setIndexBuffer(indexBuffer, 'uint32');
    pass.drawIndexedIndirect(this.generateIndexedIndirectBuffer(), 0);
  }

  // Bind all vertex buffers generated
  private bindVertexBuffers(pass: GPURenderPassEncoder) {
    let currSlot = 0;
    for (let i = 0; i < this.vertexBuffers.length; i++) {
      if (i === 0 && this.keepInstanceStepModeBufferInRange) {
        // Keep the instance step mode buffer in range (no byte offset applied)
        pass.setVertexBuffer(currSlot++, this.vertexBuffers[i], 0);
      } else {
        pass.setVertexBuffer(currSlot++, this.vertexBuffers[i], this.offsetVertexBuffer ? 4 : 0);
      }
    }
  }

  // Create a vertex buffer from |vertexArray|
  // If |partialLastNumber| is true, delete one byte off the end
  private generateVertexBuffer(vertexArray: Float32Array, partialLastNumber: boolean): GPUBuffer {
    let size = vertexArray.byteLength;
    let length = vertexArray.length;
    if (partialLastNumber) {
      size -= 1; // Shave off one byte from the buffer size.
      length -= 1; // And one whole element from the writeBuffer.
    }
    const buffer = this.test.device.createBuffer({
      size,
      usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST, // Ensure that buffer can be used by writeBuffer
    });
    this.test.device.queue.writeBuffer(buffer, 0, vertexArray.slice(0, length));
    return buffer;
  }

  // Create an indirect buffer containing draw call values
  private generateIndirectBuffer(): GPUBuffer {
    const indirectArray = new Int32Array([
      this.vertexCount,
      this.instanceCount,
      this.firstVertex,
      this.firstInstance,
    ]);
    return this.test.makeBufferWithContents(indirectArray, GPUBufferUsage.INDIRECT);
  }

  // Create an indirect buffer containing indexed draw call values
  private generateIndexedIndirectBuffer(): GPUBuffer {
    const indirectArray = new Int32Array([
      this.indexCount,
      this.instanceCount,
      this.firstIndex,
      this.baseVertex,
      this.firstInstance,
    ]);
    return this.test.makeBufferWithContents(indirectArray, GPUBufferUsage.INDIRECT);
  }
}
// Parameterize different sized types
// Per-format metadata: the matching WGSL type, its byte size, and a WGSL snippet
// that checks whether a value of that type is one of the allowed values.
interface VertexInfo {
  wgslType: string;
  sizeInBytes: number;
  validationFunc: string;
}

const typeInfoMap: { [k: string]: VertexInfo } = {
  float32: {
    wgslType: 'f32',
    sizeInBytes: 4,
    validationFunc: 'return valid(v);',
  },
  float32x2: {
    wgslType: 'vec2<f32>',
    sizeInBytes: 8,
    validationFunc: 'return valid(v.x) && valid(v.y);',
  },
  float32x3: {
    wgslType: 'vec3<f32>',
    sizeInBytes: 12,
    validationFunc: 'return valid(v.x) && valid(v.y) && valid(v.z);',
  },
  float32x4: {
    wgslType: 'vec4<f32>',
    sizeInBytes: 16,
    // vec4 also accepts (0,0,0,0|1) — presumably the implementation-defined OOB
    // fill for the w component; TODO confirm against the robustness rules.
    validationFunc: `return valid(v.x) && valid(v.y) && valid(v.z) && valid(v.w) ||
v.x == 0.0 && v.y == 0.0 && v.z == 0.0 && (v.w == 0.0 || v.w == 1.0);`,
  },
};
class F extends GPUTest {
  // Build the Float32Array contents for each vertex buffer. Every buffer gets the
  // same repeating pattern of |arbitraryValues| (the same array instance is reused,
  // which is fine since the buffers are only read from).
  generateBufferContents(
    numVertices: number,
    attributesPerBuffer: number,
    typeInfo: VertexInfo,
    arbitraryValues: number[],
    bufferCount: number
  ): Float32Array[] {
    // Make an array big enough for the vertices, attributes, and size of each element
    const vertexArray = new Float32Array(
      numVertices * attributesPerBuffer * (typeInfo.sizeInBytes / 4)
    );
    for (let i = 0; i < vertexArray.length; ++i) {
      vertexArray[i] = arbitraryValues[i % arbitraryValues.length];
    }
    // Only the first buffer is instance step mode, all others are vertex step mode buffer
    assert(bufferCount >= 2);
    const bufferContents: Float32Array[] = [];
    for (let i = 0; i < bufferCount; i++) {
      bufferContents.push(vertexArray);
    }
    return bufferContents;
  }

  // Build the GPUVertexBufferLayout list: buffer 0 uses 'instance' step mode, the
  // rest use 'vertex'; shader locations are assigned sequentially across buffers.
  generateVertexBufferDescriptors(
    bufferCount: number,
    attributesPerBuffer: number,
    type: GPUVertexFormat
  ) {
    const typeInfo = typeInfoMap[type];
    // Vertex buffer descriptors
    const buffers: GPUVertexBufferLayout[] = [];
    {
      let currAttribute = 0;
      for (let i = 0; i < bufferCount; i++) {
        buffers.push({
          arrayStride: attributesPerBuffer * typeInfo.sizeInBytes,
          stepMode: i === 0 ? 'instance' : 'vertex',
          attributes: Array(attributesPerBuffer)
            .fill(0)
            .map((_, i) => ({
              shaderLocation: currAttribute++,
              offset: i * typeInfo.sizeInBytes, // note: inner map index, per-buffer
              format: type as GPUVertexFormat,
            })),
        });
      }
    }
    return buffers;
  }

  // Generate the WGSL vertex shader. The shader validates every attribute value
  // and (for indexed draws) the vertex index, moving the vertex right (+0.5) on
  // success and left (-0.5) on failure so the render result encodes pass/fail.
  generateVertexShaderCode({
    bufferCount,
    attributesPerBuffer,
    validValues,
    typeInfo,
    vertexIndexOffset,
    numVertices,
    isIndexed,
  }: {
    bufferCount: number;
    attributesPerBuffer: number;
    validValues: number[];
    typeInfo: VertexInfo;
    vertexIndexOffset: number;
    numVertices: number;
    isIndexed: boolean;
  }): string {
    // Create layout and attributes listing
    let layoutStr = 'struct Attributes {';
    const attributeNames = [];
    {
      let currAttribute = 0;
      for (let i = 0; i < bufferCount; i++) {
        for (let j = 0; j < attributesPerBuffer; j++) {
          layoutStr += `[[location(${currAttribute})]] a_${currAttribute} : ${typeInfo.wgslType};\n`;
          attributeNames.push(`a_${currAttribute}`);
          currAttribute++;
        }
      }
    }
    layoutStr += '};';

    const vertexShaderCode: string = `
${layoutStr}
fn valid(f : f32) -> bool {
return ${validValues.map(v => `f == ${v}.0`).join(' || ')};
}
fn validationFunc(v : ${typeInfo.wgslType}) -> bool {
${typeInfo.validationFunc}
}
[[stage(vertex)]] fn main(
[[builtin(vertex_index)]] VertexIndex : u32,
attributes : Attributes
) -> [[builtin(position)]] vec4<f32> {
var attributesInBounds = ${attributeNames
      .map(a => `validationFunc(attributes.${a})`)
      .join(' && ')};
var indexInBoundsCountFromBaseVertex =
(VertexIndex >= ${vertexIndexOffset}u &&
VertexIndex < ${vertexIndexOffset + numVertices}u);
var indexInBounds = VertexIndex == 0u || indexInBoundsCountFromBaseVertex;
var Position : vec4<f32>;
if (attributesInBounds && (${!isIndexed} || indexInBounds)) {
// Success case, move the vertex to the right of the viewport to show that at least one case succeed
Position = vec4<f32>(0.5, 0.0, 0.0, 1.0);
} else {
// Failure case, move the vertex to the left of the viewport
Position = vec4<f32>(-0.5, 0.0, 0.0, 1.0);
}
return Position;
}`;
    return vertexShaderCode;
  }

  // Build the point-list render pipeline used by doTest. The fragment stage just
  // outputs red; pass/fail is determined by which pixel vertices land on.
  createRenderPipeline({
    bufferCount,
    attributesPerBuffer,
    validValues,
    typeInfo,
    vertexIndexOffset,
    numVertices,
    isIndexed,
    buffers,
  }: {
    bufferCount: number;
    attributesPerBuffer: number;
    validValues: number[];
    typeInfo: VertexInfo;
    vertexIndexOffset: number;
    numVertices: number;
    isIndexed: boolean;
    buffers: GPUVertexBufferLayout[];
  }): GPURenderPipeline {
    const pipeline = this.device.createRenderPipeline({
      vertex: {
        module: this.device.createShaderModule({
          code: this.generateVertexShaderCode({
            bufferCount,
            attributesPerBuffer,
            validValues,
            typeInfo,
            vertexIndexOffset,
            numVertices,
            isIndexed,
          }),
        }),
        entryPoint: 'main',
        buffers,
      },
      fragment: {
        module: this.device.createShaderModule({
          code: `
[[stage(fragment)]] fn main() -> [[location(0)]] vec4<f32> {
return vec4<f32>(1.0, 0.0, 0.0, 1.0);
}`,
        }),
        entryPoint: 'main',
        targets: [{ format: 'rgba8unorm' }],
      },
      primitive: { topology: 'point-list' },
    });
    return pipeline;
  }

  // Run one draw call through the validation pipeline and check that no vertex
  // was flagged as out-of-bounds (i.e. the failure pixel was never touched).
  doTest({
    bufferCount,
    attributesPerBuffer,
    dataType,
    validValues,
    vertexIndexOffset,
    numVertices,
    isIndexed,
    isIndirect,
    drawCall,
  }: {
    bufferCount: number;
    attributesPerBuffer: number;
    dataType: GPUVertexFormat;
    validValues: number[];
    vertexIndexOffset: number;
    numVertices: number;
    isIndexed: boolean;
    isIndirect: boolean;
    drawCall: DrawCall;
  }): void {
    // Vertex buffer descriptors
    const buffers: GPUVertexBufferLayout[] = this.generateVertexBufferDescriptors(
      bufferCount,
      attributesPerBuffer,
      dataType
    );

    // Pipeline setup, texture setup
    const pipeline = this.createRenderPipeline({
      bufferCount,
      attributesPerBuffer,
      validValues,
      typeInfo: typeInfoMap[dataType],
      vertexIndexOffset,
      numVertices,
      isIndexed,
      buffers,
    });

    // 2x1 target: failing vertices land on the left pixel, passing ones on the right.
    const colorAttachment = this.device.createTexture({
      format: 'rgba8unorm',
      size: { width: 2, height: 1, depthOrArrayLayers: 1 },
      usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
    });
    const colorAttachmentView = colorAttachment.createView();

    const encoder = this.device.createCommandEncoder();
    const pass = encoder.beginRenderPass({
      colorAttachments: [
        {
          view: colorAttachmentView,
          storeOp: 'store',
          loadValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
        },
      ],
    });
    pass.setPipeline(pipeline);

    // Run the draw variant
    drawCall.insertInto(pass, isIndexed, isIndirect);

    pass.endPass();
    this.device.queue.submit([encoder.finish()]);

    // Validate we see green on the left pixel, showing that no failure case is detected
    this.expectSinglePixelIn2DTexture(
      colorAttachment,
      'rgba8unorm',
      { x: 0, y: 0 },
      { exp: new Uint8Array([0x00, 0xff, 0x00, 0xff]), layout: { mipLevel: 0 } }
    );
  }
}
export const g = makeTestGroup(F);

// Exhaustive robustness test: perturb one draw-call parameter by |errorScale|
// and verify that out-of-bounds vertex fetches never leak data (values must be
// either from the buffer or zero).
g.test('vertex_buffer_access')
  .params(
    u =>
      u
        .combineWithParams([
          { indexed: false, indirect: true },
          { indexed: true, indirect: false },
          { indexed: true, indirect: true },
        ])
        // Which draw parameters can go OOB depends on the draw-call variant.
        .expand('drawCallTestParameter', function* (p) {
          if (p.indexed) {
            yield* ['baseVertex', 'vertexCountInIndexBuffer'] as const;
            if (p.indirect) {
              yield* ['indexCount', 'instanceCount', 'firstIndex'] as const;
            }
          } else if (p.indirect) {
            yield* ['vertexCount', 'instanceCount', 'firstVertex'] as const;
          }
        })
        .combine('type', Object.keys(typeInfoMap) as GPUVertexFormat[])
        .combine('additionalBuffers', [0, 4])
        .combine('partialLastNumber', [false, true])
        .combine('offsetVertexBuffer', [false, true])
        .combine('errorScale', [0, 1, 4, 10 ** 2, 10 ** 4, 10 ** 6])
        .unless(p => p.drawCallTestParameter === 'instanceCount' && p.errorScale > 10 ** 4) // To avoid timeout
  )
  .fn(async t => {
    const p = t.params;
    const typeInfo = typeInfoMap[p.type];

    // Number of vertices to draw
    const numVertices = 4;
    // Each buffer is bound to this many attributes (2 would mean 2 attributes per buffer)
    const attributesPerBuffer = 2;
    // Some arbitrary values to fill our buffer with to avoid collisions with other tests
    const arbitraryValues = [990, 685, 446, 175];
    // A valid value is 0 or one in the buffer
    const validValues =
      p.errorScale === 0 && !p.offsetVertexBuffer && !p.partialLastNumber
        ? arbitraryValues // Control case with no OOB access, must read back valid values in buffer
        : [0, ...arbitraryValues]; // Testing case with OOB access, can be 0 for OOB data

    // Generate vertex buffer contents. Only the first buffer is instance step mode, all others are vertex step mode
    const bufferCount = p.additionalBuffers + 2; // At least one instance step mode and one vertex step mode buffer
    const bufferContents = t.generateBufferContents(
      numVertices,
      attributesPerBuffer,
      typeInfo,
      arbitraryValues,
      bufferCount
    );

    // Mutable draw call
    const draw = new DrawCall({
      test: t,
      vertexArrays: bufferContents,
      vertexCount: numVertices,
      partialLastNumber: p.partialLastNumber,
      offsetVertexBuffer: p.offsetVertexBuffer,
      keepInstanceStepModeBufferInRange: p.indexed && !p.indirect, // keep instance step mode buffer in range for drawIndexed
    });

    // Offset the draw call parameter we are testing by |errorScale|
    draw[p.drawCallTestParameter] += p.errorScale;

    // Offset the range checks for gl_VertexIndex in the shader if we use BaseVertex
    let vertexIndexOffset = 0;
    if (p.drawCallTestParameter === 'baseVertex') {
      vertexIndexOffset += p.errorScale;
    }

    t.doTest({
      bufferCount,
      attributesPerBuffer,
      dataType: p.type,
      validValues,
      vertexIndexOffset,
      numVertices,
      isIndexed: p.indexed,
      isIndirect: p.indirect,
      drawCall: draw,
    });
  });
|
sarahM0/cts
|
src/common/tools/checklist.ts
|
<reponame>sarahM0/cts
import * as fs from 'fs';
import * as process from 'process';
import { DefaultTestFileLoader } from '../internal/file_loader.js';
import { Ordering, compareQueries } from '../internal/query/compare.js';
import { parseQuery } from '../internal/query/parseQuery.js';
import { TestQuery, TestQueryMultiFile } from '../internal/query/query.js';
import { loadTreeForQuery, TestTree } from '../internal/tree.js';
import { StacklessError } from '../internal/util.js';
import { assert } from '../util/util.js';
/** Print command-line usage to stderr, then terminate the process with code `rc`. */
function usage(rc: number): void {
  const helpLines = ['Usage:', ' tools/checklist FILE', ' tools/checklist my/list.txt'];
  for (const line of helpLines) {
    console.error(line);
  }
  process.exit(rc);
}
// No file argument: show help and exit successfully; any other arity is an error.
if (process.argv.length === 2) usage(0);
if (process.argv.length !== 3) usage(1);
type QueriesBySuite = Map<string, TestQuery[]>;

/**
 * Read a checklist file, parse each non-empty line as a test query, and group
 * the parsed queries by suite name (preserving file order within each suite).
 */
async function loadQueryListFromTextFile(filename: string): Promise<QueriesBySuite> {
  const contents = await fs.promises.readFile(filename, 'utf8');
  const allQueries = contents
    .split(/\r?\n/)
    .filter(l => l)
    .map(l => parseQuery(l.trim()));

  const bySuite: QueriesBySuite = new Map();
  for (const query of allQueries) {
    const bucket = bySuite.get(query.suite);
    if (bucket) {
      bucket.push(query);
    } else {
      bySuite.set(query.suite, [query]);
    }
  }
  return bySuite;
}
/**
 * Throw if any two distinct checklist queries are comparable (i.e. not Unordered),
 * since overlapping items would cover the same tests more than once.
 */
function checkForOverlappingQueries(queries: TestQuery[]): void {
  for (const first of queries) {
    for (const second of queries) {
      if (first === second) continue;
      const overlaps = compareQueries(first, second) !== Ordering.Unordered;
      if (overlaps) {
        throw new StacklessError(`The following checklist items overlap:\n ${first}\n ${second}`);
      }
    }
  }
}
/**
 * Walk the collapsed subtrees of `tree` and verify each one is matched by at
 * least one checklist query. Logs (but tolerates) queries broader than one file;
 * throws if any subtree is left unmatched. Returns the number of subtrees seen.
 */
function checkForUnmatchedSubtrees(tree: TestTree, matchQueries: TestQuery[]): number {
  let subtreeCount = 0;
  const unmatchedSubtrees: TestQuery[] = [];
  const overbroadMatches: [TestQuery, TestQuery][] = [];
  const alwaysExpandThroughLevel = 1; // expand to, at minimum, every file.
  for (const collapsedSubtree of tree.iterateCollapsedQueries(true, alwaysExpandThroughLevel)) {
    subtreeCount++;
    let subtreeMatched = false;
    for (const q of matchQueries) {
      const comparison = compareQueries(q, collapsedSubtree);
      assert(comparison !== Ordering.StrictSubset); // shouldn't happen, due to subqueriesToExpand
      if (comparison === Ordering.StrictSuperset) overbroadMatches.push([q, collapsedSubtree]);
      // Equal or StrictSuperset both count as a match.
      if (comparison !== Ordering.Unordered) subtreeMatched = true;
    }
    if (!subtreeMatched) unmatchedSubtrees.push(collapsedSubtree);
  }
  if (overbroadMatches.length) {
    // (note, this doesn't show ALL multi-test queries - just ones that actually match any .spec.ts)
    console.log(` FYI, the following checklist items were broader than one file:`);
    for (const [q, collapsedSubtree] of overbroadMatches) {
      console.log(` ${q} > ${collapsedSubtree}`);
    }
  }
  if (unmatchedSubtrees.length) {
    throw new StacklessError(`Found unmatched tests:\n ${unmatchedSubtrees.join('\n ')}`);
  }
  return subtreeCount;
}
// Entry point: load the checklist file given on the command line, then validate
// each suite it mentions (no overlapping items, no unmatched tests).
(async () => {
  console.log('Loading queries...');
  const queriesBySuite = await loadQueryListFromTextFile(process.argv[2]);
  console.log(' Found suites: ' + Array.from(queriesBySuite.keys()).join(' '));

  const loader = new DefaultTestFileLoader();
  for (const [suite, queriesInSuite] of queriesBySuite.entries()) {
    console.log(`Suite "${suite}":`);
    console.log(` Checking overlaps between ${queriesInSuite.length} checklist items...`);
    checkForOverlappingQueries(queriesInSuite);
    // Load the full test tree for the suite, expanding along the checklist queries.
    const suiteQuery = new TestQueryMultiFile(suite, []);
    console.log(` Loading tree ${suiteQuery}...`);
    const tree = await loadTreeForQuery(loader, suiteQuery, queriesInSuite);
    console.log(' Found no invalid queries in the checklist. Checking for unmatched tests...');
    const subtreeCount = checkForUnmatchedSubtrees(tree, queriesInSuite);
    console.log(` No unmatched tests among ${subtreeCount} subtrees!`);
  }
  console.log(`Checklist looks good!`);
})().catch(ex => {
  // Print the error (with stack when available) and exit non-zero.
  console.log(ex.stack ?? ex.toString());
  process.exit(1);
});
|
sarahM0/cts
|
src/webgpu/api/validation/createBindGroupLayout.spec.ts
|
// Runner-visible description for this spec file; string content kept verbatim.
export const description = `
createBindGroupLayout validation tests.
TODO: make sure tests are complete.
`;
import { kUnitCaseParamsBuilder } from '../../../common/framework/params_builder.js';
import { makeTestGroup } from '../../../common/framework/test_group.js';
import {
kMaxBindingsPerBindGroup,
kShaderStages,
kShaderStageCombinations,
kTextureViewDimensions,
allBindingEntries,
bindingTypeInfo,
bufferBindingTypeInfo,
kBufferBindingTypes,
BGLEntry,
} from '../../capability_info.js';
import { ValidationTest } from './validation_test.js';
/**
 * Deep-copy a bind group layout descriptor via a JSON round-trip, so tests can
 * mutate the copy without affecting the original.
 */
function clone<T extends GPUBindGroupLayoutDescriptor>(descriptor: T): T {
  const serialized = JSON.stringify(descriptor);
  return JSON.parse(serialized) as T;
}
export const g = makeTestGroup(ValidationTest);

// Binding numbers must be unique within one bind group layout.
g.test('duplicate_bindings')
  .desc('Test that uniqueness of binding numbers across entries is enforced.')
  .paramsSubcasesOnly([
    { bindings: [0, 1], _valid: true },
    { bindings: [0, 0], _valid: false },
  ])
  .fn(async t => {
    const { bindings, _valid } = t.params;
    const entries: Array<GPUBindGroupLayoutEntry> = [];
    for (const binding of bindings) {
      entries.push({
        binding,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type: 'storage' as const },
      });
    }
    // A validation error is expected exactly when the case is marked invalid.
    t.expectValidationError(() => {
      t.device.createBindGroupLayout({
        entries,
      });
    }, !_valid);
  });
g.test('visibility')
  .desc(
    `
Test that only the appropriate combinations of visibilities are allowed for each resource type.
- Test each possible combination of shader stage visibilities.
- Test each type of bind group resource.`
  )
  .params(u =>
    u
      .combine('visibility', kShaderStageCombinations)
      .beginSubcases()
      .combine('entry', allBindingEntries(false))
  )
  .fn(async t => {
    const { visibility, entry } = t.params;
    const info = bindingTypeInfo(entry);

    // Valid iff `visibility` contains no stage bits outside the entry's allowed stages.
    const success = (visibility & ~info.validStages) === 0;

    t.expectValidationError(() => {
      t.device.createBindGroupLayout({
        entries: [{ binding: 0, visibility, ...entry }],
      });
    }, !success);
  });
g.test('multisampled_validation')
  .desc('Test that multisampling is only allowed with "2d" view dimensions.')
  .paramsSubcasesOnly(u =>
    u //
      .combine('viewDimension', [undefined, ...kTextureViewDimensions])
  )
  .fn(async t => {
    const { viewDimension } = t.params;

    // Only '2d' is allowed; `undefined` is also accepted here (presumably because
    // the dimension defaults to '2d' — see the WebGPU spec for the default).
    const success = viewDimension === '2d' || viewDimension === undefined;

    t.expectValidationError(() => {
      t.device.createBindGroupLayout({
        entries: [
          {
            binding: 0,
            visibility: GPUShaderStage.COMPUTE,
            texture: { multisampled: true, viewDimension },
          },
        ],
      });
    }, !success);
  });
g.test('max_dynamic_buffers')
  .desc(
    `
Test that limits on the maximum number of dynamic buffers are enforced.
- Test creation of a bind group layout using the maximum number of dynamic buffers works.
- Test creation of a bind group layout using the maximum number of dynamic buffers + 1 fails.
- TODO(#230): Update to enforce per-stage and per-pipeline-layout limits on BGLs as well.`
  )
  .params(u =>
    u
      .combine('type', kBufferBindingTypes)
      .beginSubcases()
      .combine('extraDynamicBuffers', [0, 1])
      .combine('staticBuffers', [0, 1])
  )
  .fn(async t => {
    const { type, extraDynamicBuffers, staticBuffers } = t.params;
    const info = bufferBindingTypeInfo({ type });

    // extraDynamicBuffers === 1 pushes the count one past the per-pipeline limit.
    const dynamicBufferCount = info.perPipelineLimitClass.maxDynamic + extraDynamicBuffers;

    // Dynamic-offset buffer entries, bindings [0, dynamicBufferCount).
    const entries = [];
    for (let i = 0; i < dynamicBufferCount; i++) {
      entries.push({
        binding: i,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type, hasDynamicOffset: true },
      });
    }

    // Additional static (non-dynamic) buffers, which should not count toward the limit.
    for (let i = dynamicBufferCount; i < dynamicBufferCount + staticBuffers; i++) {
      entries.push({
        binding: i,
        visibility: GPUShaderStage.COMPUTE,
        buffer: { type, hasDynamicOffset: false },
      });
    }

    const descriptor = {
      entries,
    };

    t.expectValidationError(() => {
      t.device.createBindGroupLayout(descriptor);
    }, extraDynamicBuffers > 0);
  });
/**
 * One bind group layout will be filled with kPerStageBindingLimit[...] of the type |entry|.
 * For each item yielded here, a case is generated which tests a pipeline layout with one
 * extra bind group layout containing one extra binding. That extra binding will have:
 *
 * - If extraTypeSame, any of the binding types which counts toward the same limit as |entry|.
 *   (i.e. 'storage-buffer' <-> 'readonly-storage-buffer').
 * - Otherwise, an arbitrary other type.
 */
function* pickExtraBindingTypesForPerStage(entry: BGLEntry, extraTypeSame: boolean) {
  if (!extraTypeSame) {
    // Pick one entry that is counted under a different per-stage limit:
    // a texture if |entry| is a sampler, and a sampler otherwise.
    yield entry.sampler ? { texture: {} } : { sampler: {} };
    return;
  }
  // Yield every binding type sharing |entry|'s per-stage limit class.
  const limitClass = bindingTypeInfo(entry).perStageLimitClass.class;
  for (const candidate of allBindingEntries(false)) {
    if (bindingTypeInfo(candidate).perStageLimitClass.class === limitClass) {
      yield candidate;
    }
  }
}
// Shared case list for the two max_resources_per_stage tests below: one bind group
// layout maxed out with |maxedEntry| bindings visible to |maxedVisibility|, plus one
// extra binding (|extraEntry| at |extraVisibility|) whose per-stage limit class may
// or may not collide with the maxed type's.
const kMaxResourcesCases = kUnitCaseParamsBuilder
  .combine('maxedEntry', allBindingEntries(false))
  .beginSubcases()
  .combine('maxedVisibility', kShaderStages)
  // Skip stages the maxed binding type can never be visible to.
  .filter(p => (bindingTypeInfo(p.maxedEntry).validStages & p.maxedVisibility) !== 0)
  .expand('extraEntry', p => [
    // Extra entries that count toward the same per-stage limit as the maxed type,
    // followed by one entry counted under a different limit.
    ...pickExtraBindingTypesForPerStage(p.maxedEntry, true),
    ...pickExtraBindingTypesForPerStage(p.maxedEntry, false),
  ])
  .combine('extraVisibility', kShaderStages)
  // Skip stages the extra binding type can never be visible to.
  .filter(p => (bindingTypeInfo(p.extraEntry).validStages & p.extraVisibility) !== 0);
// Should only ever fail by exceeding kMaxBindingsPerBindGroup: the per-stage
// resources-of-type limits are validated at pipeline layout creation, not here.
g.test('max_resources_per_stage,in_bind_group_layout')
  .desc(
    `
Test that the maximum number of bindings of a given type per-stage cannot be exceeded in a
single bind group layout.
- Test each binding type.
- Test that creation of a bind group layout using the maximum number of bindings works.
- Test that creation of a bind group layout using the maximum number of bindings + 1 fails.
- TODO(#230): Update to enforce per-stage and per-pipeline-layout limits on BGLs as well.`
  )
  .params(kMaxResourcesCases)
  .fn(async t => {
    const { maxedEntry, extraEntry, maxedVisibility, extraVisibility } = t.params;
    const maxedCount = bindingTypeInfo(maxedEntry).perStageLimitClass.max;
    // Fill one layout with exactly the per-stage maximum of the maxed type.
    const maxResourceBindings: GPUBindGroupLayoutEntry[] = Array.from(
      { length: maxedCount },
      (_, i) => ({ binding: i, visibility: maxedVisibility, ...maxedEntry })
    );
    const goodDescriptor = { entries: maxResourceBindings };
    // Control: the maxed-out layout itself must be valid.
    t.device.createBindGroupLayout(goodDescriptor);
    // Append one more binding of the extra type.
    const newDescriptor = clone(goodDescriptor);
    newDescriptor.entries.push({
      binding: maxedCount,
      visibility: extraVisibility,
      ...extraEntry,
    });
    const shouldError = maxedCount >= kMaxBindingsPerBindGroup;
    t.expectValidationError(() => {
      t.device.createBindGroupLayout(newDescriptor);
    }, shouldError);
  });
// One pipeline layout can have a maximum number of each type of binding *per stage* (which is
// different for each type). Test that the max works, then add one more binding of same-or-different
// type and same-or-different visibility.
g.test('max_resources_per_stage,in_pipeline_layout')
  .desc(
    `
Test that the maximum number of bindings of a given type per-stage cannot be exceeded across
multiple bind group layouts when creating a pipeline layout.
- Test each binding type.
- Test that creation of a pipeline using the maximum number of bindings works.
- Test that creation of a pipeline using the maximum number of bindings + 1 fails.
`
  )
  .params(kMaxResourcesCases)
  .fn(async t => {
    const { maxedEntry, extraEntry, maxedVisibility, extraVisibility } = t.params;
    const maxedTypeInfo = bindingTypeInfo(maxedEntry);
    const maxedCount = maxedTypeInfo.perStageLimitClass.max;
    const extraTypeInfo = bindingTypeInfo(extraEntry);
    // First BGL: exactly the per-stage maximum of |maxedEntry| bindings.
    const maxResourceBindings: GPUBindGroupLayoutEntry[] = [];
    for (let i = 0; i < maxedCount; i++) {
      maxResourceBindings.push({
        binding: i,
        visibility: maxedVisibility,
        ...maxedEntry,
      });
    }
    const goodLayout = t.device.createBindGroupLayout({ entries: maxResourceBindings });
    // Control: a pipeline layout containing only the maxed-out BGL must be valid.
    t.device.createPipelineLayout({ bindGroupLayouts: [goodLayout] });
    // Second BGL: a single extra binding of the (possibly different) extra type.
    const extraLayout = t.device.createBindGroupLayout({
      entries: [
        {
          binding: 0,
          visibility: extraVisibility,
          ...extraEntry,
        },
      ],
    });
    // Some binding types use the same limit, e.g. 'storage-buffer' and 'readonly-storage-buffer'.
    // The combined layout exceeds a per-stage limit only if the extra binding is visible to a
    // stage the maxed bindings are also visible to AND counts toward the same limit class.
    const newBindingCountsTowardSamePerStageLimit =
      (maxedVisibility & extraVisibility) !== 0 &&
      maxedTypeInfo.perStageLimitClass.class === extraTypeInfo.perStageLimitClass.class;
    const layoutExceedsPerStageLimit = newBindingCountsTowardSamePerStageLimit;
    t.expectValidationError(() => {
      t.device.createPipelineLayout({ bindGroupLayouts: [goodLayout, extraLayout] });
    }, layoutExceedsPerStageLimit);
  });
|
sarahM0/cts
|
src/common/internal/version.ts
|
<filename>src/common/internal/version.ts
// NOTE(review): 'unknown' looks like a placeholder value — presumably replaced
// with the real revision by build tooling; confirm against the build scripts.
export const version = 'unknown';
|
sarahM0/cts
|
src/webgpu/shader/execution/builtin/float_built_functions.spec.ts
|
<filename>src/webgpu/shader/execution/builtin/float_built_functions.spec.ts<gh_stars>10-100
export const description = `WGSL execution test. Section: Float built-in functions`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
// NOTE(review): every test below is an autogenerated, unimplemented placeholder
// (see the plan_autogen.md guidelines referenced in each description). The
// 'placeHolder*' params are stand-ins to be replaced when each test is written;
// uniqueId/specURL values are generated metadata and must not be edited by hand.
g.test('float_builtin_functions,acos')
  .uniqueId('3b55004d23fedacf')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
acos:
T is f32 or vecN<f32> acos(e: T ) -> T Returns the arc cosine of e. Component-wise when T is a vector. (GLSLstd450Acos)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,asin')
  .uniqueId('322c7c5ba84c257a')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
asin:
T is f32 or vecN<f32> asin(e: T ) -> T Returns the arc sine of e. Component-wise when T is a vector. (GLSLstd450Asin)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,atan2')
  .uniqueId('cc85953f226ac95c')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
atan2:
T is f32 or vecN<f32> atan2(e1: T ,e2: T ) -> T Returns the arc tangent of e1 over e2. Component-wise when T is a vector. (GLSLstd450Atan2)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,clamp')
  .uniqueId('88e39c61e6dbd26f')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
clamp:
T is f32 or vecN<f32> clamp(e1: T ,e2: T ,e3: T) -> T Returns min(max(e1,e2),e3). Component-wise when T is a vector. (GLSLstd450NClamp)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,cosh')
  .uniqueId('e4499ece6f25610d')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
cosh:
T is f32 or vecN<f32> cosh(e: T ) -> T Returns the hyperbolic cosine of e. Component-wise when T is a vector (GLSLstd450Cosh)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,vector_case_cross')
  .uniqueId('61356f087238c33c')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
vector case, cross:
T is f32 cross(e1: vec3<T> ,e2: vec3<T>) -> vec3<T> Returns the cross product of e1 and e2. (GLSLstd450Cross)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,distance')
  .uniqueId('a1459d94b9d23add')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
distance:
T is f32 or vecN<f32> distance(e1: T ,e2: T ) -> f32 Returns the distance between e1 and e2 (e.g. length(e1-e2)). (GLSLstd450Distance)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,exp')
  .uniqueId('ba1d78b3923e3ecc')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
exp:
T is f32 or vecN<f32> exp(e1: T ) -> T Returns the natural exponentiation of e1 (e.g. ee1). Component-wise when T is a vector. (GLSLstd450Exp)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,exp2')
  .uniqueId('335173647c18d7b0')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
exp2:
T is f32 or vecN<f32> exp2(e: T ) -> T Returns 2 raised to the power e (e.g. 2e). Component-wise when T is a vector. (GLSLstd450Exp2)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,faceForward')
  .uniqueId('ff98e4f5d2064a6f')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
faceForward:
T is vecN<f32> faceForward(e1: T ,e2: T ,e3: T ) -> T Returns e1 if dot(e2,e3) is negative, and -e1 otherwise. (GLSLstd450FaceForward)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,fma')
  .uniqueId('c6212635b880548b')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
fma:
T is f32 or vecN<f32> fma(e1: T ,e2: T ,e3: T ) -> T Returns e1 * e2 + e3. Component-wise when T is a vector. (GLSLstd450Fma)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,fract')
  .uniqueId('58222ecf6f963798')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
fract:
T is f32 or vecN<f32> fract(e: T ) -> T Returns the fractional bits of e (e.g. e - floor(e)). Component-wise when T is a vector. (GLSLstd450Fract)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,scalar_case_frexp')
  .uniqueId('c5df46977f5b77a0')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
scalar case, frexp:
T is f32 frexp(e:T) -> _frexp_result Splits e into a significand and exponent of the form significand * 2exponent. Returns the _frexp_result built-in structure, defined as: struct _frexp_result { sig : f32; // significand part exp : i32; // exponent part
}; The magnitude of the significand is in the range of [0.5, 1.0) or 0. Note: A value cannot be explicitly declared with the type _frexp_result, but a value may infer the type. (GLSLstd450FrexpStruct)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,vector_case_frexp')
  .uniqueId('69806278766b12a2')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
vector case, frexp:
T is vecN<f32> frexp(e:T) -> _frexp_result_vecN Splits the components of e into a significand and exponent of the form significand * 2exponent. Returns the _frexp_result_vecN built-in structure, defined as: struct _frexp_result_vecN { sig : vecN<f32>; // significand part exp : vecN<i32>; // exponent part
}; The magnitude of each component of the significand is in the range of [0.5, 1.0) or 0. Note: A value cannot be explicitly declared with the type _frexp_result_vecN, but a value may infer the type. (GLSLstd450FrexpStruct)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,inverseSqrt')
  .uniqueId('84fc180ad82c5618')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
inverseSqrt:
T is f32 or vecN<f32> inverseSqrt(e: T ) -> T Returns the reciprocal of sqrt(e). Component-wise when T is a vector. (GLSLstd450InverseSqrt)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,ldexp')
  .uniqueId('358f6e4501a32907')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
ldexp:
T is f32 or vecN<f32> I is i32 or vecN<i32>, where I is a scalar if T is a scalar, or a vector when T is a vector ldexp(e1: T ,e2: I ) -> T Returns e1 * 2e2. Component-wise when T is a vector. (GLSLstd450Ldexp)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,length')
  .uniqueId('0e5dba3253f9dec6')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
length:
T is f32 or vecN<f32> length(e: T ) -> f32 Returns the length of e (e.g. abs(e) if T is a scalar, or sqrt(e[0]2 + e[1]2 + ...) if T is a vector). (GLSLstd450Length)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,log')
  .uniqueId('7cd6780116b47d00')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
log:
T is f32 or vecN<f32> log(e: T ) -> T Returns the natural logaritm of e. Component-wise when T is a vector. (GLSLstd450Log)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,log2')
  .uniqueId('9ed120de1990296a')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
log2:
T is f32 or vecN<f32> log2(e: T ) -> T Returns the base-2 logarithm of e. Component-wise when T is a vector. (GLSLstd450Log2)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,max')
  .uniqueId('bcb6c69b4ec703b1')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
max:
T is f32 or vecN<f32> max(e1: T ,e2: T ) -> T Returns e2 if e1 is less than e2, and e1 otherwise. If one operand is a NaN, the other is returned. If both operands are NaNs, a NaN is returned. Component-wise when T is a vector. (GLSLstd450NMax)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,min')
  .uniqueId('53efc46faad0f380')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
min:
T is f32 or vecN<f32> min(e1: T ,e2: T ) -> T Returns e2 if e2 is less than e1, and e1 otherwise. If one operand is a NaN, the other is returned. If both operands are NaNs, a NaN is returned. Component-wise when T is a vector. (GLSLstd450NMin)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,mix_all_same_type_operands')
  .uniqueId('f17861e71386bb59')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
mix all same type operands:
T is f32 or vecN<f32> mix(e1: T ,e2: T ,e3: T) -> T Returns the linear blend of e1 and e2 (e.g. e1*(1-e3)+e2*e3). Component-wise when T is a vector. (GLSLstd450FMix)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,vector_mix_with_scalar_blending_factor')
  .uniqueId('0a9f4a579e0c1348')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
vector mix with scalar blending factor:
T is vecN<f32> mix(e1: T ,e2: T ,e3: f32 ) -> T Returns the component-wise linear blend of e1 and e2, using scalar blending factor e3 for each component. Same as mix(e1,e2,T(e3)).
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,scalar_case_modf')
  .uniqueId('2a7234321aef021d')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
scalar case, modf:
T is f32 modf(e:T) -> _modf_result Splits e into fractional and whole number parts. Returns the _modf_result built-in structure, defined as: struct _modf_result { fract : f32; // fractional part whole : f32; // whole part
}; Note: A value cannot be explicitly declared with the type _modf_result, but a value may infer the type. (GLSLstd450ModfStruct)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,vector_case_modf')
  .uniqueId('d1426ca015843ddf')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
vector case, modf:
T is vecN<f32> modf(e:T) -> _modf_result_vecN Splits the components of e into fractional and whole number parts. Returns the _modf_result_vecN built-in structure, defined as: struct _modf_result_vecN { fract : vecN<f32>; // fractional part whole : vecN<f32>; // whole part
}; Note: A value cannot be explicitly declared with the type _modf_result_vecN, but a value may infer the type. (GLSLstd450ModfStruct)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,vector_case_normalize')
  .uniqueId('29c971aea0969a86')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
vector case, normalize:
T is f32 normalize(e: vecN<T> ) -> vecN<T> Returns a unit vector in the same direction as e. (GLSLstd450Normalize)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,pow')
  .uniqueId('a3ff963b1810c8c4')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
pow:
T is f32 or vecN<f32> pow(e1: T ,e2: T ) -> T Returns e1 raised to the power e2. Component-wise when T is a vector. (GLSLstd450Pow)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,quantize_to_f16')
  .uniqueId('ec899bfcd46a6316')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
quantize to f16:
T is f32 or vecN<f32> quantizeToF16(e: T ) -> T Quantizes a 32-bit floating point value e as if e were converted to a IEEE 754 binary16 value, and then converted back to a IEEE 754 binary32 value. See section 12.5.2 Floating point conversion. Component-wise when T is a vector. Note: The vec2<f32> case is the same as unpack2x16float(pack2x16float(e)). (OpQuantizeToF16)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,reflect')
  .uniqueId('463ddb8c59de0a98')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
reflect:
T is vecN<f32> reflect(e1: T ,e2: T ) -> T For the incident vector e1 and surface orientation e2, returns the reflection direction e1-2*dot(e2,e1)*e2. (GLSLstd450Reflect)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,refract')
  .uniqueId('8e0c0021b980cf0a')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
refract:
T is vecN<f32>I is f32 refract(e1: T ,e2: T ,e3: I ) -> T For the incident vector e1 and surface normal e2, and the ratio of indices of refraction e3, let k = 1.0 -e3*e3* (1.0 - dot(e2,e1) * dot(e2,e1)). If k < 0.0, returns the refraction vector 0.0, otherwise return the refraction vector e3*e1- (e3* dot(e2,e1) + sqrt(k)) *e2. (GLSLstd450Refract)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,round')
  .uniqueId('427d7791f5cd13dc')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
round:
T is f32 or vecN<f32> round(e: T ) -> T Result is the integer k nearest to e, as a floating point value. When e lies halfway between integers k and k+1, the result is k when k is even, and k+1 when k is odd. Component-wise when T is a vector. (GLSLstd450RoundEven)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,float_sign')
  .uniqueId('411a9acbb5411c89')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
float sign:
T is f32 or vecN<f32> sign(e: T ) -> T Returns the sign of e. Component-wise when T is a vector. (GLSLstd450FSign)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,sin')
  .uniqueId('d10f3745e5ea639d')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
sin:
T is f32 or vecN<f32> sin(e: T ) -> T Returns the sine of e. Component-wise when T is a vector. (GLSLstd450Sin)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,sinh')
  .uniqueId('d1d30e0b45aabed5')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
sinh:
T is f32 or vecN<f32> sinh(e: T ) -> T Returns the hyperbolic sine of e. Component-wise when T is a vector. (GLSLstd450Sinh)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,smoothStep')
  .uniqueId('d1e9e5d30be184c0')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
smoothStep:
T is f32 or vecN<f32> smoothStep(e1: T ,e2: T ,e3: T ) -> T Returns the smooth Hermite interpolation between 0 and 1. Component-wise when T is a vector. (GLSLstd450SmoothStep)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,sqrt')
  .uniqueId('f16f8ca434c7e6d8')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
sqrt:
T is f32 or vecN<f32> sqrt(e: T ) -> T Returns the square root of e. Component-wise when T is a vector. (GLSLstd450Sqrt)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,step')
  .uniqueId('ac15bb28d3fa3032')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
step:
T is f32 or vecN<f32> step(e1: T ,e2: T ) -> T Returns 0.0 if e1 is less than e2, and 1.0 otherwise. Component-wise when T is a vector. (GLSLstd450Step)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,tan')
  .uniqueId('0229869d4d7f2702')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
tan:
T is f32 or vecN<f32> tan(e: T ) -> T Returns the tangent of e. Component-wise when T is a vector. (GLSLstd450Tan)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,tanh')
  .uniqueId('5d36803b13b3522d')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
tanh:
T is f32 or vecN<f32> tanh(e: T ) -> T Returns the hyperbolic tangent of e. Component-wise when T is a vector. (GLSLstd450Tanh)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
g.test('float_builtin_functions,trunc')
  .uniqueId('2f5ce2108f924fca')
  .specURL('https://www.w3.org/TR/2021/WD-WGSL-20210929/#float-builtin-functions')
  .desc(
    `
trunc:
T is f32 or vecN<f32> trunc(e: T ) -> T Returns the nearest whole number whose absolute value is less than or equal to e. Component-wise when T is a vector. (GLSLstd450Trunc)
Please read the following guidelines before contributing:
https://github.com/gpuweb/cts/blob/main/docs/plan_autogen.md
`
  )
  .params(u => u.combine('placeHolder1', ['placeHolder2', 'placeHolder3']))
  .unimplemented();
|
sarahM0/cts
|
src/webgpu/api/validation/image_copy/layout_related.spec.ts
|
export const description = '';
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { assert } from '../../../../common/util/util.js';
import { kTextureFormatInfo, kSizedTextureFormats } from '../../../capability_info.js';
import { align } from '../../../util/math.js';
import {
bytesInACompleteRow,
dataBytesForCopyOrOverestimate,
dataBytesForCopyOrFail,
kImageCopyTypes,
} from '../../../util/texture/layout.js';
import {
ImageCopyTest,
texelBlockAlignmentTestExpanderForOffset,
texelBlockAlignmentTestExpanderForRowsPerImage,
formatCopyableWithMethod,
} from './image_copy.js';
export const g = makeTestGroup(ImageCopyTest);
g.test('bound_on_rows_per_image')
  .params(u =>
    u
      .combine('method', kImageCopyTypes)
      .beginSubcases()
      .combine('rowsPerImage', [undefined, 0, 1, 2, 1024])
      .combine('copyHeightInBlocks', [0, 1, 2])
      .combine('copyDepth', [1, 3])
  )
  .fn(async t => {
    const { rowsPerImage, copyHeightInBlocks, copyDepth, method } = t.params;
    const format = 'rgba8unorm';
    // Convert block count to texels (rgba8unorm blocks are 1x1, but compute it anyway).
    const { blockHeight } = kTextureFormatInfo[format];
    const copySize = {
      // NOTE(review): width is 0 here — presumably to isolate the rowsPerImage
      // bound from row-size requirements; confirm against the copy-validation spec.
      width: 0,
      height: copyHeightInBlocks * blockHeight,
      depthOrArrayLayers: copyDepth,
    };
    const layout = { bytesPerRow: 1024, rowsPerImage };
    // Compute a data size that is large enough iff the copy itself is valid.
    const { minDataSizeOrOverestimate, copyValid } = dataBytesForCopyOrOverestimate({
      layout,
      format,
      copySize,
      method,
    });
    const texture = t.device.createTexture({
      size: { width: 4, height: 4, depthOrArrayLayers: 3 },
      format,
      usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
    });
    t.testRun({ texture }, layout, copySize, {
      dataSize: minDataSizeOrOverestimate,
      method,
      success: copyValid,
    });
  });
g.test('copy_end_overflows_u64')
  .desc(`Test what happens when offset+requiredBytesInCopy overflows GPUSize64.`)
  .params(u =>
    u
      .combine('method', kImageCopyTypes)
      .beginSubcases()
      .combineWithParams([
        { bytesPerRow: 2 ** 31, rowsPerImage: 2 ** 31, depthOrArrayLayers: 1, _success: true }, // success case
        { bytesPerRow: 2 ** 31, rowsPerImage: 2 ** 31, depthOrArrayLayers: 16, _success: false }, // bytesPerRow * rowsPerImage * (depthOrArrayLayers - 1) overflows.
      ])
  )
  .fn(async t => {
    const { method, bytesPerRow, rowsPerImage, depthOrArrayLayers, _success } = t.params;
    // A 1x1 texture with enough layers for the requested copy depth.
    const texture = t.device.createTexture({
      size: [1, 1, depthOrArrayLayers],
      format: 'rgba8unorm',
      usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
    });
    const dataLayout = { bytesPerRow, rowsPerImage };
    const copyExtent = { width: 1, height: 1, depthOrArrayLayers };
    t.testRun({ texture }, dataLayout, copyExtent, {
      dataSize: 10000,
      method,
      success: _success,
    });
  });
g.test('required_bytes_in_copy')
  .desc(
    `Test that the min data size condition (requiredBytesInCopy) is checked correctly.
- Exact requiredBytesInCopy should succeed.
- requiredBytesInCopy - 1 should fail.
`
  )
  .params(u =>
    u
      .combine('method', kImageCopyTypes)
      .combine('format', kSizedTextureFormats)
      .filter(formatCopyableWithMethod)
      .beginSubcases()
      .combineWithParams([
        { bytesPerRowPadding: 0, rowsPerImagePaddingInBlocks: 0 }, // no padding
        { bytesPerRowPadding: 0, rowsPerImagePaddingInBlocks: 6 }, // rowsPerImage padding
        { bytesPerRowPadding: 6, rowsPerImagePaddingInBlocks: 0 }, // bytesPerRow padding
        { bytesPerRowPadding: 15, rowsPerImagePaddingInBlocks: 17 }, // both paddings
      ])
      .combineWithParams([
        { copyWidthInBlocks: 3, copyHeightInBlocks: 4, copyDepth: 5, offsetInBlocks: 0 }, // standard copy
        { copyWidthInBlocks: 5, copyHeightInBlocks: 4, copyDepth: 3, offsetInBlocks: 11 }, // standard copy, offset > 0
        { copyWidthInBlocks: 256, copyHeightInBlocks: 3, copyDepth: 2, offsetInBlocks: 0 }, // copyWidth is 256-aligned
        { copyWidthInBlocks: 0, copyHeightInBlocks: 4, copyDepth: 5, offsetInBlocks: 0 }, // empty copy because of width
        { copyWidthInBlocks: 3, copyHeightInBlocks: 0, copyDepth: 5, offsetInBlocks: 0 }, // empty copy because of height
        { copyWidthInBlocks: 3, copyHeightInBlocks: 4, copyDepth: 0, offsetInBlocks: 13 }, // empty copy because of depth, offset > 0
        { copyWidthInBlocks: 1, copyHeightInBlocks: 4, copyDepth: 5, offsetInBlocks: 0 }, // copyWidth = 1
        { copyWidthInBlocks: 3, copyHeightInBlocks: 1, copyDepth: 5, offsetInBlocks: 15 }, // copyHeight = 1, offset > 0
        { copyWidthInBlocks: 5, copyHeightInBlocks: 4, copyDepth: 1, offsetInBlocks: 0 }, // copyDepth = 1
        { copyWidthInBlocks: 7, copyHeightInBlocks: 1, copyDepth: 1, offsetInBlocks: 0 }, // copyHeight = 1 and copyDepth = 1
      ])
      // The test texture size will be rounded up from the copy size to the next valid texture size.
      // If the format is a depth/stencil format, its copy size must equal to subresource's size.
      // So filter out depth/stencil cases where the rounded-up texture size would be different from the copy size.
      .filter(({ format, copyWidthInBlocks, copyHeightInBlocks, copyDepth }) => {
        const info = kTextureFormatInfo[format];
        return (
          (!info.depth && !info.stencil) ||
          (copyWidthInBlocks > 0 && copyHeightInBlocks > 0 && copyDepth > 0)
        );
      })
  )
  .fn(async t => {
    const {
      offsetInBlocks,
      bytesPerRowPadding,
      rowsPerImagePaddingInBlocks,
      copyWidthInBlocks,
      copyHeightInBlocks,
      copyDepth,
      format,
      method,
    } = t.params;
    const info = kTextureFormatInfo[format];
    // Some formats are only copyable when an optional feature is enabled.
    await t.selectDeviceOrSkipTestCase(info.feature);
    // In the CopyB2T and CopyT2B cases we need to have bytesPerRow 256-aligned,
    // to make this happen we align the bytesInACompleteRow value and multiply
    // bytesPerRowPadding by 256.
    const bytesPerRowAlignment = method === 'WriteTexture' ? 1 : 256;
    // Convert block-based params into texels / bytes for this format.
    const copyWidth = copyWidthInBlocks * info.blockWidth;
    const copyHeight = copyHeightInBlocks * info.blockHeight;
    const offset = offsetInBlocks * info.bytesPerBlock;
    const rowsPerImage = copyHeight + rowsPerImagePaddingInBlocks * info.blockHeight;
    const bytesPerRow =
      align(bytesInACompleteRow(copyWidth, format), bytesPerRowAlignment) +
      bytesPerRowPadding * bytesPerRowAlignment;
    const copySize = { width: copyWidth, height: copyHeight, depthOrArrayLayers: copyDepth };
    const layout = { offset, bytesPerRow, rowsPerImage };
    // The exact minimum data size; this throws (skipping the case) if the copy is invalid.
    const minDataSize = dataBytesForCopyOrFail({ layout, format, copySize, method });
    const texture = t.createAlignedTexture(format, copySize);
    // Exactly the required size must succeed...
    t.testRun({ texture }, { offset, bytesPerRow, rowsPerImage }, copySize, {
      dataSize: minDataSize,
      method,
      success: true,
    });
    // ...and one byte fewer must fail (unless the copy requires zero bytes).
    if (minDataSize > 0) {
      t.testRun({ texture }, { offset, bytesPerRow, rowsPerImage }, copySize, {
        dataSize: minDataSize - 1,
        method,
        success: false,
      });
    }
  });
g.test('rows_per_image_alignment')
  .desc(`rowsPerImage is measured in multiples of block height, so has no alignment constraints.`)
  .params(u =>
    u
      .combine('method', kImageCopyTypes)
      .combine('format', kSizedTextureFormats)
      .filter(formatCopyableWithMethod)
      .beginSubcases()
      .expand('rowsPerImage', texelBlockAlignmentTestExpanderForRowsPerImage)
      // The copy below is exactly one block tall, so rowsPerImage must be at least the
      // format's block height for the copy to be meaningful.
      .filter(p => p.rowsPerImage >= kTextureFormatInfo[p.format].blockHeight)
  )
  .fn(async t => {
    const { rowsPerImage, format, method } = t.params;
    const formatInfo = kTextureFormatInfo[format];
    // Skip (rather than fail) if the device doesn't support this format's feature.
    await t.selectDeviceOrSkipTestCase(formatInfo.feature);

    // A minimal one-block texture/copy suffices; only the rowsPerImage value is under test.
    const copyExtent = {
      width: formatInfo.blockWidth,
      height: formatInfo.blockHeight,
      depthOrArrayLayers: 1,
    };
    const texture = t.device.createTexture({
      size: copyExtent,
      format,
      usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
    });

    // Every rowsPerImage produced by the expander (>= copy height) must be accepted.
    t.testRun({ texture }, { bytesPerRow: 256, rowsPerImage }, copyExtent, {
      dataSize: formatInfo.bytesPerBlock,
      method,
      success: true,
    });
  });
g.test('offset_alignment')
  .desc(
    `If texture format is not depth/stencil format, offset should be aligned with texture block. If texture format is depth/stencil format, offset should be a multiple of 4.`
  )
  .params(u =>
    u
      .combine('method', kImageCopyTypes)
      .combine('format', kSizedTextureFormats)
      .filter(formatCopyableWithMethod)
      .beginSubcases()
      .expand('offset', texelBlockAlignmentTestExpanderForOffset)
  )
  .fn(async t => {
    const { format, offset, method } = t.params;
    const formatInfo = kTextureFormatInfo[format];
    // Skip (rather than fail) if the device doesn't support this format's feature.
    await t.selectDeviceOrSkipTestCase(formatInfo.feature);

    // A minimal one-block texture/copy suffices; only the offset value is under test.
    const copyExtent = {
      width: formatInfo.blockWidth,
      height: formatInfo.blockHeight,
      depthOrArrayLayers: 1,
    };
    const texture = t.device.createTexture({
      size: copyExtent,
      format,
      usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
    });

    // WriteTexture imposes no offset alignment. For the copy methods, depth/stencil
    // formats require a multiple of 4; all other formats require block-size alignment.
    const requiredAlignment =
      formatInfo.depth || formatInfo.stencil ? 4 : formatInfo.bytesPerBlock;
    const success = method === 'WriteTexture' || offset % requiredAlignment === 0;

    t.testRun({ texture }, { offset, bytesPerRow: 256 }, copyExtent, {
      dataSize: offset + formatInfo.bytesPerBlock,
      method,
      success,
    });
  });
g.test('bound_on_bytes_per_row')
.desc(`For all formats, verify image copy validations w.r.t bytesPerRow.`)
.params(u =>
u
.combine('method', kImageCopyTypes)
.combine('format', kSizedTextureFormats)
.filter(formatCopyableWithMethod)
.beginSubcases()
.combine('copyHeightInBlocks', [1, 2])
.combine('copyDepth', [1, 2])
// Each expanded case fixes a bytesPerRow / texture-width / copy-width combination
// along with the expected validation outcome (_success).
.expandWithParams(p => {
const info = kTextureFormatInfo[p.format];
// We currently have a built-in assumption that for all formats, 128 % bytesPerBlock === 0.
// This assumption ensures that all division below results in integers.
assert(128 % info.bytesPerBlock === 0);
return [
// Copying exact fit with aligned bytesPerRow should work.
{
bytesPerRow: 256,
widthInBlocks: 256 / info.bytesPerBlock,
copyWidthInBlocks: 256 / info.bytesPerBlock,
_success: true,
},
// Copying into smaller texture when padding in bytesPerRow is enough should work unless
// it is a depth/stencil typed format.
{
bytesPerRow: 256,
widthInBlocks: 256 / info.bytesPerBlock,
copyWidthInBlocks: 256 / info.bytesPerBlock - 1,
_success: !(info.stencil || info.depth),
},
// Unaligned bytesPerRow should not work unless the method is 'WriteTexture'.
{
bytesPerRow: 128,
widthInBlocks: 128 / info.bytesPerBlock,
copyWidthInBlocks: 128 / info.bytesPerBlock,
_success: p.method === 'WriteTexture',
},
{
bytesPerRow: 384,
widthInBlocks: 384 / info.bytesPerBlock,
copyWidthInBlocks: 384 / info.bytesPerBlock,
_success: p.method === 'WriteTexture',
},
// When bytesPerRow is smaller than bytesInLastRow copying should fail.
{
bytesPerRow: 256,
widthInBlocks: (2 * 256) / info.bytesPerBlock,
copyWidthInBlocks: (2 * 256) / info.bytesPerBlock,
_success: false,
},
// When copyHeightInBlocks > 1, bytesPerRow must be specified.
{
bytesPerRow: undefined,
widthInBlocks: 256 / info.bytesPerBlock,
copyWidthInBlocks: 256 / info.bytesPerBlock,
_success: !(p.copyHeightInBlocks > 1 || p.copyDepth > 1),
},
];
})
)
.fn(async t => {
const {
method,
format,
bytesPerRow,
widthInBlocks,
copyWidthInBlocks,
copyHeightInBlocks,
copyDepth,
_success,
} = t.params;
const info = kTextureFormatInfo[format];
// Skip (rather than fail) if the device doesn't support this format's feature.
await t.selectDeviceOrSkipTestCase(info.feature);
// We create an aligned texture using the widthInBlocks which may be different from the
// copyWidthInBlocks. This allows us to test scenarios where the two may be different.
const texture = t.createAlignedTexture(format, {
width: widthInBlocks * info.blockWidth,
height: copyHeightInBlocks * info.blockHeight,
depthOrArrayLayers: copyDepth,
});
const layout = { bytesPerRow, rowsPerImage: copyHeightInBlocks };
const copySize = {
width: copyWidthInBlocks * info.blockWidth,
height: copyHeightInBlocks * info.blockHeight,
depthOrArrayLayers: copyDepth,
};
// NOTE(review): per the helper's name, this is the exact required data size when
// computable, otherwise an overestimate (e.g. for invalid layouts) — confirm in helper.
const { minDataSizeOrOverestimate } = dataBytesForCopyOrOverestimate({
layout,
format,
copySize,
method,
});
// The copy must succeed or fail exactly as predicted by the _success table above.
t.testRun({ texture }, layout, copySize, {
dataSize: minDataSizeOrOverestimate,
method,
success: _success,
});
});
g.test('bound_on_offset')
  .params(u =>
    u
      .combine('method', kImageCopyTypes)
      .beginSubcases()
      .combine('offsetInBlocks', [0, 1, 2])
      .combine('dataSizeInBlocks', [0, 1, 2])
  )
  .fn(async t => {
    const { offsetInBlocks, dataSizeInBlocks, method } = t.params;

    // A fixed, universally supported format; only the offset-vs-dataSize bound is under test.
    const format = 'rgba8unorm';
    const info = kTextureFormatInfo[format];
    const offset = offsetInBlocks * info.bytesPerBlock;
    const dataSize = dataSizeInBlocks * info.bytesPerBlock;

    const texture = t.device.createTexture({
      size: { width: 4, height: 4, depthOrArrayLayers: 1 },
      format,
      usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
    });

    // Even for an empty (zero-extent) copy, the offset must not point past the end of
    // the data; offsets up to and including dataSize are valid.
    t.testRun(
      { texture },
      { offset, bytesPerRow: 0 },
      { width: 0, height: 0, depthOrArrayLayers: 0 },
      { dataSize, method, success: offset <= dataSize }
    );
  });
|
sarahM0/cts
|
src/webgpu/api/operation/command_buffer/clearBuffer.spec.ts
|
export const description = `
API operations tests for clearBuffer.
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { GPUTest } from '../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
g.test('clear')
  .desc(
    `Validate the correctness of the clear by filling the srcBuffer with testable data, doing
clearBuffer(), and verifying the content of the whole srcBuffer with MapRead:
Clear {4 bytes, part of, the whole} buffer {with, without} a non-zero valid offset that
- covers the whole buffer
- covers the beginning of the buffer
- covers the end of the buffer
- covers neither the beginning nor the end of the buffer`
  )
  .paramsSubcasesOnly(u =>
    u //
      .combine('offset', [0, 4, 8, 16, undefined])
      .combine('size', [0, 4, 8, 16, undefined])
      // Buffer either fits the cleared range exactly or leaves 8 trailing bytes.
      .expand('bufferSize', ({ offset, size }) => {
        const minSize = (offset ?? 0) + (size ?? 16);
        return [minSize, minSize + 8];
      })
  )
  .fn(async t => {
    const { offset, size, bufferSize } = t.params;

    // Seed the buffer with a recognizable ramp (1, 2, 3, ...) so cleared bytes stand out.
    const expected = new Uint8Array(Array.from({ length: bufferSize }, (_, i) => i + 1));
    const buffer = t.makeBufferWithContents(
      expected,
      GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC
    );

    const encoder = t.device.createCommandEncoder();
    encoder.clearBuffer(buffer, offset, size);
    t.device.queue.submit([encoder.finish()]);

    // Zero the range clearBuffer should have cleared, mirroring the API defaults:
    // offset defaults to 0, size defaults to the remainder of the buffer.
    const clearStart = offset ?? 0;
    const clearLength = size ?? bufferSize - clearStart;
    expected.fill(0, clearStart, clearStart + clearLength);

    t.expectGPUBufferValuesEqual(buffer, expected);
  });
|
sarahM0/cts
|
src/webgpu/api/validation/resource_usages/texture/in_render_common.spec.ts
|
export const description = `
TODO:
- 2 views:
- x= {upon the same subresource, or different subresources {mip level, array layer, aspect} of the same texture}
- x= possible binding types on each view: read = {sampled texture, readonly storage texture}, write = {storage texture, render target}
- x= different shader stages: {0, ..., 7}
- maybe first view vis = {1, 2, 4}, second view vis = {0, ..., 7}
- x= bindings are in {
- same draw call
- same pass, different draw call
- different pass
- }
(It's probably not necessary to test EVERY possible combination of options in this whole
block, so we could break it down into a few smaller ones (one for different types of
subresources, one for same draw/same pass/different pass, one for visibilities).)
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { ValidationTest } from '../../validation_test.js';
export const g = makeTestGroup(ValidationTest);
|
sarahM0/cts
|
src/webgpu/api/validation/render_pass/storeOp.spec.ts
|
<gh_stars>10-100
export const description = `
API Validation Tests for RenderPass StoreOp.
Test Coverage:
- Tests that when depthReadOnly is true, depthStoreOp must be 'store'.
- When depthReadOnly is true and depthStoreOp is 'discard', an error should be generated.
- Tests that when stencilReadOnly is true, stencilStoreOp must be 'store'.
- When stencilReadOnly is true and stencilStoreOp is 'discard', an error should be generated.
- Tests that the depthReadOnly value matches the stencilReadOnly value.
- When depthReadOnly does not match stencilReadOnly, an error should be generated.
- Tests that depthReadOnly and stencilReadOnly default to false.
TODO: test interactions with depthLoadValue too
`;
import { makeTestGroup } from '../../../../common/framework/test_group.js';
import { ValidationTest } from '../validation_test.js';
export const g = makeTestGroup(ValidationTest);
g.test('store_op_and_read_only')
.paramsSimple([
{ readonly: true, _valid: true },
// Using depthReadOnly=true and depthStoreOp='discard' should cause a validation error.
{ readonly: true, depthStoreOp: 'discard', _valid: false },
// Using stencilReadOnly=true and stencilStoreOp='discard' should cause a validation error.
{ readonly: true, stencilStoreOp: 'discard', _valid: false },
// Mismatched depthReadOnly and stencilReadOnly values should cause a validation error.
{ readonly: false, _valid: true },
{ readonly: false, depthReadOnly: true, _valid: false },
{ readonly: false, stencilReadOnly: true, _valid: false },
// depthReadOnly and stencilReadOnly should default to false.
{ readonly: undefined, _valid: true },
{ readonly: undefined, depthReadOnly: true, _valid: false },
{ readonly: undefined, stencilReadOnly: true, _valid: false },
] as const)
.fn(async t => {
// Store ops default to 'store'; the per-aspect read-only flags default to the shared
// `readonly` parameter, so both aspects match unless a case overrides one of them.
const {
readonly,
depthStoreOp = 'store',
depthReadOnly = readonly,
stencilStoreOp = 'store',
stencilReadOnly = readonly,
_valid,
} = t.params;
// Minimal 1x1 combined depth/stencil texture used as the pass's only attachment.
const depthAttachment = t.device.createTexture({
format: 'depth24plus-stencil8',
size: { width: 1, height: 1, depthOrArrayLayers: 1 },
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});
const depthAttachmentView = depthAttachment.createView();
const encoder = t.device.createCommandEncoder();
const pass = encoder.beginRenderPass({
colorAttachments: [],
depthStencilAttachment: {
view: depthAttachmentView,
depthLoadValue: 'load',
depthStoreOp,
depthReadOnly,
stencilLoadValue: 'load',
stencilStoreOp,
stencilReadOnly,
},
});
pass.endPass();
// Validation surfaces when the command buffer is finished; expect an error exactly
// when the parameter table marked this case invalid.
t.expectValidationError(() => {
encoder.finish();
}, !_valid);
});
|
sarahM0/cts
|
src/common/internal/query/separators.ts
|
<filename>src/common/internal/query/separators.ts
/** Separator between the big query parts: `suite:file:test:case`. */
export const kBigSeparator = ':';
/** Separator between path segments: `path,to,file` or `path,to,test`. */
export const kPathSeparator = ',';
/** Separator between parameters: `k=v;k=v`. */
export const kParamSeparator = ';';
/** Separator between key and value in a single `k=v` parameter. */
export const kParamKVSeparator = '=';
/** Final wildcard, appended when a query is not single-case. */
export const kWildcard = '*';
|
sarahM0/cts
|
src/webgpu/api/operation/memory_sync/texture/rw_and_wr.spec.ts
|
export const description = `
Memory Synchronization Tests for Texture: read before write and read after write.
TODO
`;
import { makeTestGroup } from '../../../../../common/framework/test_group.js';
import { GPUTest } from '../../../../gpu_test.js';
export const g = makeTestGroup(GPUTest);
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.