hexsha stringlengths 40 40 | size int64 5 1.05M | ext stringclasses 588
values | lang stringclasses 305
values | max_stars_repo_path stringlengths 3 363 | max_stars_repo_name stringlengths 5 118 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count float64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringdate 2015-01-01 00:00:35 2022-03-31 23:43:49 ⌀ | max_stars_repo_stars_event_max_datetime stringdate 2015-01-01 12:37:38 2022-03-31 23:59:52 ⌀ | max_issues_repo_path stringlengths 3 363 | max_issues_repo_name stringlengths 5 118 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count float64 1 134k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 363 | max_forks_repo_name stringlengths 5 135 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count float64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringdate 2015-01-01 00:01:02 2022-03-31 23:27:27 ⌀ | max_forks_repo_forks_event_max_datetime stringdate 2015-01-03 08:55:07 2022-03-31 23:59:24 ⌀ | content stringlengths 5 1.05M | avg_line_length float64 1.13 1.04M | max_line_length int64 1 1.05M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c7732c7a4f5f75cde1d6755811f50e83f3665232 | 1,009 | h | C | Pods/TuyaSmartDeviceKit/ios/TuyaSmartDeviceKit.framework/Versions/A/Headers/TuyaSmartMultiControlDeviceModel.h | BruceZhang2017/diffuser | acd1cd585ef51c07517d99012e6027907ff3e81d | [
"MIT"
] | null | null | null | Pods/TuyaSmartDeviceKit/ios/TuyaSmartDeviceKit.framework/Versions/A/Headers/TuyaSmartMultiControlDeviceModel.h | BruceZhang2017/diffuser | acd1cd585ef51c07517d99012e6027907ff3e81d | [
"MIT"
] | null | null | null | Pods/TuyaSmartDeviceKit/ios/TuyaSmartDeviceKit.framework/Versions/A/Headers/TuyaSmartMultiControlDeviceModel.h | BruceZhang2017/diffuser | acd1cd585ef51c07517d99012e6027907ff3e81d | [
"MIT"
] | null | null | null | //
// TuyaSmartMultiControlDeviceModel.h
// TuyaSmartDeviceKit
//
// Copyright (c) 2014-2021 Tuya Inc. (https://developer.tuya.com)
#import <Foundation/Foundation.h>
#import "TuyaSmartMultiControlDatapointModel.h"
NS_ASSUME_NONNULL_BEGIN
/// Model object describing a device that can take part in a Tuya
/// "multi-control" group (a control linked across several devices).
@interface TuyaSmartMultiControlDeviceModel : NSObject
@property (copy, nonatomic) NSString *devId;///< Device ID.
@property (copy, nonatomic) NSString *productId;///< Product ID.
@property (copy, nonatomic) NSString *name;///< Device name.
@property (copy, nonatomic) NSString *iconUrl;///< Device Icon download link.
@property (copy, nonatomic) NSString *roomName;///< Room name.
@property (assign, nonatomic) BOOL inRule;///< A boolean value indicates whether the device in an automated condition.
@property (strong, nonatomic) NSArray<TuyaSmartMultiControlDatapointModel *> *datapoints;///< Datapoints of this device. NOTE(review): presumably the DPs eligible for multi-control linking — confirm against SDK docs.
@property (strong, nonatomic) NSArray<NSString *> *multiControlIds;///< Multiple control group ID arrays that the device has been associated with.
@end
NS_ASSUME_NONNULL_END
| 38.807692 | 146 | 0.773043 |
685a5053042d21135a3c8b78864a2986bd020268 | 3,627 | h | C | src/plainopts.h | co0p/plainopts | 75a3a706ab923f211daecd05d94bd04c857c13a3 | [
"Unlicense"
] | 1 | 2015-02-23T13:55:41.000Z | 2015-02-23T13:55:41.000Z | src/plainopts.h | co0p/plainopts | 75a3a706ab923f211daecd05d94bd04c857c13a3 | [
"Unlicense"
] | null | null | null | src/plainopts.h | co0p/plainopts | 75a3a706ab923f211daecd05d94bd04c857c13a3 | [
"Unlicense"
] | null | null | null | #ifndef PLAINOPTS
#define PLAINOPTS 1
#include <string>
#include <iostream>
#include <sstream>
#include <map>
#include <set>
using namespace std;
namespace plainopts {
enum Settings {ERROR, VALID, MISSING, HELP};
/*****************************************************************************
* A Flag contains no value and is always optional.
* It's either set or not.
*/
class Flag {

  private:
    char shortName;        // single-character name, '\0' when unused
    std::string longName;  // long option name, "" when unused
    std::string helpText;  // help text shown in usage output
    bool isSet;            // true once the flag was seen on the command line

  public:
    // A flag may be created with a short name, a long name, or both.
    // isSet is always initialized to false; the original left it
    // uninitialized, so is_set() returned indeterminate garbage.
    Flag (char cName) : shortName(cName), longName(""), helpText(""), isSet(false) { }

    Flag (std::string lName) : shortName('\0'), longName(lName), helpText(""), isSet(false) { }

    Flag (char sName, std::string lName)
        : shortName(sName), longName(lName), helpText(""), isSet(false) { }

    /// Virtual: Option derives from Flag and instances are handled through
    /// Flag* (see Plainopts::available_entries), so destruction must
    /// dispatch to the derived class. The original comment ("make this
    /// class polymorph") shows this was intended but the keyword was missing.
    virtual ~Flag() { }

    inline void set_help(std::string text) { helpText = text; }
    inline std::string get_help() { return helpText; }
    inline bool is_set() { return isSet; }
};
/*****************************************************************************
* Every option has a value associated with it.
* TODO: allow more than one value
*/
class Option : public Flag {

  private:
    std::string parameter;  // raw value as parsed from the command line
    bool mandatory;         // true if the option must be supplied by the user

  public:
    // Each constructor forwards the name(s) to Flag. 'mandatory' starts out
    // false; the original left it uninitialized, so is_mandatory() returned
    // indeterminate garbage until set_mandatory() was called.
    Option(char sName) : Flag(sName), mandatory(false) { }
    Option(std::string lName) : Flag(lName), mandatory(false) { }
    Option(char sName, std::string lName) : Flag(sName, lName), mandatory(false) { }

    ~Option() { }

    inline void set_parameter(std::string value) {
      parameter = value;
    }

    inline void set_mandatory(bool value) {
      mandatory = value;
    }

    inline bool is_mandatory() {
      return mandatory;
    }

    /// Parse the stored parameter into T (int, double, std::string, ...).
    /// If extraction fails, the value-initialized T() is returned instead of
    /// an uninitialized object (addresses the original's TODO partially —
    /// callers still cannot distinguish "0" from a failed parse).
    template <class T>
    T get_value() {
      T returnValue = T();
      std::stringstream ss(std::stringstream::in | std::stringstream::out);
      ss << parameter;
      ss >> returnValue;
      return returnValue;
    }
};
/*****************************************************************************
* This is the container for the flag and options
*/
class Plainopts {
private:
map<string, Flag*> available_entries;
set<string> defined_flags;
bool is_available(string key) {
return available_entries.count(key) > 0;
}
public:
Plainopts() { }
int parse(int argc, char *argv[]) {
return VALID;
}
const string last_error() { return "abc"; }
const string missing_entry() { return "abc"; }
const string help() { return "help"; }
/// adds a new flag option to the internal datastructures using the key
void add_flag(char sName, string lName) {
// don't allow empty names
if (sName == '\0' && lName == "") { return; }
Flag *new_flag = new Flag(sName, lName);
string key;
// add with short name if not empty
if (sName != '\0') {
key = sName;
available_entries.insert(pair<string, Flag*>(key, new_flag));
}
// add with long name if not empty
if (lName != "") {
key = lName;
available_entries.insert(pair<string, Flag*>(key, new_flag));
}
}
/// sets the help text on the object with long name as a key
void set_help(string key, string text) {
map<string, Flag*>::iterator it;
if (is_available(key)) {
it = available_entries.find(key);
(*it).second->set_help(text);
}
}
/// sets the help text on the object with short name as a key
void set_help(char key, string text) {
string keyAsString;
keyAsString = key;
set_help(keyAsString, text);
}
};
} /* namespace */
#endif /* PLAINOPTS */ | 22.115854 | 80 | 0.556107 |
be7b073d425bc449eef939629bd621d600b7505a | 5,708 | c | C | WRK-V1.2/tests/palsuite/threading/queueuserapc/test2/test2.c | intj-t/openvmsft | 0d17fbce8607ab2b880be976c2e86d8cfc3e83bb | [
"Intel"
] | null | null | null | WRK-V1.2/tests/palsuite/threading/queueuserapc/test2/test2.c | intj-t/openvmsft | 0d17fbce8607ab2b880be976c2e86d8cfc3e83bb | [
"Intel"
] | null | null | null | WRK-V1.2/tests/palsuite/threading/queueuserapc/test2/test2.c | intj-t/openvmsft | 0d17fbce8607ab2b880be976c2e86d8cfc3e83bb | [
"Intel"
] | null | null | null | /*=====================================================================
**
** Source: test2.c
**
** Purpose: Tests that APCs are not executed if a thread never enters an
** alertable state after they are queued.
**
**
** Copyright (c) 2006 Microsoft Corporation. All rights reserved.
**
** The use and distribution terms for this software are contained in the file
** named license.txt, which can be found in the root of this distribution.
** By using this software in any fashion, you are agreeing to be bound by the
** terms of this license.
**
** You must not remove this notice, or any other, from this software.
**
**
**===================================================================*/
#include <palsuite.h>
const int ChildThreadSleepTime = 2000;
const int InterruptTime = 1000;
DWORD ChildThread;
BOOL InAPC;
/* synchronization events */
static HANDLE hSyncEvent1 = NULL;
static HANDLE hSyncEvent2 = NULL;
/* thread result because we have no GetExitCodeThread() API */
static BOOL bThreadResult = FAIL;
/**
 * APCFunc
 *
 * APC routine queued on the child thread by main(). Records its execution
 * in the global InAPC flag; this test expects it NEVER to run because the
 * target thread never enters an alertable wait. dwParam is unused.
 */
VOID PALAPI APCFunc(ULONG_PTR dwParam)
{
    InAPC = TRUE;
}
/**
 * SleeperProc
 *
 * Child thread procedure. Signals readiness on hSyncEvent1, waits for the
 * main thread on hSyncEvent2 (giving main() time to queue the APC), then
 * calls plain Sleep() -- which is NOT alertable, so the queued APC must not
 * execute. The outcome is reported through the global bThreadResult because
 * no GetExitCodeThread() API is available here, and hSyncEvent1 is signaled
 * again on exit so main() knows the work is done.
 */
DWORD PALAPI SleeperProc(LPVOID lpParameter)
{
    DWORD ret;

    /* signal the main thread that we're ready to proceed */
    if( ! SetEvent( hSyncEvent1 ) )
    {
        Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() );
        bThreadResult = FAIL;
        goto done;
    }

    /* wait for notification from the main thread */
    ret = WaitForSingleObject( hSyncEvent2, 20000 );
    if( ret != WAIT_OBJECT_0 )
    {
        Trace( "ERROR:WaitForSingleObject() returned %lu, "
                "expected WAIT_OBJECT_0\n",
                ret );
        bThreadResult = FAIL;
        goto done;
    }

    /* call our sleep function -- non-alertable, so no APC may fire here */
    Sleep( ChildThreadSleepTime );

    /* success if we reach here */
    bThreadResult = PASS;

done:
    /* signal the main thread that we're finished */
    if( ! SetEvent( hSyncEvent1 ) )
    {
        Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() );
        bThreadResult = FAIL;
    }

    /* return success or failure */
    return bThreadResult;
}
/**
 * main
 *
 * Entry point. Creates a sleeper thread, queues a user APC on it, then lets
 * the thread run to completion without it ever entering an alertable wait.
 * The test passes when the APC function was never executed.
 *
 * Returns PASS on success; calls Fail() (which exits) on failure.
 */
int __cdecl main (int argc, char **argv)
{
    /* local variables */
    HANDLE hThread = 0;
    int ret;
    BOOL bResult = FAIL;

    /* initialize the PAL */
    if (0 != (PAL_Initialize(argc, argv)))
    {
        return FAIL;
    }

    InAPC = FALSE;

    /* create a pair of auto-reset events to coordinate our threads */
    hSyncEvent1 = CreateEvent( NULL, FALSE, FALSE, NULL );
    if( hSyncEvent1 == NULL )
    {
        Trace( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() );
        goto cleanup;
    }

    hSyncEvent2 = CreateEvent( NULL, FALSE, FALSE, NULL );
    if( hSyncEvent2 == NULL )
    {
        Trace( "ERROR:%lu:CreateEvent() call failed\n", GetLastError() );
        goto cleanup;
    }

    /* create a child thread */
    hThread = CreateThread( NULL,
                            0,
                            (LPTHREAD_START_ROUTINE)SleeperProc,
                            0,
                            0,
                            &ChildThread);
    if (hThread == NULL)
    {
        Trace( "ERROR:%lu:CreateThread() call failed\n",
              GetLastError());
        goto cleanup;
    }

    /* wait on our synchronization event to ensure the thread is running */
    ret = WaitForSingleObject( hSyncEvent1, 20000 );
    if( ret != WAIT_OBJECT_0 )
    {
        Trace( "ERROR:WaitForSingleObject() returned %lu, "
               "expected WAIT_OBJECT_0\n",
               ret );
        goto cleanup;
    }

    /* queue a user APC on the child thread; it must never run because the
       child thread never enters an alertable wait state */
    ret = QueueUserAPC(APCFunc, hThread, 0);
    if (ret == 0)
    {
        Trace( "ERROR:%lu:QueueUserAPC() call failed\n",
              GetLastError());
        goto cleanup;
    }

    /* signal the child thread to continue */
    if( ! SetEvent( hSyncEvent2 ) )
    {
        Trace( "ERROR:%lu:SetEvent() call failed\n", GetLastError() );
        goto cleanup;
    }

    /* wait on our synchronization event to ensure the other thread is done */
    ret = WaitForSingleObject( hSyncEvent1, 20000 );
    if( ret != WAIT_OBJECT_0 )
    {
        Trace( "ERROR:WaitForSingleObject() returned %lu, "
               "expected WAIT_OBJECT_0\n",
               ret );
        goto cleanup;
    }

    /* check that the thread executed successfully */
    if( bThreadResult == FAIL )
    {
        goto cleanup;
    }

    /* check whether the APC function was executed */
    if( InAPC )
    {
        Trace( "FAIL:APC function was executed but shouldn't have been\n" );
        goto cleanup;
    }

    /* success if we reach here */
    bResult = PASS;

cleanup:
    /* wait for the other thread to finish */
    if( hThread != NULL )
    {
        ret = WaitForSingleObject( hThread, INFINITE );
        if (ret == WAIT_FAILED)
        {
            /* The original Trace here had two %lu specifiers but passed only
             * one argument (undefined behavior), and its message described
             * the wrong condition for this WAIT_FAILED branch. */
            Trace( "ERROR:%lu:WaitForSingleObject() call failed\n",
                   GetLastError() );
            bResult = FAIL;
        }
    }

    /* close our synchronization handles */
    if( hSyncEvent1 != NULL )
    {
        if( ! CloseHandle( hSyncEvent1 ) )
        {
            Trace( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() );
            bResult = FAIL;
        }
    }

    if( hSyncEvent2 != NULL )
    {
        if( ! CloseHandle( hSyncEvent2 ) )
        {
            Trace( "ERROR:%lu:CloseHandle() call failed\n", GetLastError() );
            bResult = FAIL;
        }
    }

    if( bResult == FAIL )
    {
        Fail( "test failed\n" );
    }

    /* terminate the PAL */
    PAL_Terminate();

    /* return success */
    return PASS;
}
| 24.709957 | 78 | 0.553959 |
9c3fcaff90f0f888da62cdc46a139a767d519a02 | 221 | h | C | chol_and_inverse.h | Jack-Kemp/cboot | 32cb55bbe6eff0fdc5e1375729d93abeecc2fe14 | [
"MIT"
] | 13 | 2016-02-24T05:08:21.000Z | 2021-08-03T20:46:05.000Z | chol_and_inverse.h | Jack-Kemp/cboot | 32cb55bbe6eff0fdc5e1375729d93abeecc2fe14 | [
"MIT"
] | null | null | null | chol_and_inverse.h | Jack-Kemp/cboot | 32cb55bbe6eff0fdc5e1375729d93abeecc2fe14 | [
"MIT"
] | 1 | 2019-04-16T11:54:55.000Z | 2019-04-16T11:54:55.000Z | #include "mpfr.h"
/* Invert a triangular matrix A at precision 'prec'.
 * NOTE(review): presumably A is a dim x dim matrix stored as a flat mpfr_t
 * array and the result is freshly allocated -- confirm in the .c file. */
mpfr_t* mpfr_triangular_inverse(mpfr_t* A, int dim,mpfr_prec_t prec);
/* Cholesky factorization of A at precision 'prec'.
 * NOTE(review): same storage assumptions as above -- confirm. */
mpfr_t* mpfr_cholesky(mpfr_t* A, int dim,mpfr_prec_t prec);
/* Build an anti-band matrix from 'ab_vector'.
 * NOTE(review): exact layout is not visible from this header -- confirm. */
mpfr_t* form_anti_band(mpfr_t* ab_vector, int dim, mpfr_prec_t prec);
| 27.625 | 69 | 0.778281 |
d52f5b62e002d70c5844f4cb375b8b73a2cf509f | 578 | h | C | SKReachability/SKReachability.h | khanlou/SKReachability | 5b0b4953efd02647444d6d47ba8bad69664e711b | [
"MIT"
] | 2 | 2015-04-15T00:14:48.000Z | 2018-09-12T00:30:07.000Z | SKReachability/SKReachability.h | khanlou/SKReachability | 5b0b4953efd02647444d6d47ba8bad69664e711b | [
"MIT"
] | null | null | null | SKReachability/SKReachability.h | khanlou/SKReachability | 5b0b4953efd02647444d6d47ba8bad69664e711b | [
"MIT"
] | null | null | null | //
// FSReach.h
// Fireside
//
// Created by Soroush Khanlou on 7/6/11.
// Copyright 2011 __MyCompanyName__. All rights reserved.
//
#import <Foundation/Foundation.h>
extern NSString *const kSKReachabilityChangedNotification;
@interface SKReachability : NSObject

/// Host name whose reachability is tracked. Declared `copy` (not `strong`):
/// NSString has a mutable subclass, so copying guards against callers
/// mutating the value after assignment.
@property (nonatomic, copy) NSString *host;

@property (nonatomic, assign) BOOL radioPoweredOn;
@property (nonatomic, assign) BOOL connectedToInternet;
@property (nonatomic, assign) BOOL connectedViaWifi;
@property (nonatomic, assign) BOOL connectedToHost;

/// Shared singleton instance. Returns `instancetype` so subclasses are
/// typed correctly (and Swift imports it as `Self`).
+ (instancetype)sharedReachability;

@end
| 22.230769 | 58 | 0.769896 |
ec36c90fce0bbd1e57479a96ea4e9657a6e682a4 | 576 | h | C | src/bin/adb_reduce/rdc_handler.h | cumthqb/AntDB | ac2aa27563a378cd6af86181aa36fbbf216b50bc | [
"PostgreSQL",
"Apache-2.0"
] | 1 | 2019-03-01T02:44:33.000Z | 2019-03-01T02:44:33.000Z | src/bin/adb_reduce/rdc_handler.h | cumthqb/AntDB | ac2aa27563a378cd6af86181aa36fbbf216b50bc | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | src/bin/adb_reduce/rdc_handler.h | cumthqb/AntDB | ac2aa27563a378cd6af86181aa36fbbf216b50bc | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | /*-------------------------------------------------------------------------
*
* rdc_handler.h
* interface for handling messages
*
* Copyright (c) 2016-2017, ADB Development Group
*
* IDENTIFICATION
* src/bin/adb_reduce/rdc_handler.h
*
*-------------------------------------------------------------------------
*/
#ifndef RDC_HANDLE_H
#define RDC_HANDLE_H
#include "rdc_list.h"
#include "reduce/rdc_comm.h"
/* Service pending I/O for the plan-node connections in *pln_nodes.
 * NOTE(review): exact semantics live in rdc_handler.c -- confirm there. */
extern void HandlePlanIO(List **pln_nodes);
/* Service pending I/O for reduce connections; also takes the plan-node
 * list. NOTE(review): confirm against rdc_handler.c. */
extern void HandleReduceIO(List **pln_nodes);
/* Broadcast a close notification to the reduce group (see rdc_comm.h). */
extern void BroadcastRdcClose(void);
#endif	/* RDC_HANDLE_H */
| 24 | 75 | 0.543403 |
5740501bd986b5d43f24126c472a260a4e5cc450 | 399 | h | C | spadesLabSampleApp/SupportingFiles/spadesLabSampleApp-Bridging-Header.h | SPADES-PUBLIC/spades-swift-sdk | 99087a6c74ce69c9c31a0d6a24fd1217a2bf3697 | [
"MIT"
] | null | null | null | spadesLabSampleApp/SupportingFiles/spadesLabSampleApp-Bridging-Header.h | SPADES-PUBLIC/spades-swift-sdk | 99087a6c74ce69c9c31a0d6a24fd1217a2bf3697 | [
"MIT"
] | null | null | null | spadesLabSampleApp/SupportingFiles/spadesLabSampleApp-Bridging-Header.h | SPADES-PUBLIC/spades-swift-sdk | 99087a6c74ce69c9c31a0d6a24fd1217a2bf3697 | [
"MIT"
] | null | null | null | //
// spadesLabSampleApp-Bridging-Header.h
// spadesLabSampleApp
//
// Created by Stanis Laus Billy on 2/29/16.
// Copyright © 2016 QMEDIC. All rights reserved.
//
#ifndef spadesLabSampleApp_Bridging_Header_h
#define spadesLabSampleApp_Bridging_Header_h
#import <AWSS3/AWSS3.h>
#import <AWSSQS/AWSSQS.h>
#import <CommonCrypto/CommonCrypto.h>
#endif /* spadesLabSampleApp_Bridging_Header_h */
| 23.470588 | 49 | 0.776942 |
b9ccea3324240cb1663d8ac38ab12bbf957274dd | 5,660 | h | C | Carthage/Build/iOS/Dodo.framework/Headers/Dodo-Swift.h | marketplacer/FitLoader | 3bce283e999f87331ed4276d29ec2dd34b4fa820 | [
"MIT"
] | 2 | 2015-06-17T16:56:05.000Z | 2015-06-18T08:43:15.000Z | Carthage/Build/iOS/Dodo.framework/Headers/Dodo-Swift.h | marketplacer/FitLoader | 3bce283e999f87331ed4276d29ec2dd34b4fa820 | [
"MIT"
] | null | null | null | Carthage/Build/iOS/Dodo.framework/Headers/Dodo-Swift.h | marketplacer/FitLoader | 3bce283e999f87331ed4276d29ec2dd34b4fa820 | [
"MIT"
] | 1 | 2015-06-18T08:43:17.000Z | 2015-06-18T08:43:17.000Z | // Generated by Apple Swift version 2.1 (swiftlang-700.1.101.6 clang-700.1.76)
#pragma clang diagnostic push
#if defined(__has_include) && __has_include(<swift/objc-prologue.h>)
# include <swift/objc-prologue.h>
#endif
#pragma clang diagnostic ignored "-Wauto-import"
#include <objc/NSObject.h>
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#if defined(__has_include) && __has_include(<uchar.h>)
# include <uchar.h>
#elif !defined(__cplusplus) || __cplusplus < 201103L
typedef uint_least16_t char16_t;
typedef uint_least32_t char32_t;
#endif
typedef struct _NSZone NSZone;
#if !defined(SWIFT_PASTE)
# define SWIFT_PASTE_HELPER(x, y) x##y
# define SWIFT_PASTE(x, y) SWIFT_PASTE_HELPER(x, y)
#endif
#if !defined(SWIFT_METATYPE)
# define SWIFT_METATYPE(X) Class
#endif
#if defined(__has_attribute) && __has_attribute(objc_runtime_name)
# define SWIFT_RUNTIME_NAME(X) __attribute__((objc_runtime_name(X)))
#else
# define SWIFT_RUNTIME_NAME(X)
#endif
#if defined(__has_attribute) && __has_attribute(swift_name)
# define SWIFT_COMPILE_NAME(X) __attribute__((swift_name(X)))
#else
# define SWIFT_COMPILE_NAME(X)
#endif
#if !defined(SWIFT_CLASS_EXTRA)
# define SWIFT_CLASS_EXTRA
#endif
#if !defined(SWIFT_PROTOCOL_EXTRA)
# define SWIFT_PROTOCOL_EXTRA
#endif
#if !defined(SWIFT_ENUM_EXTRA)
# define SWIFT_ENUM_EXTRA
#endif
#if !defined(SWIFT_CLASS)
# if defined(__has_attribute) && __has_attribute(objc_subclassing_restricted)
# define SWIFT_CLASS(SWIFT_NAME) SWIFT_RUNTIME_NAME(SWIFT_NAME) __attribute__((objc_subclassing_restricted)) SWIFT_CLASS_EXTRA
# define SWIFT_CLASS_NAMED(SWIFT_NAME) __attribute__((objc_subclassing_restricted)) SWIFT_COMPILE_NAME(SWIFT_NAME) SWIFT_CLASS_EXTRA
# else
# define SWIFT_CLASS(SWIFT_NAME) SWIFT_RUNTIME_NAME(SWIFT_NAME) SWIFT_CLASS_EXTRA
# define SWIFT_CLASS_NAMED(SWIFT_NAME) SWIFT_COMPILE_NAME(SWIFT_NAME) SWIFT_CLASS_EXTRA
# endif
#endif
#if !defined(SWIFT_PROTOCOL)
# define SWIFT_PROTOCOL(SWIFT_NAME) SWIFT_RUNTIME_NAME(SWIFT_NAME) SWIFT_PROTOCOL_EXTRA
# define SWIFT_PROTOCOL_NAMED(SWIFT_NAME) SWIFT_COMPILE_NAME(SWIFT_NAME) SWIFT_PROTOCOL_EXTRA
#endif
#if !defined(SWIFT_EXTENSION)
# define SWIFT_EXTENSION(M) SWIFT_PASTE(M##_Swift_, __LINE__)
#endif
#if !defined(OBJC_DESIGNATED_INITIALIZER)
# if defined(__has_attribute) && __has_attribute(objc_designated_initializer)
# define OBJC_DESIGNATED_INITIALIZER __attribute__((objc_designated_initializer))
# else
# define OBJC_DESIGNATED_INITIALIZER
# endif
#endif
#if !defined(SWIFT_ENUM)
# define SWIFT_ENUM(_type, _name) enum _name : _type _name; enum SWIFT_ENUM_EXTRA _name : _type
#endif
typedef float swift_float2 __attribute__((__ext_vector_type__(2)));
typedef float swift_float3 __attribute__((__ext_vector_type__(3)));
typedef float swift_float4 __attribute__((__ext_vector_type__(4)));
typedef double swift_double2 __attribute__((__ext_vector_type__(2)));
typedef double swift_double3 __attribute__((__ext_vector_type__(3)));
typedef double swift_double4 __attribute__((__ext_vector_type__(4)));
typedef int swift_int2 __attribute__((__ext_vector_type__(2)));
typedef int swift_int3 __attribute__((__ext_vector_type__(3)));
typedef int swift_int4 __attribute__((__ext_vector_type__(4)));
#if defined(__has_feature) && __has_feature(modules)
@import UIKit;
@import ObjectiveC;
@import CoreGraphics;
#endif
#pragma clang diagnostic ignored "-Wproperty-attribute-mismatch"
#pragma clang diagnostic ignored "-Wduplicate-method-arg"
@interface UIView (SWIFT_EXTENSION(Dodo))
@end
@class NSLayoutConstraint;
@protocol UILayoutSupport;
/// Adjusts the length (constant value) of the bottom layout constraint when keyboard shows and hides.
SWIFT_CLASS("_TtC4Dodo29UnderKeyboardLayoutConstraint")
@interface UnderKeyboardLayoutConstraint : NSObject
- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER;
/// Stop listening for keyboard notifications.
- (void)stop;
/// Supply a bottom Auto Layout constraint. Its constant value will be adjusted by the height of the keyboard when it appears and hides.
///
/// \param bottomLayoutConstraint Supply a bottom layout constraint. Its constant value will be adjusted when keyboard is shown and hidden.
///
/// \param view Supply a view that will be used to animate the constraint. It is usually the superview containing the view with the constraint.
///
/// \param minMargin Specify the minimum margin between the keyboard and the bottom of the view the constraint is attached to. Default: 10.
///
/// \param bottomLayoutGuide Supply an optional bottom layout guide (like a tab bar) that will be taken into account during height calculations.
- (void)setup:(NSLayoutConstraint * __nonnull)bottomLayoutConstraint view:(UIView * __nonnull)view minMargin:(CGFloat)minMargin bottomLayoutGuide:(id <UILayoutSupport> __nullable)bottomLayoutGuide;
@end
/// Detects appearance of software keyboard and calls the supplied closures that can be used for changing the layout and moving view from under the keyboard.
SWIFT_CLASS("_TtC4Dodo21UnderKeyboardObserver")
@interface UnderKeyboardObserver : NSObject
/// Function that will be called before the keyboard is shown and before animation is started.
@property (nonatomic, copy) void (^ __nullable willAnimateKeyboard)(CGFloat);
/// Function that will be called inside the animation block. This can be used to call layoutIfNeeded on the view.
@property (nonatomic, copy) void (^ __nullable animateKeyboard)(CGFloat);
- (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER;
/// Start listening for keyboard notifications.
- (void)start;
/// Stop listening for keyboard notifications.
- (void)stop;
@end
#pragma clang diagnostic pop
| 39.034483 | 197 | 0.805124 |
02c42740fe4c1609c6c29e63ba113df6b0f58e6e | 4,056 | h | C | includes/push_swap.h | ryo-manba/push_swap | 6309f7f1e2503302e28ec29518a0126d467a810c | [
"MIT"
] | null | null | null | includes/push_swap.h | ryo-manba/push_swap | 6309f7f1e2503302e28ec29518a0126d467a810c | [
"MIT"
] | null | null | null | includes/push_swap.h | ryo-manba/push_swap | 6309f7f1e2503302e28ec29518a0126d467a810c | [
"MIT"
] | null | null | null | /* ************************************************************************** */
/* */
/* ::: :::::::: */
/* push_swap.h :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: rmatsuka < rmatsuka@student.42tokyo.jp> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2021/05/17 23:18:44 by rmatsuka #+# #+# */
/* Updated: 2021/06/08 07:38:06 by rmatsuka ### ########.fr */
/* */
/* ************************************************************************** */
#ifndef PUSH_SWAP_H
# define PUSH_SWAP_H
# include <stdlib.h>
# include <unistd.h>
# include <limits.h>
# include <stdbool.h>
# include "../libft/libft.h"
# define STACK_MAX 10000
# define MAX_5 12
/*
** Singly linked list node used by the checker: one node per instruction
** read from input. NOTE(review): 'ans' looks like the raw instruction text
** and 'ret' its parsed action code -- confirm in the checker sources.
*/
typedef struct s_info
{
	char			*ans;
	int				ret;
	struct s_info	*next;
}	t_info;

/*
** Doubly linked list node holding one stack value.
** NOTE(review): 'dummy' and 'group' look like sort bookkeeping
** (normalized order / partition group) -- confirm in ft_sort*.c.
*/
typedef struct s_dlist
{
	int				value;
	int				dummy;
	int				group;
	struct s_dlist	*next;
	struct s_dlist	*prev;
}	t_dlist;

/*
** Search state for the exhaustive sort of <= 5 elements (see dfs() and
** five_or_less()): 'best' tracks the best move count found so far;
** 'ans'/'tmp' record operation sequences of at most MAX_5 moves.
*/
typedef struct s_data
{
	int				best;
	int				now;
	int				prev;
	int				ans[MAX_5];
	int				tmp[MAX_5];
}	t_data;

/*
** The eleven push_swap stack operations.
*/
typedef enum e_actions
{
	SA,
	SB,
	SS,
	PA,
	PB,
	RA,
	RB,
	RR,
	RRA,
	RRB,
	RRR
}	t_actions;
/* checker */
int checker(int argc, char **argv);
void put_result_and_free(t_dlist *a, t_dlist *b, t_info *info);
void run_action(t_info *data, t_dlist *a, t_dlist *b);
void info_clear(t_info **info);
void info_addback(t_info *info);
/* actions1 */
bool rotate(t_dlist *list);
bool double_rotate(t_dlist *a, t_dlist *b);
bool swap_list(t_dlist *list);
bool double_swap(t_dlist *a, t_dlist *b);
/* actions2 */
bool rev_rotate(t_dlist *list);
bool double_rev_rotate(t_dlist *a, t_dlist *b);
bool push(t_dlist *list1, t_dlist *list2);
/* actions3 */
int restore(int action);
bool list_move(int action, t_dlist *a, t_dlist *b);
void put_action(int action);
void put_result(t_data *data);
/* exit_fatal */
void exit_fatal(int n);
/* make_dlist */
int ft_atoi_ps(char *s);
void dlist_add_back(t_dlist *list, int value);
void check_overlapping(int num[], int size);
void apply_order(t_dlist *list, int sorted[]);
t_dlist *make_dlist(int argc, char **argv);
/* push_swap */
int push_swap(int argc, char **argv);
/* quick_sort */
void ft_swap(int *x, int *y);
void quick_sort(int *array, int left, int right);
/* utils1 */
int list_len(t_dlist *list);
bool is_sorted(t_dlist *list);
void list_clear(t_dlist **list);
t_dlist *create_list(void);
/* utils2 */
int get_max(t_dlist *list);
int get_min(t_dlist *list);
/* ft_sort1 */
void ft_sort(t_dlist *a, t_dlist *b);
void sort_list(t_dlist *a, t_dlist *b, t_list *cmd);
void doc_reverse(t_dlist *a, t_dlist *b, t_list *cmd);
void austin_reverse(t_dlist *a, t_dlist *b, t_list *cmd);
void first_separate(t_dlist *a, t_dlist *b, t_list *cmd, int pivot);
/* ft_sort2 */
int above_or_below(t_dlist *list, int pivot, bool is_a);
void partition1(t_dlist *a, t_dlist *b, t_list *cmd, int pivot);
void partition2(t_dlist *a, t_dlist *b, t_list *cmd, int *min);
void is_possible_rotate(t_dlist *a, t_dlist *b, t_list *cmd, int *min);
void rotate_sort_simple(t_dlist *a, t_list *cmd, int *min);
/* ft_sort3 */
int create_pivot(int min, int max);
int push_sort(t_dlist *a, t_dlist *b, t_list *cmd, int ret);
int rotate_sort(t_dlist *a, t_list *cmd, int len, int max_order);
int next_rotate_sort(t_dlist *a, t_list *cmd, int max_order);
void put_cmd(t_list *cmd);
/* three_or_less */
void three_or_less(t_dlist *list, t_list *cmd, int sz);
void three_sort(t_dlist *list, t_list *cmd);
/* five_or_less */
bool pruning_action(int now, int prev);
void five_or_less(t_dlist *a, t_dlist *b);
void init_data(t_data *data);
void dfs(t_dlist *a, t_dlist *b, t_data *data, int depth);
#endif | 27.591837 | 80 | 0.574951 |
becf78e5c99fcf92faa74cb4b50fb94b1d333421 | 3,563 | c | C | fs/nxffs/nxffs_truncate.c | eenurkka/incubator-nuttx | 5c3d6bba6d9ec5015896c3019cd2064696373210 | [
"Apache-2.0"
] | 1,006 | 2019-12-17T23:45:41.000Z | 2022-03-31T19:42:44.000Z | fs/nxffs/nxffs_truncate.c | eenurkka/incubator-nuttx | 5c3d6bba6d9ec5015896c3019cd2064696373210 | [
"Apache-2.0"
] | 2,661 | 2019-12-21T15:16:09.000Z | 2022-03-31T22:30:04.000Z | fs/nxffs/nxffs_truncate.c | eenurkka/incubator-nuttx | 5c3d6bba6d9ec5015896c3019cd2064696373210 | [
"Apache-2.0"
] | 613 | 2019-12-21T10:17:37.000Z | 2022-03-28T09:42:20.000Z | /****************************************************************************
* fs/nxffs/nxffs_truncate.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <fcntl.h>
#include <assert.h>
#include <errno.h>
#include <debug.h>
#include "nxffs.h"
#ifdef __NO_TRUNCATE_SUPPORT__
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: nxffs_truncate
*
* Description:
* Set the length of the open, regular file associated with the file
* structure 'filep' to 'length'.
*
****************************************************************************/
int nxffs_truncate(FAR struct file *filep, off_t length)
{
  FAR struct nxffs_volume_s *volume;
  FAR struct nxffs_wrfile_s *wrfile;
  off_t oldsize;
  int ret;

  /* The original trace referenced 'buflen' and 'filep->f_pos', which were
   * copied from the write logic; 'buflen' does not exist in this function
   * and would not compile if __NO_TRUNCATE_SUPPORT__ were ever defined.
   */

  finfo("Truncate to length: %ld\n", (long)length);

  /* Sanity checks */

  DEBUGASSERT(filep->f_priv != NULL && filep->f_inode != NULL);

  /* Recover the open file state from the struct file instance */

  wrfile = (FAR struct nxffs_wrfile_s *)filep->f_priv;

  /* Recover the volume state from the open file */

  volume = (FAR struct nxffs_volume_s *)filep->f_inode->i_private;
  DEBUGASSERT(volume != NULL);

  /* Get exclusive access to the volume.  Note that the volume exclsem
   * protects the open file list.
   */

  ret = nxsem_wait(&volume->exclsem);
  if (ret < 0)
    {
      ferr("ERROR: nxsem_wait failed: %d\n", ret);
      goto errout;
    }

  /* Check if the file was opened with write access */

  if ((wrfile->ofile.oflags & O_WROK) == 0)
    {
      ferr("ERROR: File not open for write access\n");
      ret = -EACCES;
      goto errout_with_semaphore;
    }

  /* Are we shrinking the file?  Or extending it? */

  oldsize = wrfile->ofile.entry.datlen;
  if (oldsize == length)
    {
      /* Nothing to do -- the file is already the requested length. */

      ret = OK;
      goto errout_with_semaphore;
    }
  else if (oldsize > length)
    {
      /* We are shrinking the file.
       *
       * REVISIT:  Logic to shrink the file has not yet been implemented.
       */

      ret = -ENOSYS;
    }
  else
    {
      /* We are zero-extending the file.  This essentially amounts to a
       * write-append operation with zero data.
       */

      ret = nxffs_wrextend(volume, wrfile, length);
    }

errout_with_semaphore:
  nxsem_post(&volume->exclsem);

errout:
  return ret;
}
#endif /* __NO_TRUNCATE_SUPPORT__ */
| 28.504 | 78 | 0.555992 |
7e484db1cb1284101409de17ab6c8608de886432 | 953 | h | C | src/Cubestein3D/Entity.h | DioMuller/cubestein3D | 5a358e1662287f72fdb8e3a10a664413f24a462e | [
"MIT"
] | null | null | null | src/Cubestein3D/Entity.h | DioMuller/cubestein3D | 5a358e1662287f72fdb8e3a10a664413f24a462e | [
"MIT"
] | null | null | null | src/Cubestein3D/Entity.h | DioMuller/cubestein3D | 5a358e1662287f72fdb8e3a10a664413f24a462e | [
"MIT"
] | null | null | null | #pragma once
#include <list>
#include "Renderer.h"
#include "Behavior.h"
#include "Rect.h"
class Entity
{
	////////////////////////////////////////
	// Attributes
	////////////////////////////////////////
	public:
		Vector position;
		Vector rotation;
		Vector size;
		std::string tag;
	protected:
		std::list<Behavior*> behaviors;
	////////////////////////////////////////
	// Constructor / Destructor
	////////////////////////////////////////
	public:
		Entity();
		// Virtual: this class declares virtual methods (Update, Render,
		// Destroy, CollideWith) and is clearly meant to be subclassed, so
		// deleting a derived object through an Entity* must dispatch to the
		// derived destructor. The original non-virtual destructor made that
		// undefined behavior.
		virtual ~Entity();
	////////////////////////////////////////
	// Methods
	////////////////////////////////////////
	public:
		virtual void Update(long delta);
		virtual void Render(long delta, Renderer* renderer);
		virtual void Destroy();
		bool CheckCollision(Entity* other);
		Rect GetCollisionRect();
		virtual void CollideWith(Entity* other);
		void AddBehavior(Behavior* behavior);
		void RemoveBehavior(Behavior* behavior);
		void ClearBehaviors();
		Vector GetDirection();
};
| 19.854167 | 54 | 0.520462 |
f2e5c0a20f436c304bbd0311973286ecaa7eb8b3 | 8,342 | c | C | src/cache.c | rnleach/nbm-report | 3211bf3132c3af90c78fe571dfca26ff39256b9c | [
"MIT"
] | 1 | 2021-03-05T17:38:12.000Z | 2021-03-05T17:38:12.000Z | src/cache.c | rnleach/nbm-report | 3211bf3132c3af90c78fe571dfca26ff39256b9c | [
"MIT"
] | 2 | 2020-09-07T01:23:03.000Z | 2020-10-04T23:53:50.000Z | src/cache.c | rnleach/nbm-report | 3211bf3132c3af90c78fe571dfca26ff39256b9c | [
"MIT"
] | null | null | null | #include "cache.h"
#include <sys/stat.h>
#include <zlib.h>
#include <sqlite3.h>
static struct ByteBuffer
compress_text_buffer(struct TextBuffer const in_text[static 1])
{
assert(in_text);
struct ByteBuffer out_buf = byte_buffer_with_capacity(in_text->size + 2);
int z_ret = Z_OK;
z_stream strm = {.zalloc = Z_NULL, .zfree = Z_NULL, .opaque = Z_NULL};
z_ret = deflateInit(&strm, 9);
Stopif(z_ret != Z_OK, exit(EXIT_FAILURE), "zlib deflate init error.");
strm.avail_in = in_text->size;
strm.next_in = in_text->byte_data;
do {
int out_start = byte_buffer_remaining_capacity(&out_buf);
strm.avail_out = out_start;
strm.next_out = byte_buffer_next_write_pos(&out_buf);
z_ret = deflate(&strm, Z_FINISH);
Stopif(z_ret == Z_STREAM_ERROR, exit(EXIT_FAILURE), "zlib stream clobbered");
byte_buffer_increase_size(&out_buf, out_start - strm.avail_out);
if (strm.avail_out == 0) {
byte_buffer_set_capacity(&out_buf, strm.avail_in + 256);
}
} while (strm.avail_out == 0 && strm.avail_in > 0);
deflateEnd(&strm);
return out_buf;
}
static struct TextBuffer
uncompress_text(int in_size, unsigned char in[in_size])
{
assert(in);
struct TextBuffer out_buf = text_buffer_with_capacity(in_size * 10);
int z_ret = Z_OK;
z_stream strm = {
.zalloc = Z_NULL, .zfree = Z_NULL, .opaque = Z_NULL, .avail_in = 0, .next_in = Z_NULL};
z_ret = inflateInit(&strm);
Stopif(z_ret != Z_OK, exit(EXIT_FAILURE), "zlib inflate init error.");
strm.avail_in = in_size;
strm.next_in = in;
do {
int out_start = out_buf.capacity - out_buf.size;
strm.avail_out = out_start;
strm.next_out = &out_buf.byte_data[out_buf.size];
z_ret = inflate(&strm, Z_FINISH);
switch (z_ret) {
case Z_NEED_DICT: // fall through
case Z_DATA_ERROR: // fall through
case Z_MEM_ERROR: // fall through
Stopif(true, exit(EXIT_FAILURE), "zlib error inflating.");
}
out_buf.size += out_start - strm.avail_out;
if (strm.avail_out == 0) {
text_buffer_set_capacity(&out_buf, out_buf.capacity + strm.avail_in * 6);
}
} while (strm.avail_out == 0 && strm.avail_in > 0);
inflateEnd(&strm);
return out_buf;
}
/** Build the path to the on-disk cache, creating parent directories as needed.
 *
 * \returns a pointer to a statically allocated path string; not thread safe.
 */
static const char *
get_or_create_cache_path()
{
    /* BUG FIX: the previous 64-byte buffer overflowed via sprintf for any
     * home directory longer than ~20 characters. Use a generous buffer and
     * snprintf, aborting on truncation.
     */
    static char path[1024] = {0};

    char const *home = getenv("HOME");
    Stopif(!home, exit(EXIT_FAILURE), "could not find user's home directory.");

    struct stat st = {0};
    int num_chars = 0;

    num_chars = snprintf(path, sizeof(path), "%s/.local/", home);
    Stopif(num_chars >= (int)sizeof(path), exit(EXIT_FAILURE), "cache path too long.");
    if (stat(path, &st) == -1) {
        mkdir(path, 0774);
    }

    num_chars = snprintf(path, sizeof(path), "%s/.local/share/", home);
    Stopif(num_chars >= (int)sizeof(path), exit(EXIT_FAILURE), "cache path too long.");
    if (stat(path, &st) == -1) {
        mkdir(path, 0774);
    }

    num_chars = snprintf(path, sizeof(path), "%s/.local/share/nbm-report/", home);
    Stopif(num_chars >= (int)sizeof(path), exit(EXIT_FAILURE), "cache path too long.");
    if (stat(path, &st) == -1) {
        mkdir(path, 0774);
    }

    num_chars = snprintf(path, sizeof(path), "%s/.local/share/nbm-report/cache.sqlite", home);
    Stopif(num_chars >= (int)sizeof(path), exit(EXIT_FAILURE), "cache path too long.");

    return path;
}
/** Global handle to the cache. */
static sqlite3 *cache = 0;
void
cache_initialize()
{
char const *path = get_or_create_cache_path();
int result = sqlite3_open(path, &cache);
Stopif(result != SQLITE_OK, exit(EXIT_FAILURE), "unable to open download cache.");
char *sql = "CREATE TABLE IF NOT EXISTS nbm ( \n"
" site TEXT NOT NULL, \n"
" init_time INTEGER NOT NULL, \n"
" data BLOB, \n"
" PRIMARY KEY (site, init_time)); \n";
sqlite3_stmt *statement = 0;
int rc = sqlite3_prepare_v2(cache, sql, -1, &statement, 0);
Stopif(rc != SQLITE_OK, exit(EXIT_FAILURE), "error preparing cache initialization sql: %s",
sqlite3_errstr(rc));
rc = sqlite3_step(statement);
Stopif(rc != SQLITE_DONE, exit(EXIT_FAILURE), "error executing cache initialization sql.");
rc = sqlite3_finalize(statement);
Stopif(rc != SQLITE_OK, exit(EXIT_FAILURE), "error finalizing cache initialization sql.");
}
void
cache_finalize()
{
time_t now = time(0);
time_t too_old = now - 60 * 60 * 24 * 555; // About 555 days. That's over 1.5 years!
char *sql = "DELETE FROM nbm WHERE init_time < ?";
sqlite3_stmt *statement = 0;
int rc = sqlite3_prepare_v2(cache, sql, -1, &statement, 0);
Stopif(rc != SQLITE_OK, exit(EXIT_FAILURE), "error preparing delete statement: %s",
sqlite3_errstr(rc));
rc = sqlite3_bind_int64(statement, 1, too_old);
Stopif(rc != SQLITE_OK, exit(EXIT_FAILURE), "error binding init_time in delete.");
rc = sqlite3_step(statement);
Stopif(rc != SQLITE_ROW && rc != SQLITE_DONE, exit(EXIT_FAILURE),
"error executing select sql: %s", sqlite3_errstr(rc));
rc = sqlite3_finalize(statement);
Stopif(rc != SQLITE_OK, exit(EXIT_FAILURE), "error finalizing delete statement");
int result = sqlite3_close(cache);
if (result != SQLITE_OK) {
fprintf(stderr, "ERROR CLOSING DOWNLOAD CACHE\n");
fprintf(stderr, "sqlite3 message: %s\n", sqlite3_errmsg(cache));
}
}
/** Look up a cached download.
 *
 * \param file_name the site/key the data was stored under; must not be NULL.
 * \param init_time the model initialization time of the requested data.
 *
 * \returns a TextBuffer with the decompressed cached text, or an empty
 * buffer (capacity 0) when the entry is missing or an error occurred.
 * The caller owns the returned buffer.
 */
struct TextBuffer
cache_retrieve(char const file_name[static 1], time_t init_time)
{
    assert(file_name);

    /* Empty buffer doubles as the "not found / error" return value. */
    struct TextBuffer out_buf = text_buffer_with_capacity(0);

    char const *sql = "SELECT data FROM nbm WHERE site = ? AND init_time = ?";

    sqlite3_stmt *statement = 0;
    int rc = sqlite3_prepare_v2(cache, sql, -1, &statement, 0);
    Stopif(rc != SQLITE_OK, goto ERR_RETURN, "error preparing select statement: %s",
           sqlite3_errstr(rc));

    rc = sqlite3_bind_text(statement, 1, file_name, -1, 0);
    Stopif(rc != SQLITE_OK, goto ERR_RETURN, "error binding site in select.");

    rc = sqlite3_bind_int64(statement, 2, init_time);
    Stopif(rc != SQLITE_OK, goto ERR_RETURN, "error binding init_time in select.");

    rc = sqlite3_step(statement);
    Stopif(rc != SQLITE_ROW && rc != SQLITE_DONE, goto ERR_RETURN, "error executing select sql: %s",
           sqlite3_errstr(rc));

    if (rc == SQLITE_DONE) { // nothing retrieved
        goto ERR_RETURN; // not really an error, but the cleanup at this point is the same.
    }

    /* Validate the stored column before treating it as a compressed blob. */
    int col_type = sqlite3_column_type(statement, 0);
    Stopif(col_type == SQLITE_NULL, goto ERR_RETURN, "null data retrieved from cache");
    Stopif(col_type != SQLITE_BLOB, goto ERR_RETURN, "invalid data type in cache");

    /* blob_data is owned by sqlite and only valid until finalize, so the
     * decompression must happen before sqlite3_finalize() below.
     */
    unsigned char const *blob_data = sqlite3_column_blob(statement, 0);
    int blob_size = sqlite3_column_bytes(statement, 0);

    out_buf = uncompress_text(blob_size, (unsigned char *)blob_data);

    rc = sqlite3_finalize(statement);
    Stopif(rc != SQLITE_OK, exit(EXIT_FAILURE), "error finalizing select statement");

    return out_buf;

ERR_RETURN:

    /* Shared cleanup path; out_buf is still the empty buffer here. */
    rc = sqlite3_finalize(statement);
    Stopif(rc != SQLITE_OK, exit(EXIT_FAILURE), "error finalizing select statement");

    return out_buf;
}
/** Store (or replace) a download in the cache.
 *
 * The text is compressed with zlib before being written as a blob.
 *
 * \param site the key to store the data under; must not be NULL.
 * \param init_time the model initialization time of the data.
 * \param buf the text to store; must not be NULL.
 *
 * \returns 0 on success, -1 on failure.
 */
int
cache_add(char const *site, time_t init_time, struct TextBuffer const buf[static 1])
{
    assert(site);
    assert(buf);

    struct ByteBuffer compressed_buf = compress_text_buffer(buf);

    char const *sql = "INSERT OR REPLACE INTO nbm (site, init_time, data) VALUES (?, ?, ?)";

    sqlite3_stmt *statement = 0;
    int rc = sqlite3_prepare_v2(cache, sql, -1, &statement, 0);
    Stopif(rc != SQLITE_OK, exit(EXIT_FAILURE), "error preparing insert statement: %s",
           sqlite3_errstr(rc));

    rc = sqlite3_bind_text(statement, 1, site, -1, 0);
    Stopif(rc != SQLITE_OK, goto ERR_RETURN, "error binding site.");

    rc = sqlite3_bind_int64(statement, 2, init_time);
    Stopif(rc != SQLITE_OK, goto ERR_RETURN, "error binding init_time.");

    /* The blob is bound without a destructor (0), so compressed_buf must stay
     * alive until after sqlite3_step()/finalize below.
     */
    rc = sqlite3_bind_blob(statement, 3, compressed_buf.data, compressed_buf.size, 0);
    Stopif(rc != SQLITE_OK, goto ERR_RETURN, "error binding compressed data.");

    rc = sqlite3_step(statement);
    Stopif(rc != SQLITE_DONE, goto ERR_RETURN, "error executing insert sql");

    byte_buffer_clear(&compressed_buf);

    rc = sqlite3_finalize(statement);
    Stopif(rc != SQLITE_OK, exit(EXIT_FAILURE), "error finalizing insert statement");

    return 0;

ERR_RETURN:

    /* Shared error cleanup: release the statement and the compressed copy. */
    rc = sqlite3_finalize(statement);
    Stopif(rc != SQLITE_OK, exit(EXIT_FAILURE), "error finalizing insert sql.");

    byte_buffer_clear(&compressed_buf);

    return -1;
}
| 30.896296 | 100 | 0.650803 |
6040e617b307b157272dd236a25ac88606229eb8 | 1,807 | h | C | ConfigurationWiFi_EXAMPLE.h | Thorinair/TwiFi-Module-RETIRED | ccc7ca049679e6a89f340fc2a9bc8b4458f67999 | [
"MIT"
] | null | null | null | ConfigurationWiFi_EXAMPLE.h | Thorinair/TwiFi-Module-RETIRED | ccc7ca049679e6a89f340fc2a9bc8b4458f67999 | [
"MIT"
] | null | null | null | ConfigurationWiFi_EXAMPLE.h | Thorinair/TwiFi-Module-RETIRED | ccc7ca049679e6a89f340fc2a9bc8b4458f67999 | [
"MIT"
] | null | null | null | /*******
* WiFi Configuration
*******/
/****** RENAME THIS FILE TO "ConfigurationWiFi.h" TO GET STARTED. ******/
/* Name of the device, as visible on the WiFi network. */
#define WIFI_HOST "TwiFi_Example"
/* WiFi network definitions. The device will attempt to connect to them in the order as defined here.
* The first one that works will become the default one once successfully connected.
* Up to 8 WiFi networks can be defined. Unused ones should be commented out.
* Second WiFi here is an example of a static IP. Use the | symbol to separate values:
* ip|gateway|subnet|dns1|dns2
*/
#define WIFI_0_SSID "ExampleWiFiName"
#define WIFI_0_PASS "password"
#define WIFI_0_CONF "DHCP"
#define WIFI_0_OPEN false
#define WIFI_1_SSID "AnotherWiFi"
#define WIFI_1_PASS "password"
#define WIFI_1_CONF "192.168.0.201|192.168.0.1|255.255.255.0|8.8.8.8|8.8.4.4"
#define WIFI_1_OPEN false
//#define WIFI_2_SSID ""
//#define WIFI_2_PASS ""
//#define WIFI_2_CONF ""
//#define WIFI_2_OPEN false
//#define WIFI_3_SSID ""
//#define WIFI_3_PASS ""
//#define WIFI_3_CONF ""
//#define WIFI_3_OPEN false
//#define WIFI_4_SSID ""
//#define WIFI_4_PASS ""
//#define WIFI_4_CONF ""
//#define WIFI_4_OPEN false
//#define WIFI_5_SSID ""
//#define WIFI_5_PASS ""
//#define WIFI_5_CONF ""
//#define WIFI_5_OPEN false
//#define WIFI_6_SSID ""
//#define WIFI_6_PASS ""
//#define WIFI_6_CONF ""
//#define WIFI_6_OPEN false
//#define WIFI_7_SSID ""
//#define WIFI_7_PASS ""
//#define WIFI_7_CONF ""
//#define WIFI_7_OPEN false
/* WiFi Timeout, time in seconds until WiFi connection is cancelled if unavailable. */
#define WIFI_TIMEOUT 10
/* EEPROM Addresses. Change these only if needed. */
#define EEPROM_SAVED 0
#define EEPROM_WIFI 1
/* Whether the WiFi connection should be debugged. */
#define WIFI_DEBUG true
| 27.8 | 101 | 0.726619 |
7801d264ee87e5a0bb178c33675cfa1659af1bd4 | 7,869 | h | C | AquaEngine/Core/Containers/HashMap.h | tiagovcosta/aquaengine | aea6de9f47ba0243b90c144dee4422efb2389cc7 | [
"MIT"
] | 55 | 2015-05-29T20:19:28.000Z | 2022-01-18T21:23:15.000Z | AquaEngine/Core/Containers/HashMap.h | tiagovcosta/aquaengine | aea6de9f47ba0243b90c144dee4422efb2389cc7 | [
"MIT"
] | null | null | null | AquaEngine/Core/Containers/HashMap.h | tiagovcosta/aquaengine | aea6de9f47ba0243b90c144dee4422efb2389cc7 | [
"MIT"
] | 6 | 2015-09-02T09:51:38.000Z | 2020-08-16T07:54:34.000Z | #pragma once
#include "Array.h"
#include "..\..\AquaTypes.h"
namespace aqua
{
template<class K, class V>
class HashMap
{
public:
HashMap(Allocator& allocator);
bool has(K key) const;
// Returns the value stored for the specified key, or default if the key
// does not exist in the hash.
const V& lookup(K key, const V& default) const;
void insert(K key, const V& value);
bool remove(K key);
void reserve(size_t size);
void clear();
//using Iterator = size_t;
struct Iterator
{
size_t index;
K key;
V* value;
};
Iterator begin();
Iterator next(const Iterator& it);
Iterator end();
private:
void rehash(size_t size);
enum State : u8
{
EMPTY,
FILLED,
REMOVED
};
struct Bucket
{
u8 hash : 6;
u8 state : 2;
};
static_assert(sizeof(Bucket) == sizeof(u8), "Check Bucket struct size");
Allocator* _allocator;
Array<Bucket> _buckets;
Array<K> _keys;
Array<V> _values;
size_t _size; //Number of used entries in the table
};
/// Constructs an empty map; all dynamic storage is drawn from `allocator`,
/// which must outlive this map.
template<class K, class V>
HashMap<K, V>::HashMap(Allocator& allocator)
	: _allocator(&allocator), _buckets(allocator), _keys(allocator), _values(allocator), _size(0)
{
}
/// Returns true if `key` is present in the map.
template<class K, class V>
bool HashMap<K, V>::has(K key) const
{
	size_t num_buckets = _buckets.size();

	if(num_buckets == 0)
		return false;

	size_t home = key % num_buckets;
	u8 fragment = key & 0x3F;

	// Linear probe from the home bucket, wrapping around the table at most
	// once. An EMPTY bucket ends the probe chain; REMOVED buckets are
	// stepped over so chains stay intact after deletions.
	for(size_t n = 0; n < num_buckets; ++n)
	{
		size_t i = (home + n) % num_buckets;
		const Bucket* bucket = &_buckets[i];

		if(bucket->state == State::EMPTY)
			return false;

		if(bucket->state == State::FILLED && bucket->hash == fragment && _keys[i] == key)
			return true;
	}

	return false;
}
/// Returns the value stored for `key`, or `default_value` if the key does not
/// exist in the hash.
/// FIX: the parameter was previously named `default`, which is a reserved C++
/// keyword and not a legal identifier.
template<class K, class V>
const V& HashMap<K, V>::lookup(K key, const V& default_value) const
{
	size_t num_buckets = _buckets.size();

	if(num_buckets == 0)
		return default_value;

	size_t home = key % num_buckets;
	u8 fragment = key & 0x3F;

	// Linear probe from the home bucket, wrapping around at most once.
	// An EMPTY bucket ends the probe chain; REMOVED buckets are skipped.
	for(size_t n = 0; n < num_buckets; ++n)
	{
		size_t i = (home + n) % num_buckets;
		const Bucket* bucket = &_buckets[i];

		if(bucket->state == State::EMPTY)
			return default_value;

		if(bucket->state == State::FILLED && bucket->hash == fragment && _keys[i] == key)
			return _values[i];
	}

	return default_value;
}
/// Stores `value` under `key`. Does not check whether the key already exists.
template<class K, class V>
void HashMap<K, V>::insert(K key, const V& value)
{
	size_t num_buckets = _buckets.size();

	// Grow when the load factor would reach 2/3.
	if(_size * 3 >= num_buckets * 2)
	{
		rehash(num_buckets * 2 + 8);
		num_buckets = _buckets.size();
	}

	size_t home = key % num_buckets;
	u8 fragment = key & 0x3F;

	// Probe for the first non-FILLED slot (EMPTY or REMOVED), wrapping
	// around the table at most once.
	Bucket* target = nullptr;
	size_t target_index = 0;

	for(size_t n = 0; n < num_buckets; ++n)
	{
		size_t i = (home + n) % num_buckets;

		if(_buckets[i].state != State::FILLED)
		{
			target = &_buckets[i];
			target_index = i;
			break;
		}
	}

	assert(target != nullptr);

	// Record the hash fragment, key, and value in the chosen slot.
	target->hash = fragment;
	target->state = State::FILLED;
	_keys[target_index] = key;
	_values[target_index] = value;

	++_size;
}
/// Removes `key` from the map. Returns true if an entry was removed.
template<class K, class V>
bool HashMap<K, V>::remove(K key)
{
	size_t num_buckets = _buckets.size();

	if(num_buckets == 0)
		return false;

	size_t home = key % num_buckets;
	u8 fragment = key & 0x3F;

	// Probe from the home bucket, wrapping at most once. The slot is marked
	// REMOVED (not EMPTY) so later probes keep walking past it.
	for(size_t n = 0; n < num_buckets; ++n)
	{
		size_t i = (home + n) % num_buckets;
		Bucket* bucket = &_buckets[i];

		if(bucket->state == State::EMPTY)
			return false;

		if(bucket->state == State::FILLED && bucket->hash == fragment && _keys[i] == key)
		{
			bucket->hash = 0;
			bucket->state = State::REMOVED;
			--_size;
			return true;
		}
	}

	return false;
}
/// Rebuilds the table with at least `size` buckets (never fewer than the
/// current element count; see rehash()).
template<class K, class V>
void HashMap<K, V>::reserve(size_t size)
{
	rehash(size);
}
/// Removes all entries from the map.
/// FIX: also reset the element count. Previously clear() left `_size` at its
/// old value, which corrupted the load-factor check in insert() and made
/// rehash() size the new table for elements that no longer exist.
template<class K, class V>
void HashMap<K, V>::clear()
{
	_buckets.clear();
	_keys.clear();
	_values.clear();
	_size = 0;
}
/// Rebuilds the table with `size` buckets (clamped up to the current element
/// count) and re-inserts every FILLED entry. Values are moved, not copied.
template<class K, class V>
void HashMap<K,V>::rehash(size_t size)
{
	// Can't rehash down to smaller than current size or initial size
	//bucketCountNew = std::max(std::max(bucketCountNew, size),
	// size_t(s_hashTableInitialSize));
	size = size > _size ? size : _size;

	// Build a new set of buckets, keys, and values
	Array<Bucket> buckets_new(*_allocator, size);
	Array<K> keys_new(*_allocator, size);
	Array<V> values_new(*_allocator, size);

	// All new buckets start out EMPTY; keys/values are default-initialized.
	Bucket temp;
	temp.state = State::EMPTY;

	buckets_new.resize(size, temp);
	keys_new.resize(size);
	values_new.resize(size);

	// Walk through all the current elements and insert them into the new buckets
	for(size_t i = 0, end = _buckets.size(); i < end; ++i)
	{
		Bucket* b = &_buckets[i];

		// REMOVED tombstones are dropped here; only FILLED entries survive.
		if(b->state != State::FILLED)
			continue;

		K key = _keys[i];

		// Hash the key and find the starting bucket
		size_t start = key % size;
		u8 hash6 = key & 0x3F;

		// Search for an unused bucket
		// (linear probe: start..end, then wrap to 0..start-1)
		Bucket* bucket = nullptr;
		size_t bucket_index = 0;

		for(size_t j = start; j < size; ++j)
		{
			Bucket* b = &buckets_new[j];

			if(b->state != State::FILLED)
			{
				bucket = b;
				bucket_index = j;
				break;
			}
		}

		if(bucket == nullptr)
		{
			for(size_t j = 0; j < start; ++j)
			{
				Bucket* b = &buckets_new[j];

				if(b->state != State::FILLED)
				{
					bucket = b;
					bucket_index = j;
					break;
				}
			}
		}

		assert(bucket != nullptr);

		// Store the hash, key, and value in the bucket
		bucket->hash = hash6;
		bucket->state = State::FILLED;
		keys_new[bucket_index] = key;
		values_new[bucket_index] = std::move(_values[i]);
	}

	// Swap the new buckets, keys, and values into place
	using std::swap;
	swap(_buckets, buckets_new);
	swap(_keys, keys_new);
	swap(_values, values_new);
}
/// Returns an iterator to the first occupied slot, or end() if the map is empty.
template<class K, class V>
typename HashMap<K, V>::Iterator HashMap<K, V>::begin()
{
	const size_t num_buckets = _buckets.size();

	// Scan forward to the first FILLED bucket.
	size_t i = 0;
	while(i < num_buckets && _buckets[i].state != State::FILLED)
		++i;

	if(i == num_buckets)
		return{ num_buckets, 0, nullptr };

	return{ i, _keys[i], &_values[i] };
}
/// Advances `it` to the next occupied slot, or end() when exhausted.
/// `it` must be a valid (non-end) iterator of this map.
template<class K, class V>
typename HashMap<K, V>::Iterator HashMap<K, V>::next(const Iterator& it)
{
	const size_t num_buckets = _buckets.size();

	// Scan forward from the slot after the current one.
	size_t i = it.index + 1;
	while(i < num_buckets && _buckets[i].state != State::FILLED)
		++i;

	if(i == num_buckets)
		return{ num_buckets, 0, nullptr };

	return{ i, _keys[i], &_values[i] };
}
/// Returns the past-the-end iterator (index == bucket count, null value).
template<class K, class V>
typename HashMap<K, V>::Iterator HashMap<K, V>::end()
{
	return{ _buckets.size(), 0, nullptr };
}
} | 18.006865 | 95 | 0.582666 |
43345e092e9ddf7c288369baf4f991650ea321ef | 627 | h | C | iraf.v2161/sys/imfort/oif.h | ysBach/irafdocgen | b11fcd75cc44b01ae69c9c399e650ec100167a54 | [
"MIT"
] | 2 | 2019-12-01T15:19:09.000Z | 2019-12-02T16:48:42.000Z | sys/imfort/oif.h | kirxkirx/iraf | fcd7569b4e0ddbea29f7dbe534a25759e0c31883 | [
"MIT"
] | 1 | 2019-11-30T13:48:50.000Z | 2019-12-02T19:40:25.000Z | sys/imfort/oif.h | kirxkirx/iraf | fcd7569b4e0ddbea29f7dbe534a25759e0c31883 | [
"MIT"
] | null | null | null | # OIF.H -- IKI/OIF internal definitions.
define MAX_LENEXTN 3 # max length imagefile extension
define LEN_EXTN 3 # actual length imagefile extension
define OIF_HDREXTN "imh" # imheader filename extension
define OIF_PIXEXTN "pix" # pixel file extension
define LEN_PIXHDR 512 # length of PIXHDR structure
define COMPRESS NO # disable alignment of image lines?
define DEF_VERSION 2 # default file version
define HDR_EXTENSIONS "|^imh|" # legal header file extensions
define HDR "HDR$" # stands for header directory
define STRLEN_HDR 4
define TY_IMHDR 1 # main imagefile header
define TY_PIXHDR 2 # pixel file header
| 36.882353 | 61 | 0.781499 |
d1e84593fd951c166c04255b9a7d6fdce62d8b7c | 5,707 | h | C | StyleFramework/RegistryManager.h | edwig/StyleFramework | c9b19af74f5298c3da348cdbdc988191e76ed807 | [
"MIT"
] | 2 | 2021-04-03T12:50:30.000Z | 2022-02-08T23:23:56.000Z | StyleFramework/RegistryManager.h | edwig/Kwatta | ce1ca2907608e65ed62d7dbafa9ab1d030caccfe | [
"MIT"
] | 10 | 2022-01-14T13:28:32.000Z | 2022-02-13T12:46:34.000Z | StyleFramework/RegistryManager.h | edwig/Kwatta | ce1ca2907608e65ed62d7dbafa9ab1d030caccfe | [
"MIT"
] | null | null | null | //////////////////////////////////////////////////////////////////////////
//
// File: RegistryManager.h
// Function: Handling the registry
//
// _____ _ _ _ ______ _
// / ____| | | (_) | ____| | |
// | (___ | |_ _ _| |_ _ __ __ _| |__ _ __ __ _ _ __ ___ _____ _____ _ __| | __
// \___ \| __| | | | | | '_ \ / _` | __| '__/ _` | '_ ` _ \ / _ \ \ /\ / / _ \| '__| |/ /
// ____) | |_| |_| | | | | | | (_| | | | | | (_| | | | | | | __/\ V V / (_) | | | <
// |_____/ \__|\__, |_|_|_| |_|\__, |_| |_| \__,_|_| |_| |_|\___| \_/\_/ \___/|_| |_|\_\
// __/ | __/ |
// |___/ |___/
//
//
// Author: ir. W.E. Huisman
// For license: See the file "LICENSE.txt" in the root folder
//
#pragma once
// Setting the registry key and profile before starting
int SetRegistryProfile(CString p_key,CString p_program,CString p_version);
//////////////////////////////////////////////////////////////////////////
//
// RegistryManager is a stand alone registry management class.
// You can use it to read and write values from your system's registry
// Reading and writing to an *.INI file is an option.
// Use RegistryManager to read and write string, integer, and binary data
// to and from the registry.
//
//////////////////////////////////////////////////////////////////////////
class RegistryManager
{
private:
  class AutoRegistryKey;  // RAII wrapper that closes an open HKEY; defined below.

public:
  // hKeyBase selects the registry hive all accesses are relative to.
  RegistryManager(HKEY hKeyBase = HKEY_CURRENT_USER);
  virtual ~RegistryManager();

  // Selects the registry key and profile name used by subsequent calls.
  void SetRegistryKey(LPCTSTR lpszRegistryKey, LPCTSTR lpszProfileName);
  // Static .INI filename used as an alternative storage backend.
  static void SetINIFileName(LPCTSTR strINIFileName);
  static CString& GetINIFileName();

  // SETTERS
  // Each returns TRUE on success; check GetErrorCode()/GetErrorMessage() on failure.
  BOOL SetRegistryInteger(LPCTSTR lpszSection, LPCTSTR lpszEntry, int nValue);
  BOOL SetRegistryBinary (LPCTSTR lpszSection, LPCTSTR lpszEntry, LPBYTE pData, UINT nBytes);
  BOOL SetRegistryString (LPCTSTR lpszSection, LPCTSTR lpszEntry, LPCTSTR lpszValue);
  BOOL SetRegistryPoint  (LPCTSTR lpszSection, LPCTSTR lpszEntry, CPoint* pValue);
  BOOL SetRegistryRect   (LPCTSTR lpszSection, LPCTSTR lpszEntry, CRect* pValue);
  BOOL SetRegistrySize   (LPCTSTR lpszSection, LPCTSTR lpszEntry, CSize* pValue);
  BOOL SetRegistryDouble (LPCTSTR lpszSection, LPCTSTR lpszEntry, double* pValue);
  BOOL SetRegistryDWORD  (LPCTSTR lpszSection, LPCTSTR lpszEntry, DWORD* pValue);
  BOOL SetRegistryColor  (LPCTSTR lpszSection, LPCTSTR lpszEntry, COLORREF* pValue);

  // GETTERS
  // Scalar getters take a default; pointer-output getters return TRUE when found.
  UINT    GetRegistryInteger(LPCTSTR lpszSection, LPCTSTR lpszEntry, int nDefault);
  BOOL    GetRegistryBinary (LPCTSTR lpszSection, LPCTSTR lpszEntry, BYTE** ppData, UINT* pBytes);
  CString GetRegistryString (LPCTSTR lpszSection, LPCTSTR lpszEntry, LPCTSTR lpszDefault);
  BOOL    GetRegistryPoint  (LPCTSTR lpszSection, LPCTSTR lpszEntry, CPoint* ptResult);
  BOOL    GetRegistryRect   (LPCTSTR lpszSection, LPCTSTR lpszEntry, CRect* rcResult);
  BOOL    GetRegistrySize   (LPCTSTR lpszSection, LPCTSTR lpszEntry, CSize* rcResult);
  BOOL    GetRegistryDouble (LPCTSTR lpszSection, LPCTSTR lpszEntry, double* rcResult);
  BOOL    GetRegistryDWORD  (LPCTSTR lpszSection, LPCTSTR lpszEntry, DWORD* rcResult);
  BOOL    GetRegistryColor  (LPCTSTR lpszSection, LPCTSTR lpszEntry, COLORREF* rcResult);

  // Enumeration and deletion of keys/values under a section.
  int  EnumRegistryValues(LPCTSTR lpszSection, CMap<CString, LPCTSTR, DWORD, DWORD&>* mapItems, CStringArray* arrayNames);
  int  EnumRegistryKeys  (LPCTSTR lpszSection, CStringArray & arrayKeys);
  bool DeleteRegistryKey  (LPCTSTR lpszSection, LPCTSTR lpszKey);
  bool DeleteRegistryValue(LPCTSTR lpszSection, LPCTSTR lpszKey);

  // Last Win32 result code / human-readable message for the previous call.
  LONG    GetErrorCode() const;
  CString GetErrorMessage() const;

protected:
  virtual HKEY GetAppRegistryKey(REGSAM samDesired = KEY_WRITE | KEY_READ);
  virtual HKEY GetSectionKey(LPCTSTR lpszSection, REGSAM samDesired = KEY_WRITE | KEY_READ);

private:
  BOOL GetProfileInfo();
  LONG RecurseDeleteKey(HKEY hKey, LPCTSTR lpszKey);
  HKEY GetFullPathKey(LPCTSTR lpszSection) const;
  HKEY RecurseOpenKey(LPCTSTR lpszSection, REGSAM samDesired);

  LONG m_lResult;  // Result of the last Win32 registry call.

protected:
  HKEY    m_keyBase;            // Handle to the registry key to use. The default is HKEY_CURRENT_USER.
  CString m_registryKey;        // Used to determine the full registry key for storing application profile settings.
  CString m_profileName;        // The application's .INI filename.
  static CString m_iniFileName; // Name of an .INI file for registry settings.
};
//////////////////////////////////////////////////////////////////////
// Sets the static .INI filename used for registry settings storage.
inline void
RegistryManager::SetINIFileName(LPCTSTR strINIFileName)
{
  m_iniFileName = strINIFileName;
}
// Returns the static .INI filename (empty when the registry backend is used).
inline
CString& RegistryManager::GetINIFileName()
{
  return m_iniFileName;
}
// Returns the Win32 result code of the most recent registry operation.
inline
LONG RegistryManager::GetErrorCode() const
{
  return m_lResult;
}
/////////////////////////////////////////////////////////////////////////////
// CHKey - helper automatically closes open HKEY
// Scope guard for an open HKEY: the handle is closed on destruction and on
// reassignment. Implicit conversions let it be passed wherever an HKEY or
// PHKEY is expected.
class RegistryManager::AutoRegistryKey
{
public:
  AutoRegistryKey(HKEY hKey = nullptr) : m_hKey(hKey)
  {
  }

  // FIX: non-copyable. The implicit copy operations duplicated m_hKey and
  // caused a double ::RegCloseKey on the same handle when both copies died.
  AutoRegistryKey(const AutoRegistryKey&) = delete;
  AutoRegistryKey& operator=(const AutoRegistryKey&) = delete;

  // Closes the held handle, if any.
  ~AutoRegistryKey()
  {
    if (m_hKey != nullptr)
    {
      ::RegCloseKey(m_hKey);
    }
  }

  // Takes ownership of hKey, closing any previously held handle first.
  HKEY& operator=(HKEY hKey)
  {
    if (m_hKey != nullptr)
    {
      ::RegCloseKey(m_hKey);
    }
    m_hKey = hKey;
    return m_hKey;
  }

  BOOL operator==(HKEY hKey)
  {
    return m_hKey == hKey;
  }

  operator HKEY() const
  {
    return m_hKey;
  }

  // Allows passing the wrapper to APIs that write an opened key into a PHKEY.
  operator PHKEY()
  {
    return &m_hKey;
  }

private:
  HKEY m_hKey;
};
| 34.173653 | 126 | 0.620641 |
9e62f0899ab354873876eb7878f07c0bc8ba291d | 21,075 | c | C | arch/arm/src/stm32h7/stm32_irq.c | 19cb0475536a9e7d/nuttx | a8a695926832e408cccc29606ba41fa00e948c97 | [
"MIT"
] | 1 | 2021-02-05T03:21:54.000Z | 2021-02-05T03:21:54.000Z | arch/arm/src/stm32h7/stm32_irq.c | 19cb0475536a9e7d/nuttx | a8a695926832e408cccc29606ba41fa00e948c97 | [
"MIT"
] | 1 | 2020-03-21T16:25:54.000Z | 2020-03-24T09:41:03.000Z | arch/arm/src/stm32h7/stm32_irq.c | 19cb0475536a9e7d/nuttx | a8a695926832e408cccc29606ba41fa00e948c97 | [
"MIT"
] | null | null | null | /****************************************************************************
* arch/arm/src/stm32h7/stm32_irq.c
*
* Copyright (C) 2018 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <debug.h>
#include <nuttx/irq.h>
#include <nuttx/arch.h>
#include <arch/irq.h>
#include <arch/armv7-m/nvicpri.h>
#include "nvic.h"
#include "ram_vectors.h"
#include "up_arch.h"
#include "up_internal.h"
#ifdef CONFIG_STM32H7_GPIO_IRQ
# include "stm32_gpio.h"
#endif
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Get a 32-bit version of the default priority */
#define DEFPRIORITY32 \
(NVIC_SYSH_PRIORITY_DEFAULT << 24 | \
NVIC_SYSH_PRIORITY_DEFAULT << 16 | \
NVIC_SYSH_PRIORITY_DEFAULT << 8 | \
NVIC_SYSH_PRIORITY_DEFAULT)
/* Given the address of a NVIC ENABLE register, this is the offset to
* the corresponding CLEAR ENABLE register.
*/
#define NVIC_ENA_OFFSET (0)
#define NVIC_CLRENA_OFFSET (NVIC_IRQ0_31_CLEAR - NVIC_IRQ0_31_ENABLE)
/****************************************************************************
* Public Data
****************************************************************************/
/* g_current_regs[] holds a references to the current interrupt level
* register storage structure. If is non-NULL only during interrupt
* processing. Access to g_current_regs[] must be through the macro
* CURRENT_REGS for portability.
*/
volatile uint32_t *g_current_regs[1];
/* This is the address of the exception vector table (determined by the
* linker script).
*/
extern uint32_t _vectors[];
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: stm32_dumpnvic
*
* Description:
* Dump some interesting NVIC registers
*
****************************************************************************/
/* When CONFIG_DEBUG_IRQ_INFO is disabled, stm32_dumpnvic() compiles away to
 * an empty macro (see the #else branch below).
 */

#if defined(CONFIG_DEBUG_IRQ_INFO)
static void stm32_dumpnvic(const char *msg, int irq)
{
  irqstate_t flags;

  /* Sample all registers inside a critical section so the dump is coherent. */

  flags = enter_critical_section();

  irqinfo("NVIC (%s, irq=%d):\n", msg, irq);
  irqinfo(" INTCTRL: %08x VECTAB: %08x\n",
          getreg32(NVIC_INTCTRL), getreg32(NVIC_VECTAB));
#if 0
  irqinfo(" SYSH ENABLE MEMFAULT: %08x BUSFAULT: %08x USGFAULT: %08x SYSTICK: %08x\n",
          getreg32(NVIC_SYSHCON_MEMFAULTENA), getreg32(NVIC_SYSHCON_BUSFAULTENA),
          getreg32(NVIC_SYSHCON_USGFAULTENA), getreg32(NVIC_SYSTICK_CTRL_ENABLE));
#endif
  irqinfo(" IRQ ENABLE: %08x %08x %08x\n",
          getreg32(NVIC_IRQ0_31_ENABLE), getreg32(NVIC_IRQ32_63_ENABLE),
          getreg32(NVIC_IRQ64_95_ENABLE));
  irqinfo(" SYSH_PRIO: %08x %08x %08x\n",
          getreg32(NVIC_SYSH4_7_PRIORITY), getreg32(NVIC_SYSH8_11_PRIORITY),
          getreg32(NVIC_SYSH12_15_PRIORITY));

  /* External interrupt priorities, in groups of four per register. Each
   * block below is compiled only when the chip has that many interrupts.
   */

  irqinfo(" IRQ PRIO: %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ0_3_PRIORITY), getreg32(NVIC_IRQ4_7_PRIORITY),
          getreg32(NVIC_IRQ8_11_PRIORITY), getreg32(NVIC_IRQ12_15_PRIORITY));
#if STM32_IRQ_NEXTINTS > 15
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ16_19_PRIORITY), getreg32(NVIC_IRQ20_23_PRIORITY),
          getreg32(NVIC_IRQ24_27_PRIORITY), getreg32(NVIC_IRQ28_31_PRIORITY));
#endif
#if STM32_IRQ_NEXTINTS > 31
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ32_35_PRIORITY), getreg32(NVIC_IRQ36_39_PRIORITY),
          getreg32(NVIC_IRQ40_43_PRIORITY), getreg32(NVIC_IRQ44_47_PRIORITY));
#endif
#if STM32_IRQ_NEXTINTS > 47
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ48_51_PRIORITY), getreg32(NVIC_IRQ52_55_PRIORITY),
          getreg32(NVIC_IRQ56_59_PRIORITY), getreg32(NVIC_IRQ60_63_PRIORITY));
#endif
#if STM32_IRQ_NEXTINTS > 63
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ64_67_PRIORITY), getreg32(NVIC_IRQ68_71_PRIORITY),
          getreg32(NVIC_IRQ72_75_PRIORITY), getreg32(NVIC_IRQ76_79_PRIORITY));
#endif
#if STM32_IRQ_NEXTINTS > 79
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ80_83_PRIORITY), getreg32(NVIC_IRQ84_87_PRIORITY),
          getreg32(NVIC_IRQ88_91_PRIORITY), getreg32(NVIC_IRQ92_95_PRIORITY));
#endif
#if STM32_IRQ_NEXTINTS > 95
  irqinfo(" %08x %08x %08x %08x\n",
          getreg32(NVIC_IRQ96_99_PRIORITY), getreg32(NVIC_IRQ100_103_PRIORITY),
          getreg32(NVIC_IRQ104_107_PRIORITY), getreg32(NVIC_IRQ108_111_PRIORITY));
#endif
#if STM32_IRQ_NEXTINTS > 111
#  warning Missing logic
#endif

  // TODO: Make sure this covers all interrupts that are available.

  leave_critical_section(flags);
}
#else
#  define stm32_dumpnvic(msg, irq)
#endif
/****************************************************************************
* Name: stm32_nmi, stm32_busfault, stm32_usagefault, stm32_pendsv, stm32_dbgmonitor,
* stm32_pendsv, stm32_reserved
*
* Description:
* Handlers for various exceptions. None are handled and all are fatal
* error conditions. The only advantage these provided over the default
* unexpected interrupt handler is that they provide a diagnostic output.
*
****************************************************************************/
#ifdef CONFIG_DEBUG_FEATURES
/* Fatal exception handlers. Each disables interrupts, logs a diagnostic,
 * and panics -- none of these exceptions are treated as recoverable in this
 * configuration. The return statements exist only to satisfy the xcpt_t
 * signature; PANIC() is expected not to return.
 */

static int stm32_nmi(int irq, FAR void *context, FAR void *arg)
{
  up_irq_save();
  _err("PANIC!!! NMI received\n");
  PANIC();
  return 0;
}

static int stm32_busfault(int irq, FAR void *context, FAR void *arg)
{
  up_irq_save();
  /* NVIC_CFAULTS holds the configurable fault status for diagnosis. */
  _err("PANIC!!! Bus fault received: %08x\n", getreg32(NVIC_CFAULTS));
  PANIC();
  return 0;
}

static int stm32_usagefault(int irq, FAR void *context, FAR void *arg)
{
  up_irq_save();
  _err("PANIC!!! Usage fault received: %08x\n", getreg32(NVIC_CFAULTS));
  PANIC();
  return 0;
}

static int stm32_pendsv(int irq, FAR void *context, FAR void *arg)
{
  up_irq_save();
  _err("PANIC!!! PendSV received\n");
  PANIC();
  return 0;
}

static int stm32_dbgmonitor(int irq, FAR void *context, FAR void *arg)
{
  up_irq_save();
  _err("PANIC!!! Debug Monitor received\n");
  PANIC();
  return 0;
}

static int stm32_reserved(int irq, FAR void *context, FAR void *arg)
{
  up_irq_save();
  _err("PANIC!!! Reserved interrupt\n");
  PANIC();
  return 0;
}
#endif
/****************************************************************************
* Name: stm32_prioritize_syscall
*
* Description:
* Set the priority of an exception. This function may be needed
* internally even if support for prioritized interrupts is not enabled.
*
****************************************************************************/
#ifdef CONFIG_ARMV7M_USEBASEPRI
static inline void stm32_prioritize_syscall(int priority)
{
uint32_t regval;
/* SVCALL is system handler 11 */
regval = getreg32(NVIC_SYSH8_11_PRIORITY);
regval &= ~NVIC_SYSH_PRIORITY_PR11_MASK;
regval |= (priority << NVIC_SYSH_PRIORITY_PR11_SHIFT);
putreg32(regval, NVIC_SYSH8_11_PRIORITY);
}
#endif
/****************************************************************************
* Name: stm32_irqinfo
*
* Description:
* Given an IRQ number, provide the register and bit setting to enable or
* disable the irq.
*
****************************************************************************/
static int stm32_irqinfo(int irq, uintptr_t *regaddr, uint32_t *bit,
uintptr_t offset)
{
unsigned int extint = irq - STM32_IRQ_FIRST;
DEBUGASSERT(irq >= STM32_IRQ_NMI && irq < NR_IRQS);
/* Check for external interrupt */
if (irq >= STM32_IRQ_FIRST)
{
#if STM32_IRQ_NEXTINTS <= 32
if (extint < STM32_IRQ_NEXTINTS)
{
*regaddr = (NVIC_IRQ0_31_ENABLE + offset);
*bit = 1 << extint;
}
else
#elif STM32_IRQ_NEXTINTS <= 64
if (extint < 32)
{
*regaddr = (NVIC_IRQ0_31_ENABLE + offset);
*bit = 1 << extint;
}
else if (extint < STM32_IRQ_NEXTINTS)
{
*regaddr = (NVIC_IRQ32_63_ENABLE + offset);
*bit = 1 << (extint - 32);
}
else
#elif STM32_IRQ_NEXTINTS <= 96
if (extint < 32)
{
*regaddr = (NVIC_IRQ0_31_ENABLE + offset);
*bit = 1 << extint;
}
else if (extint < 64)
{
*regaddr = (NVIC_IRQ32_63_ENABLE + offset);
*bit = 1 << (extint - 32);
}
else if (extint < STM32_IRQ_NEXTINTS)
{
*regaddr = (NVIC_IRQ64_95_ENABLE + offset);
*bit = 1 << (extint - 64);
}
else
#elif STM32_IRQ_NEXTINTS <= 128
if (extint < 32)
{
*regaddr = (NVIC_IRQ0_31_ENABLE + offset);
*bit = 1 << extint;
}
else if (extint < 64)
{
*regaddr = (NVIC_IRQ32_63_ENABLE + offset);
*bit = 1 << (extint - 32);
}
else if (extint < 96)
{
*regaddr = (NVIC_IRQ64_95_ENABLE + offset);
*bit = 1 << (extint - 64);
}
else if (extint < STM32_IRQ_NEXTINTS)
{
*regaddr = (NVIC_IRQ96_127_ENABLE + offset);
*bit = 1 << (extint - 96);
}
else
#elif STM32_IRQ_NEXTINTS <= 160
if (extint < 32)
{
*regaddr = (NVIC_IRQ0_31_ENABLE + offset);
*bit = 1 << extint;
}
else if (extint < 64)
{
*regaddr = (NVIC_IRQ32_63_ENABLE + offset);
*bit = 1 << (extint - 32);
}
else if (extint < 96)
{
*regaddr = (NVIC_IRQ64_95_ENABLE + offset);
*bit = 1 << (extint - 64);
}
else if (extint < 128)
{
*regaddr = (NVIC_IRQ96_127_ENABLE + offset);
*bit = 1 << (extint - 96);
}
else if (extint < STM32_IRQ_NEXTINTS)
{
*regaddr = (NVIC_IRQ128_159_ENABLE + offset);
*bit = 1 << (extint - 128);
}
else
#else
# warning Missing logic
#endif
{
return ERROR; /* Invalid interrupt */
}
}
/* Handle processor exceptions. Only a few can be disabled */
else
{
*regaddr = NVIC_SYSHCON;
if (irq == STM32_IRQ_MEMFAULT)
{
*bit = NVIC_SYSHCON_MEMFAULTENA;
}
else if (irq == STM32_IRQ_BUSFAULT)
{
*bit = NVIC_SYSHCON_BUSFAULTENA;
}
else if (irq == STM32_IRQ_USAGEFAULT)
{
*bit = NVIC_SYSHCON_USGFAULTENA;
}
else if (irq == STM32_IRQ_SYSTICK)
{
*regaddr = NVIC_SYSTICK_CTRL;
*bit = NVIC_SYSTICK_CTRL_ENABLE;
}
else
{
return ERROR; /* Invalid or unsupported exception */
}
}
return OK;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: up_irqinitialize
****************************************************************************/
void up_irqinitialize(void)
{
uintptr_t regaddr;
#if defined(CONFIG_DEBUG_SYMBOLS) && !defined(CONFIG_ARMV7M_USEBASEPRI)
uint32_t regval;
#endif
int nintlines;
int i;
/* The NVIC ICTR register (bits 0-4) holds the number of of interrupt
* lines that the NVIC supports, defined in groups of 32. That is,
* the total number of interrupt lines is up to (32*(INTLINESNUM+1)).
*
* 0 -> 32 interrupt lines, 1 enable register, 8 priority registers
* 1 -> 64 " " " ", 2 enable registers, 16 priority registers
* 2 -> 96 " " " ", 3 enable registers, 24 priority registers
* ...
*/
nintlines = (getreg32(NVIC_ICTR) & NVIC_ICTR_INTLINESNUM_MASK) + 1;
/* Disable all interrupts. There are nintlines interrupt enable
* registers.
*/
for (i = nintlines, regaddr = NVIC_IRQ0_31_CLEAR;
i > 0;
i--, regaddr += 4)
{
putreg32(0xffffffff, regaddr);
}
/* Make sure that we are using the correct vector table. The default
* vector address is 0x0000:0000 but if we are executing code that is
* positioned in SRAM or in external FLASH, then we may need to reset
* the interrupt vector so that it refers to the table in SRAM or in
* external FLASH.
*/
putreg32((uint32_t)_vectors, NVIC_VECTAB);
#ifdef CONFIG_ARCH_RAMVECTORS
/* If CONFIG_ARCH_RAMVECTORS is defined, then we are using a RAM-based
* vector table that requires special initialization.
*/
up_ramvec_initialize();
#endif
/* Set all interrupts (and exceptions) to the default priority */
putreg32(DEFPRIORITY32, NVIC_SYSH4_7_PRIORITY);
putreg32(DEFPRIORITY32, NVIC_SYSH8_11_PRIORITY);
putreg32(DEFPRIORITY32, NVIC_SYSH12_15_PRIORITY);
/* Now set all of the interrupt lines to the default priority. There are
* nintlines * 8 priority registers.
*/
for (i = (nintlines << 3), regaddr = NVIC_IRQ0_3_PRIORITY;
i > 0;
i--, regaddr += 4)
{
putreg32(DEFPRIORITY32, regaddr);
}
/* currents_regs is non-NULL only while processing an interrupt */
CURRENT_REGS = NULL;
/* Attach the SVCall and Hard Fault exception handlers. The SVCall
* exception is used for performing context switches; The Hard Fault
* must also be caught because a SVCall may show up as a Hard Fault
* under certain conditions.
*/
irq_attach(STM32_IRQ_SVCALL, up_svcall, NULL);
irq_attach(STM32_IRQ_HARDFAULT, up_hardfault, NULL);
/* Set the priority of the SVCall interrupt */
#ifdef CONFIG_ARCH_IRQPRIO
/* up_prioritize_irq(STM32_IRQ_PENDSV, NVIC_SYSH_PRIORITY_MIN); */
#endif
#ifdef CONFIG_ARMV7M_USEBASEPRI
stm32_prioritize_syscall(NVIC_SYSH_SVCALL_PRIORITY);
#endif
/* If the MPU is enabled, then attach and enable the Memory Management
* Fault handler.
*/
#ifdef CONFIG_ARM_MPU
irq_attach(STM32_IRQ_MEMFAULT, up_memfault, NULL);
up_enable_irq(STM32_IRQ_MEMFAULT);
#endif
/* Attach all other processor exceptions (except reset and sys tick) */
#ifdef CONFIG_DEBUG_FEATURES
irq_attach(STM32_IRQ_NMI, stm32_nmi, NULL);
#ifndef CONFIG_ARM_MPU
irq_attach(STM32_IRQ_MEMFAULT, up_memfault, NULL);
#endif
irq_attach(STM32_IRQ_BUSFAULT, stm32_busfault, NULL);
irq_attach(STM32_IRQ_USAGEFAULT, stm32_usagefault, NULL);
irq_attach(STM32_IRQ_PENDSV, stm32_pendsv, NULL);
irq_attach(STM32_IRQ_DBGMONITOR, stm32_dbgmonitor, NULL);
irq_attach(STM32_IRQ_RESERVED, stm32_reserved, NULL);
#endif
stm32_dumpnvic("initial", NR_IRQS);
/* If a debugger is connected, try to prevent it from catching hardfaults.
* If CONFIG_ARMV7M_USEBASEPRI, no hardfaults are expected in normal
* operation.
*/
#if defined(CONFIG_DEBUG_SYMBOLS) && !defined(CONFIG_ARMV7M_USEBASEPRI)
regval = getreg32(NVIC_DEMCR);
regval &= ~NVIC_DEMCR_VCHARDERR;
putreg32(regval, NVIC_DEMCR);
#endif
#ifndef CONFIG_SUPPRESS_INTERRUPTS
/* Initialize logic to support a second level of interrupt decoding for
* GPIO pins.
*/
#ifdef CONFIG_STM32H7_GPIO_IRQ
stm32_gpioirqinitialize();
#endif
/* And finally, enable interrupts */
up_irq_enable();
#endif
}
/****************************************************************************
* Name: up_disable_irq
*
* Description:
* Disable the IRQ specified by 'irq'
*
****************************************************************************/
void up_disable_irq(int irq)
{
uintptr_t regaddr;
uint32_t regval;
uint32_t bit;
if (stm32_irqinfo(irq, ®addr, &bit, NVIC_CLRENA_OFFSET) == 0)
{
/* Modify the appropriate bit in the register to disable the interrupt.
* For normal interrupts, we need to set the bit in the associated
* Interrupt Clear Enable register. For other exceptions, we need to
* clear the bit in the System Handler Control and State Register.
*/
if (irq >= STM32_IRQ_FIRST)
{
putreg32(bit, regaddr);
}
else
{
regval = getreg32(regaddr);
regval &= ~bit;
putreg32(regval, regaddr);
}
}
#ifdef CONFIG_STM32H7_GPIO_IRQ
else
{
/* Maybe it is a (derived) GPIO IRQ */
stm32_gpioirqdisable(irq);
}
#endif
#if 0 /* Might be useful in early bring-up */
stm32_dumpnvic("disable", irq);
#endif
}
/****************************************************************************
* Name: up_enable_irq
*
* Description:
* Enable the IRQ specified by 'irq'
*
****************************************************************************/
void up_enable_irq(int irq)
{
uintptr_t regaddr;
uint32_t regval;
uint32_t bit;
if (stm32_irqinfo(irq, ®addr, &bit, NVIC_ENA_OFFSET) == 0)
{
/* Modify the appropriate bit in the register to enable the interrupt.
* For normal interrupts, we need to set the bit in the associated
* Interrupt Set Enable register. For other exceptions, we need to
* set the bit in the System Handler Control and State Register.
*/
if (irq >= STM32_IRQ_FIRST)
{
putreg32(bit, regaddr);
}
else
{
regval = getreg32(regaddr);
regval |= bit;
putreg32(regval, regaddr);
}
}
#ifdef CONFIG_STM32H7_GPIO_IRQ
else
{
/* Maybe it is a (derived) GPIO IRQ */
stm32_gpioirqenable(irq);
}
#endif
#if 0 /* Might be useful in early bring-up */
stm32_dumpnvic("enable", irq);
#endif
}
/****************************************************************************
* Name: up_ack_irq
*
* Description:
* Acknowledge the IRQ
*
****************************************************************************/
void up_ack_irq(int irq)
{
}
/****************************************************************************
* Name: up_prioritize_irq
*
* Description:
* Set the priority of an IRQ.
*
* Since this API is not supported on all architectures, it should be
* avoided in common implementations where possible.
*
****************************************************************************/
#ifdef CONFIG_ARCH_IRQPRIO
int up_prioritize_irq(int irq, int priority)
{
uint32_t regaddr;
uint32_t regval;
int shift;
DEBUGASSERT(irq >= STM32_IRQ_MEMFAULT && irq < NR_IRQS &&
(unsigned)priority <= NVIC_SYSH_PRIORITY_MIN);
if (irq < STM32_IRQ_FIRST)
{
/* NVIC_SYSH_PRIORITY() maps {0..15} to one of three priority
* registers (0-3 are invalid)
*/
regaddr = NVIC_SYSH_PRIORITY(irq);
irq -= 4;
}
else
{
/* NVIC_IRQ_PRIORITY() maps {0..} to one of many priority registers */
irq -= STM32_IRQ_FIRST;
regaddr = NVIC_IRQ_PRIORITY(irq);
}
regval = getreg32(regaddr);
shift = ((irq & 3) << 3);
regval &= ~(0xff << shift);
regval |= (priority << shift);
putreg32(regval, regaddr);
stm32_dumpnvic("prioritize", irq);
return OK;
}
#endif
| 29.93608 | 87 | 0.583962 |
b143ec733d32378566e11f991336b2a93c6c33cb | 402 | h | C | Tests/support/summation_kernel.h | Freddan-67/V3DLib | dcefc24a9a399ee1f5d1aa5529f44d9fd2486929 | [
"MIT"
] | 44 | 2021-01-16T14:17:15.000Z | 2022-03-11T19:53:59.000Z | Tests/support/summation_kernel.h | RcCreeperTech/V3DLib | 38eb8d55b8276de5cf703d8e13fb9b5f220c49f0 | [
"MIT"
] | 8 | 2021-01-16T17:52:02.000Z | 2021-12-18T22:38:00.000Z | Tests/support/summation_kernel.h | RcCreeperTech/V3DLib | 38eb8d55b8276de5cf703d8e13fb9b5f220c49f0 | [
"MIT"
] | 7 | 2021-01-16T14:25:47.000Z | 2022-02-03T16:34:45.000Z | #ifndef _TEST_SUPPORT_SUMMATION_KERNEL_H
#define _TEST_SUPPORT_SUMMATION_KERNEL_H
#include <vector>
#include "support.h"
extern std::vector<uint64_t> summation;
V3DLib::v3d::ByteCode summation_kernel(uint8_t num_qpus, int unroll_shift, int code_offset = 0);
void run_summation_kernel(V3DLib::v3d::ByteCode &bytecode, uint8_t num_qpus, int unroll_shift);
#endif // _TEST_SUPPORT_SUMMATION_KERNEL_H
| 33.5 | 96 | 0.820896 |
7f46932d80f1b4f3dcaa63c211d6f81736c1d906 | 15,301 | h | C | qemu-4.2.0/include/block/nbd.h | MisaZhu/qemu_raspi | 50d71ce87bb39470e6725f7428e4b6b9e1ed0359 | [
"Apache-2.0"
] | null | null | null | qemu-4.2.0/include/block/nbd.h | MisaZhu/qemu_raspi | 50d71ce87bb39470e6725f7428e4b6b9e1ed0359 | [
"Apache-2.0"
] | null | null | null | qemu-4.2.0/include/block/nbd.h | MisaZhu/qemu_raspi | 50d71ce87bb39470e6725f7428e4b6b9e1ed0359 | [
"Apache-2.0"
] | 1 | 2020-05-25T09:49:33.000Z | 2020-05-25T09:49:33.000Z | /*
* Copyright (C) 2016-2019 Red Hat, Inc.
* Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
*
* Network Block Device
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; under version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef NBD_H
#define NBD_H
#include "qapi/qapi-types-block.h"
#include "io/channel-socket.h"
#include "crypto/tlscreds.h"
#include "qapi/error.h"
/* Handshake phase structs - this struct is passed on the wire */
struct NBDOption {
uint64_t magic; /* NBD_OPTS_MAGIC */
uint32_t option; /* NBD_OPT_* */
uint32_t length;
} QEMU_PACKED;
typedef struct NBDOption NBDOption;
struct NBDOptionReply {
uint64_t magic; /* NBD_REP_MAGIC */
uint32_t option; /* NBD_OPT_* */
uint32_t type; /* NBD_REP_* */
uint32_t length;
} QEMU_PACKED;
typedef struct NBDOptionReply NBDOptionReply;
typedef struct NBDOptionReplyMetaContext {
NBDOptionReply h; /* h.type = NBD_REP_META_CONTEXT, h.length > 4 */
uint32_t context_id;
/* meta context name follows */
} QEMU_PACKED NBDOptionReplyMetaContext;
/* Transmission phase structs
*
* Note: these are _NOT_ the same as the network representation of an NBD
* request and reply!
*/
struct NBDRequest {
uint64_t handle;
uint64_t from;
uint32_t len;
uint16_t flags; /* NBD_CMD_FLAG_* */
uint16_t type; /* NBD_CMD_* */
};
typedef struct NBDRequest NBDRequest;
typedef struct NBDSimpleReply {
uint32_t magic; /* NBD_SIMPLE_REPLY_MAGIC */
uint32_t error;
uint64_t handle;
} QEMU_PACKED NBDSimpleReply;
/* Header of all structured replies */
typedef struct NBDStructuredReplyChunk {
uint32_t magic; /* NBD_STRUCTURED_REPLY_MAGIC */
uint16_t flags; /* combination of NBD_REPLY_FLAG_* */
uint16_t type; /* NBD_REPLY_TYPE_* */
uint64_t handle; /* request handle */
uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;
typedef union NBDReply {
NBDSimpleReply simple;
NBDStructuredReplyChunk structured;
struct {
/* @magic and @handle fields have the same offset and size both in
* simple reply and structured reply chunk, so let them be accessible
* without ".simple." or ".structured." specification
*/
uint32_t magic;
uint32_t _skip;
uint64_t handle;
} QEMU_PACKED;
} NBDReply;
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
NBDStructuredReplyChunk h; /* h.length >= 9 */
uint64_t offset;
/* At least one byte of data payload follows, calculated from h.length */
} QEMU_PACKED NBDStructuredReadData;
/* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE */
typedef struct NBDStructuredReadHole {
NBDStructuredReplyChunk h; /* h.length == 12 */
uint64_t offset;
uint32_t length;
} QEMU_PACKED NBDStructuredReadHole;
/* Header of all NBD_REPLY_TYPE_ERROR* errors */
typedef struct NBDStructuredError {
NBDStructuredReplyChunk h; /* h.length >= 6 */
uint32_t error;
uint16_t message_length;
} QEMU_PACKED NBDStructuredError;
/* Header of NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDStructuredMeta {
NBDStructuredReplyChunk h; /* h.length >= 12 (at least one extent) */
uint32_t context_id;
/* extents follows */
} QEMU_PACKED NBDStructuredMeta;
/* Extent chunk for NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDExtent {
uint32_t length;
uint32_t flags; /* NBD_STATE_* */
} QEMU_PACKED NBDExtent;
/* Transmission (export) flags: sent from server to client during handshake,
but describe what will happen during transmission */
enum {
NBD_FLAG_HAS_FLAGS_BIT = 0, /* Flags are there */
NBD_FLAG_READ_ONLY_BIT = 1, /* Device is read-only */
NBD_FLAG_SEND_FLUSH_BIT = 2, /* Send FLUSH */
NBD_FLAG_SEND_FUA_BIT = 3, /* Send FUA (Force Unit Access) */
NBD_FLAG_ROTATIONAL_BIT = 4, /* Use elevator algorithm -
rotational media */
NBD_FLAG_SEND_TRIM_BIT = 5, /* Send TRIM (discard) */
NBD_FLAG_SEND_WRITE_ZEROES_BIT = 6, /* Send WRITE_ZEROES */
NBD_FLAG_SEND_DF_BIT = 7, /* Send DF (Do not Fragment) */
NBD_FLAG_CAN_MULTI_CONN_BIT = 8, /* Multi-client cache consistent */
NBD_FLAG_SEND_RESIZE_BIT = 9, /* Send resize */
NBD_FLAG_SEND_CACHE_BIT = 10, /* Send CACHE (prefetch) */
NBD_FLAG_SEND_FAST_ZERO_BIT = 11, /* FAST_ZERO flag for WRITE_ZEROES */
};
#define NBD_FLAG_HAS_FLAGS (1 << NBD_FLAG_HAS_FLAGS_BIT)
#define NBD_FLAG_READ_ONLY (1 << NBD_FLAG_READ_ONLY_BIT)
#define NBD_FLAG_SEND_FLUSH (1 << NBD_FLAG_SEND_FLUSH_BIT)
#define NBD_FLAG_SEND_FUA (1 << NBD_FLAG_SEND_FUA_BIT)
#define NBD_FLAG_ROTATIONAL (1 << NBD_FLAG_ROTATIONAL_BIT)
#define NBD_FLAG_SEND_TRIM (1 << NBD_FLAG_SEND_TRIM_BIT)
#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
#define NBD_FLAG_SEND_DF (1 << NBD_FLAG_SEND_DF_BIT)
#define NBD_FLAG_CAN_MULTI_CONN (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
#define NBD_FLAG_SEND_RESIZE (1 << NBD_FLAG_SEND_RESIZE_BIT)
#define NBD_FLAG_SEND_CACHE (1 << NBD_FLAG_SEND_CACHE_BIT)
#define NBD_FLAG_SEND_FAST_ZERO (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
/* New-style handshake (global) flags, sent from server to client, and
control what will happen during handshake phase. */
#define NBD_FLAG_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_NO_ZEROES (1 << 1) /* End handshake without zeroes. */
/* New-style client flags, sent from client to server to control what happens
during handshake phase. */
#define NBD_FLAG_C_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_C_NO_ZEROES (1 << 1) /* End handshake without zeroes. */
/* Option requests. */
#define NBD_OPT_EXPORT_NAME (1)
#define NBD_OPT_ABORT (2)
#define NBD_OPT_LIST (3)
/* #define NBD_OPT_PEEK_EXPORT (4) not in use */
#define NBD_OPT_STARTTLS (5)
#define NBD_OPT_INFO (6)
#define NBD_OPT_GO (7)
#define NBD_OPT_STRUCTURED_REPLY (8)
#define NBD_OPT_LIST_META_CONTEXT (9)
#define NBD_OPT_SET_META_CONTEXT (10)
/* Option reply types. */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value))
#define NBD_REP_ACK (1) /* Data sending finished. */
#define NBD_REP_SERVER (2) /* Export description. */
#define NBD_REP_INFO (3) /* NBD_OPT_INFO/GO. */
#define NBD_REP_META_CONTEXT (4) /* NBD_OPT_{LIST,SET}_META_CONTEXT */
#define NBD_REP_ERR_UNSUP NBD_REP_ERR(1) /* Unknown option */
#define NBD_REP_ERR_POLICY NBD_REP_ERR(2) /* Server denied */
#define NBD_REP_ERR_INVALID NBD_REP_ERR(3) /* Invalid length */
#define NBD_REP_ERR_PLATFORM NBD_REP_ERR(4) /* Not compiled in */
#define NBD_REP_ERR_TLS_REQD NBD_REP_ERR(5) /* TLS required */
#define NBD_REP_ERR_UNKNOWN NBD_REP_ERR(6) /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN NBD_REP_ERR(7) /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8) /* Need INFO_BLOCK_SIZE */
/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT 0
#define NBD_INFO_NAME 1
#define NBD_INFO_DESCRIPTION 2
#define NBD_INFO_BLOCK_SIZE 3
/* Request flags, sent from client to server during transmission phase */
#define NBD_CMD_FLAG_FUA (1 << 0) /* 'force unit access' during write */
#define NBD_CMD_FLAG_NO_HOLE (1 << 1) /* don't punch hole on zero run */
#define NBD_CMD_FLAG_DF (1 << 2) /* don't fragment structured read */
#define NBD_CMD_FLAG_REQ_ONE (1 << 3) /* only one extent in BLOCK_STATUS
* reply chunk */
#define NBD_CMD_FLAG_FAST_ZERO (1 << 4) /* fail if WRITE_ZEROES is not fast */
/* Supported request types */
enum {
NBD_CMD_READ = 0,
NBD_CMD_WRITE = 1,
NBD_CMD_DISC = 2,
NBD_CMD_FLUSH = 3,
NBD_CMD_TRIM = 4,
NBD_CMD_CACHE = 5,
NBD_CMD_WRITE_ZEROES = 6,
NBD_CMD_BLOCK_STATUS = 7,
};
#define NBD_DEFAULT_PORT 10809
/* Maximum size of a single READ/WRITE data buffer */
#define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)
/*
* Maximum size of a protocol string (export name, meta context name,
* etc.). Use malloc rather than stack allocation for storage of a
* string.
*/
#define NBD_MAX_STRING_SIZE 4096
/* Two types of reply structures */
#define NBD_SIMPLE_REPLY_MAGIC 0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC 0x668e33ef
/* Structured reply flags */
#define NBD_REPLY_FLAG_DONE (1 << 0) /* This reply-chunk is last */
/* Structured reply types */
#define NBD_REPLY_ERR(value) ((1 << 15) | (value))
#define NBD_REPLY_TYPE_NONE 0
#define NBD_REPLY_TYPE_OFFSET_DATA 1
#define NBD_REPLY_TYPE_OFFSET_HOLE 2
#define NBD_REPLY_TYPE_BLOCK_STATUS 5
#define NBD_REPLY_TYPE_ERROR NBD_REPLY_ERR(1)
#define NBD_REPLY_TYPE_ERROR_OFFSET NBD_REPLY_ERR(2)
/* Extent flags for base:allocation in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_HOLE (1 << 0)
#define NBD_STATE_ZERO (1 << 1)
/* Extent flags for qemu:dirty-bitmap in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_DIRTY (1 << 0)
static inline bool nbd_reply_type_is_error(int type)
{
return type & (1 << 15);
}
/* NBD errors are based on errno numbers, so there is a 1:1 mapping,
* but only a limited set of errno values is specified in the protocol.
* Everything else is squashed to EINVAL.
*/
#define NBD_SUCCESS 0
#define NBD_EPERM 1
#define NBD_EIO 5
#define NBD_ENOMEM 12
#define NBD_EINVAL 22
#define NBD_ENOSPC 28
#define NBD_EOVERFLOW 75
#define NBD_ENOTSUP 95
#define NBD_ESHUTDOWN 108
/* Details collected by NBD_OPT_EXPORT_NAME and NBD_OPT_GO */
struct NBDExportInfo {
/* Set by client before nbd_receive_negotiate() */
bool request_sizes;
char *x_dirty_bitmap;
/* Set by client before nbd_receive_negotiate(), or by server results
* during nbd_receive_export_list() */
char *name; /* must be non-NULL */
/* In-out fields, set by client before nbd_receive_negotiate() and
* updated by server results during nbd_receive_negotiate() */
bool structured_reply;
bool base_allocation; /* base:allocation context for NBD_CMD_BLOCK_STATUS */
/* Set by server results during nbd_receive_negotiate() and
* nbd_receive_export_list() */
uint64_t size;
uint16_t flags;
uint32_t min_block;
uint32_t opt_block;
uint32_t max_block;
uint32_t context_id;
/* Set by server results during nbd_receive_export_list() */
char *description;
int n_contexts;
char **contexts;
};
typedef struct NBDExportInfo NBDExportInfo;
int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
QCryptoTLSCreds *tlscreds,
const char *hostname, QIOChannel **outioc,
NBDExportInfo *info, Error **errp);
void nbd_free_export_list(NBDExportInfo *info, int count);
int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
const char *hostname, NBDExportInfo **info,
Error **errp);
int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info,
Error **errp);
int nbd_send_request(QIOChannel *ioc, NBDRequest *request);
int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
NBDReply *reply, Error **errp);
int nbd_client(int fd);
int nbd_disconnect(int fd);
int nbd_errno_to_system_errno(int err);
typedef struct NBDExport NBDExport;
typedef struct NBDClient NBDClient;
NBDExport *nbd_export_new(BlockDriverState *bs, uint64_t dev_offset,
uint64_t size, const char *name, const char *desc,
const char *bitmap, bool readonly, bool shared,
void (*close)(NBDExport *), bool writethrough,
BlockBackend *on_eject_blk, Error **errp);
void nbd_export_close(NBDExport *exp);
void nbd_export_remove(NBDExport *exp, NbdServerRemoveMode mode, Error **errp);
void nbd_export_get(NBDExport *exp);
void nbd_export_put(NBDExport *exp);
BlockBackend *nbd_export_get_blockdev(NBDExport *exp);
AioContext *nbd_export_aio_context(NBDExport *exp);
NBDExport *nbd_export_find(const char *name);
void nbd_export_close_all(void);
void nbd_client_new(QIOChannelSocket *sioc,
QCryptoTLSCreds *tlscreds,
const char *tlsauthz,
void (*close_fn)(NBDClient *, bool));
void nbd_client_get(NBDClient *client);
void nbd_client_put(NBDClient *client);
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
const char *tls_authz, Error **errp);
/* nbd_read
* Reads @size bytes from @ioc. Returns 0 on success.
*/
static inline int nbd_read(QIOChannel *ioc, void *buffer, size_t size,
const char *desc, Error **errp)
{
int ret = qio_channel_read_all(ioc, buffer, size, errp) < 0 ? -EIO : 0;
if (ret < 0) {
if (desc) {
error_prepend(errp, "Failed to read %s: ", desc);
}
return -1;
}
return 0;
}
#define DEF_NBD_READ_N(bits) \
static inline int nbd_read##bits(QIOChannel *ioc, \
uint##bits##_t *val, \
const char *desc, Error **errp) \
{ \
if (nbd_read(ioc, val, sizeof(*val), desc, errp) < 0) { \
return -1; \
} \
*val = be##bits##_to_cpu(*val); \
return 0; \
}
DEF_NBD_READ_N(16) /* Defines nbd_read16(). */
DEF_NBD_READ_N(32) /* Defines nbd_read32(). */
DEF_NBD_READ_N(64) /* Defines nbd_read64(). */
#undef DEF_NBD_READ_N
static inline bool nbd_reply_is_simple(NBDReply *reply)
{
return reply->magic == NBD_SIMPLE_REPLY_MAGIC;
}
static inline bool nbd_reply_is_structured(NBDReply *reply)
{
return reply->magic == NBD_STRUCTURED_REPLY_MAGIC;
}
const char *nbd_reply_type_lookup(uint16_t type);
const char *nbd_opt_lookup(uint32_t opt);
const char *nbd_rep_lookup(uint32_t rep);
const char *nbd_info_lookup(uint16_t info);
const char *nbd_cmd_lookup(uint16_t info);
const char *nbd_err_lookup(int err);
#endif
| 37.22871 | 80 | 0.668322 |
7fa4256a2176a4a0d7252355971ab2077d72f68c | 2,598 | h | C | Plugins/MadaraLibrary/Source/ThirdParty/madara/filters/BufferFilterHeader.h | jredmondson/GamsPlugins | d133f86c263997a55f11b3b3d3344faeee60d726 | [
"BSD-2-Clause"
] | 3 | 2020-03-25T01:59:20.000Z | 2020-06-02T17:58:05.000Z | Plugins/MadaraLibrary/Source/ThirdParty/madara/filters/BufferFilterHeader.h | jredmondson/GamsPlugins | d133f86c263997a55f11b3b3d3344faeee60d726 | [
"BSD-2-Clause"
] | null | null | null | Plugins/MadaraLibrary/Source/ThirdParty/madara/filters/BufferFilterHeader.h | jredmondson/GamsPlugins | d133f86c263997a55f11b3b3d3344faeee60d726 | [
"BSD-2-Clause"
] | 1 | 2020-11-05T23:04:05.000Z | 2020-11-05T23:04:05.000Z | #ifndef _MADARA_FILTERS_BUFFERFILTERHEADER_H_
#define _MADARA_FILTERS_BUFFERFILTERHEADER_H_
/**
* @file BufferFilterHeader.h
* @author James Edmondson <jedmondson@gmail.com>
*
* This file contains the buffer filter header used in
* checkpointing and messaging
**/
#include "madara/utility/StdInt.h"
#include "madara/MadaraExport.h"
#include "BufferFilter.h"
#include <string.h>
namespace madara
{
namespace filters
{
/**
* @class BufferFilterHeader
* @brief Defines a buffer filter header
*
* Format:
*
* [00] [8 byte unsigned size] <br />
* [08] [8 byte id string] <br />
* [16] [4 byte version] <br />
*/
class MADARA_EXPORT BufferFilterHeader
{
public:
/**
* Constructor
**/
BufferFilterHeader() : size(20), id(""), version(0) {}
/**
* Returns the size of the encoded BufferFilterHeader class
**/
static uint64_t encoded_size(void)
{
return 20;
}
/**
* Checks compatability between the header and the filter
* @param filter the filter to check
* @return true if the filter matches the header
**/
bool check_filter(filters::BufferFilter* filter);
/**
* Reads relevant fields from a filter
* @param filter the filter to refer to
**/
void read(filters::BufferFilter* filter);
/**
* Reads a BufferFiltefrHeader instance from a buffer and updates
* the amount of buffer room remaining.
* @param buffer the readable buffer where data is stored
* @param buffer_remaining the count of bytes remaining in the
* buffer to read
* @return current buffer position for next read
* @throw exceptions::MemoryException not enough buffer to encode
**/
const char* read(const char* buffer, int64_t& buffer_remaining);
/**
* Writes a BufferFilterHeader instance to a buffer and updates
* the amount of buffer room remaining.
* @param buffer the readable buffer where data is stored
* @param buffer_remaining the count of bytes remaining in the
* buffer to read
* @return current buffer position for next write
* @throw exceptions::MemoryException not enough buffer to encode
**/
char* write(char* buffer, int64_t& buffer_remaining);
/**
* the size of this header plus the updates
**/
uint64_t size = 0;
/**
* filter id
**/
char id[8] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0'};
/**
* filter version
**/
uint32_t version = 0;
};
}
}
#include "BufferFilterHeader.inl"
#endif // _MADARA_FILTERS_BUFFERFILTERHEADER_H_
| 24.980769 | 69 | 0.650115 |
7ffe78b217d377de4393c6950740e67615e867eb | 216 | h | C | Projects/iOS < 6/PSUpdateApp/MainViewController.h | danielebogo/PSUpdateApp | 31faa5c7de3bd69d2449b2933b3d343868f9000b | [
"MIT"
] | 64 | 2015-01-26T16:13:06.000Z | 2022-03-28T08:17:32.000Z | Projects/iOS < 6/PSUpdateApp/MainViewController.h | danielebogo/PSUpdateApp | 31faa5c7de3bd69d2449b2933b3d343868f9000b | [
"MIT"
] | 4 | 2015-09-23T19:40:19.000Z | 2016-11-22T16:35:04.000Z | Projects/iOS < 6/PSUpdateApp/MainViewController.h | danielebogo/PSUpdateApp | 31faa5c7de3bd69d2449b2933b3d343868f9000b | [
"MIT"
] | 20 | 2015-03-24T22:25:26.000Z | 2018-06-22T18:44:13.000Z | //
// MainViewController.h
// PSUpdateApp
//
// Created by iBo on 18/02/13.
// Copyright (c) 2013 D-Still. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface MainViewController : UIViewController
@end
| 15.428571 | 52 | 0.694444 |
3652d8baf7d0425b92c2d1a80ea160d8ed1f53aa | 12,863 | h | C | xls/tools/testbench.h | ufo2011/xls | a1b408231664bdcbc782ec40b600fa7929588f53 | [
"Apache-2.0"
] | null | null | null | xls/tools/testbench.h | ufo2011/xls | a1b408231664bdcbc782ec40b600fa7929588f53 | [
"Apache-2.0"
] | null | null | null | xls/tools/testbench.h | ufo2011/xls | a1b408231664bdcbc782ec40b600fa7929588f53 | [
"Apache-2.0"
] | null | null | null | // Copyright 2020 The XLS Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef XLS_TOOLS_TESTBENCH_H_
#define XLS_TOOLS_TESTBENCH_H_
#include <cstdint>
#include <functional>
#include <thread> // NOLINT(build/c++11)
#include <type_traits>
#include "absl/base/internal/sysinfo.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xls/common/file/filesystem.h"
#include "xls/tools/testbench_thread.h"
namespace xls {
// Testbench is a helper class to test an XLS module (or...anything, really)
// across a range of inputs. This class creates a set of worker threads and
// divides the input across them, and sets them to go until complete.
// Execution status (percent complete, number of result mismatches) will be
// periodically printed to the terminal, as this class' primary use is for
// exploring large test spaces.
//
// Presently, work is parititioned uniformly across threads at startup. This can
// lead to work imbalance if certain areas of the input space execute faster
// than others. More advanced strategies can be explored in the future if this
// becomes a problem.
namespace internal {
// Forward decl of common Testbench base class.
template <typename JitWrapperT, typename InputT, typename ResultT,
typename ShardDataT>
class TestbenchBase;
} // namespace internal
// Testbench supports two modes of operation: one that passes per-shard data to
// the execution functions, and one that does not. The implementations switch on
// the presence of the last "ShardDataT" template type parameter. The only real
// difference is the signatures exposed - this implementation has an extra
// ShardDataT parameter on its creation command.
// Primary template: shard-data-enabled implementation. Each worker thread
// owns a ShardDataT instance produced by `create_shard`, which is passed to
// both the expected- and actual-result computations.
template <typename JitWrapperT, typename InputT, typename ResultT,
          typename ShardDataT = void, typename EnableT = void>
class Testbench
    : public internal::TestbenchBase<JitWrapperT, InputT, ResultT, ShardDataT> {
 public:
  // Args:
  //  start, end: The bounds of the space to evaluate, as [start, end).
  //  max_failures: The maximum number of result mismatches to allow (per
  //    worker thread) before cancelling execution.
  //  index_to_input: Maps a sample index in [start, end) to an InputT.
  //  create_shard: Factory for the per-thread ShardDataT instance.
  //  compute_expected: Computes the reference result for an input.
  //  compute_actual: The function to call to calculate the XLS result.
  //    "result_buffer" is a convenience buffer provided as temporary storage
  //    to hold result data if using view types in the calculation. This buffer
  //    isn't directly used internally at all - it's just a convenience to avoid
  //    the need to heap allocate on every iteration.
  //  compare_results: Should return true if both ResultTs (expected & actual)
  //    are considered equivalent.
  //  log_errors: Invoked on a mismatch with (index, input, expected, actual).
  // These lambdas return pure InputTs and ResultTs instead of wrapping them in
  // StatusOrs so we don't pay that tax on every iteration. If our algorithms
  // die, we should fix that before evaluating for correctness (since any
  // changes might affect results).
  // All lambdas must be thread-safe.
  Testbench(
      uint64_t start, uint64_t end, uint64_t max_failures,
      std::function<InputT(uint64_t)> index_to_input,
      std::function<std::unique_ptr<ShardDataT>()> create_shard,
      std::function<ResultT(ShardDataT*, InputT)> compute_expected,
      std::function<ResultT(JitWrapperT*, ShardDataT*, InputT)> compute_actual,
      std::function<bool(ResultT, ResultT)> compare_results,
      std::function<void(int64_t, InputT, ResultT, ResultT)> log_errors)
      : internal::TestbenchBase<JitWrapperT, InputT, ResultT, ShardDataT>(
            start, end, max_failures, index_to_input, compare_results,
            log_errors),
        create_shard_(create_shard),
        compute_expected_(compute_expected),
        compute_actual_(compute_actual) {
    // Factory the base class uses to spawn one worker thread per input chunk;
    // this variant creates the shard-aware TestbenchThread.
    this->thread_create_fn_ = [this](uint64_t start, uint64_t end) {
      return std::make_unique<
          TestbenchThread<JitWrapperT, InputT, ResultT, ShardDataT>>(
          &this->mutex_, &this->wake_me_, start, end, this->max_failures_,
          this->index_to_input_, create_shard_, compute_expected_,
          compute_actual_, this->compare_results_, this->log_errors_);
    };
  }

 private:
  std::function<std::unique_ptr<ShardDataT>()> create_shard_;
  std::function<ResultT(ShardDataT*, InputT)> compute_expected_;
  std::function<ResultT(JitWrapperT*, ShardDataT*, InputT)> compute_actual_;
};
// Shard-data-less implementation.
// Partial specialization selected when ShardDataT is void: the
// expected/actual computations receive only the input (plus the JIT wrapper
// for the actual computation), with no per-thread shard state.
template <typename JitWrapperT, typename InputT, typename ResultT,
          typename ShardDataT>
class Testbench<JitWrapperT, InputT, ResultT, ShardDataT,
                typename std::enable_if<std::is_void<ShardDataT>::value>::type>
    : public internal::TestbenchBase<JitWrapperT, InputT, ResultT, ShardDataT> {
 public:
  // Args mirror the shard-enabled primary template, minus the shard factory.
  // All lambdas must be thread-safe.
  Testbench(uint64_t start, uint64_t end, uint64_t max_failures,
            std::function<InputT(uint64_t)> index_to_input,
            std::function<ResultT(InputT)> compute_expected,
            std::function<ResultT(JitWrapperT*, InputT)> compute_actual,
            std::function<bool(ResultT, ResultT)> compare_results,
            std::function<void(int64_t, InputT, ResultT, ResultT)> log_errors)
      : internal::TestbenchBase<JitWrapperT, InputT, ResultT, ShardDataT>(
            start, end, max_failures, index_to_input, compare_results,
            log_errors),
        compute_expected_(compute_expected),
        compute_actual_(compute_actual) {
    // Factory the base class uses to spawn one worker thread per input chunk.
    this->thread_create_fn_ = [this](uint64_t start, uint64_t end) {
      return std::make_unique<
          TestbenchThread<JitWrapperT, InputT, ResultT, ShardDataT>>(
          &this->mutex_, &this->wake_me_, start, end, this->max_failures_,
          this->index_to_input_, compute_expected_, compute_actual_,
          this->compare_results_, this->log_errors_);
    };
  }

 private:
  std::function<ResultT(InputT)> compute_expected_;
  std::function<ResultT(JitWrapperT*, InputT)> compute_actual_;
};
// INTERNAL IMPL ---------------------------------
namespace internal {
// This common base class implements the _real_ logic: spawning runner threads
// and monitoring the results.
// Common base: spawns the runner threads, partitions the input space among
// them, and monitors progress/failures until all threads finish.
template <typename JitWrapperT, typename InputT, typename ResultT,
          typename ShardDataT>
class TestbenchBase {
 public:
  // Stores the run parameters; no threads are created until Run().
  // Defaults the thread count to the hardware concurrency.
  TestbenchBase(
      uint64_t start, uint64_t end, uint64_t max_failures,
      std::function<InputT(uint64_t)> index_to_input,
      std::function<bool(ResultT, ResultT)> compare_results,
      std::function<void(int64_t, InputT, ResultT, ResultT)> log_errors)
      : started_(false),
        num_threads_(std::thread::hardware_concurrency()),
        start_(start),
        end_(end),
        max_failures_(max_failures),
        num_samples_processed_(0),
        index_to_input_(index_to_input),
        compare_results_(compare_results),
        log_errors_(log_errors) {}

  // Sets the number of threads to use. Must be called before Run().
  absl::Status SetNumThreads(int num_threads) {
    absl::MutexLock lock(&mutex_);
    if (this->started_) {
      return absl::FailedPreconditionError(
          "Can't change the number of threads after starting execution.");
    }
    num_threads_ = num_threads;
    return absl::OkStatus();
  }

  // Executes the test: spawns the workers, polls them every kPrintInterval
  // (or sooner if woken), and returns an error if any thread reported a
  // failure status or at least one result mismatch occurred.
  absl::Status Run() {
    // Lock before spawning threads to prevent missing any early wakeup signals
    // here.
    mutex_.Lock();
    started_ = true;
    start_time_ = absl::Now();
    // Set up all the workers.
    uint64_t chunk_size = (end_ - start_) / num_threads_;
    uint64_t chunk_remainder =
        chunk_size == 0 ? (end_ - start_) : (end_ - start_) % chunk_size;
    uint64_t first = 0;
    uint64_t last;
    for (int i = 0; i < num_threads_; i++) {
      last = first + chunk_size;
      // Distribute any remainder evenly amongst the threads.
      if (chunk_remainder > 0) {
        last++;
        chunk_remainder--;
      }
      threads_.push_back(thread_create_fn_(first, last));
      threads_.back()->Run();
      // NOTE(review): if TestbenchThread treats its bounds as the half-open
      // range [first, last) — as the "[start, end)" comment above suggests —
      // then advancing with last + 1 skips index `last` entirely (first = last
      // would be correct). Confirm against testbench_thread.h.
      first = last + 1;
    }
    // Now monitor them.
    bool done = false;
    while (!done) {
      int num_done = 0;
      // Sleeps until a worker signals completion or the print interval
      // elapses; reacquires mutex_ on return.
      wake_me_.WaitWithTimeout(&mutex_, kPrintInterval);
      PrintStatus();
      // See if everyone's done or if someone blew up.
      for (int i = 0; i < threads_.size(); i++) {
        if (!threads_[i]->running()) {
          num_done++;
          absl::Status status = threads_[i]->status();
          if (!status.ok()) {
            // A worker failed hard: cancel the rest and stop monitoring.
            Cancel();
            num_done = threads_.size();
            break;
          }
        }
      }
      done = num_done == threads_.size();
    }
    // When exiting the loop, we'll be holding the lock (due to
    // WaitWithTimeout).
    mutex_.Unlock();
    // Join threads at the end because we are polite.
    for (int i = 0; i < threads_.size(); i++) {
      threads_[i]->Join();
    }
    for (int i = 0; i < threads_.size(); i++) {
      if (threads_[i]->num_failures() != 0) {
        return absl::InternalError(
            "There was at least one mismatch during execution.");
      }
    }
    return absl::OkStatus();
  }

 protected:
  // How many seconds to wait before printing status (at most).
  static constexpr absl::Duration kPrintInterval = absl::Seconds(5);

  // Prints per-thread progress plus aggregate throughput and an ETA.
  // Caller must hold mutex_ (invoked from Run() after WaitWithTimeout).
  void PrintStatus() {
    // Get the remainder-adjusted chunk size for this thread.
    auto thread_chunk_size = [this](int thread_index) {
      uint64_t total_size = end_ - start_;
      uint64_t chunk_size = total_size / threads_.size();
      uint64_t remainder =
          chunk_size == 0 ? total_size : total_size % chunk_size;
      if (thread_index < remainder) {
        chunk_size++;
      }
      return chunk_size;
    };
    // Ignore remainder issues here. It shouldn't matter much at all.
    absl::Time now = absl::Now();
    auto delta = now - start_time_;
    uint64_t total_done = 0;
    for (int64_t i = 0; i < threads_.size(); ++i) {
      uint64_t num_passes = threads_[i]->num_passes();
      uint64_t num_failures = threads_[i]->num_failures();
      uint64_t thread_done = num_passes + num_failures;
      uint64_t chunk_size = thread_chunk_size(i);
      total_done += thread_done;
      // NOTE(review): when thread_done == 0 the us/sample term divides by
      // zero and prints "inf"; cosmetic only, but worth confirming intent.
      std::cout << absl::StreamFormat(
                       "thread %02d: %f%% @ %.1f us/sample :: failures %d", i,
                       static_cast<double>(thread_done) / chunk_size * 100.0,
                       absl::ToDoubleMicroseconds(delta) / thread_done,
                       num_failures)
                << "\n";
    }
    double done_per_second = delta == absl::ZeroDuration()
                                 ? 0.0
                                 : total_done / absl::ToDoubleSeconds(delta);
    int64_t remaining = end_ - start_ - total_done;
    auto estimate = absl::Seconds(
        done_per_second == 0.0 ? 0.0 : remaining / done_per_second);
    // Throughput over just the last reporting window (approximated by
    // assuming exactly kPrintInterval elapsed since the previous print).
    double throughput_this_print =
        static_cast<double>(total_done - num_samples_processed_) /
        ToInt64Seconds(kPrintInterval);
    std::cout << absl::StreamFormat(
                     "--- ^ after %s elapsed; %.2f Misamples/s; estimate %s "
                     "remaining ...",
                     absl::FormatDuration(delta),
                     throughput_this_print / std::pow(2, 20),
                     absl::FormatDuration(estimate))
              << std::endl;
    num_samples_processed_ = total_done;
  }

  // Requests that all running threads terminate (but doesn't Join() them).
  void Cancel() {
    for (int i = 0; i < threads_.size(); i++) {
      threads_[i]->Cancel();
    }
  }

  bool started_;                    // True once Run() has begun.
  int num_threads_;                 // Worker count; settable before Run().
  absl::Time start_time_;           // Set at the top of Run().
  uint64_t start_;                  // Inclusive lower bound of the space.
  uint64_t end_;                    // Exclusive upper bound of the space.
  uint64_t max_failures_;           // Per-thread mismatch cutoff.
  uint64_t num_samples_processed_;  // Total done as of the last status print.
  std::function<InputT(uint64_t)> index_to_input_;
  std::function<bool(ResultT, ResultT)> compare_results_;
  std::function<void(int64_t, InputT, ResultT, ResultT)> log_errors_;
  using ThreadT = TestbenchThread<JitWrapperT, InputT, ResultT, ShardDataT>;
  // Installed by the derived Testbench's constructor; builds one worker for
  // the given [start, end] bounds.
  std::function<std::unique_ptr<ThreadT>(uint64_t, uint64_t)> thread_create_fn_;
  std::vector<std::unique_ptr<ThreadT>> threads_;
  // The main thread sleeps while tests are running. As worker threads finish,
  // they'll wake us up via this condvar.
  absl::Mutex mutex_;
  absl::CondVar wake_me_ ABSL_GUARDED_BY(mutex_);
};
} // namespace internal
} // namespace xls
#endif // XLS_TOOLS_TESTBENCH_H_
| 39.097264 | 80 | 0.66804 |
d54fea8bb4deb07e0b992d604cd9c756f1d1d5ad | 7,552 | h | C | lib/sw_services/xilnvm/src/common/xnvm_defs.h | erique/embeddedsw | 4b5fd15c71405844e03f2c276daa38cfcbb6459b | [
"BSD-2-Clause",
"MIT"
] | 1 | 2021-12-17T18:07:58.000Z | 2021-12-17T18:07:58.000Z | lib/sw_services/xilnvm/src/common/xnvm_defs.h | erique/embeddedsw | 4b5fd15c71405844e03f2c276daa38cfcbb6459b | [
"BSD-2-Clause",
"MIT"
] | null | null | null | lib/sw_services/xilnvm/src/common/xnvm_defs.h | erique/embeddedsw | 4b5fd15c71405844e03f2c276daa38cfcbb6459b | [
"BSD-2-Clause",
"MIT"
] | null | null | null | /******************************************************************************
* Copyright (c) 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
*******************************************************************************/
/*****************************************************************************/
/**
*
* @file xnvm_defs.h
* @addtogroup xnvm_api_ids XilNvm API IDs
* @{
*
* @cond xnvm_internal
* This file contains the xilnvm API IDs
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -------------------------------------------------------
* 1.0 kal 07/05/21 Initial release
* kal 07/25/21 Added eFUSE IPI API_IDs and common structures between
* client and server
* kpt 08/27/21 Added client-server support for puf helper data efuse
* programming
*
* </pre>
* @note
*
* @endcond
******************************************************************************/
#ifndef XNVM_DEFS_H
#define XNVM_DEFS_H
#ifdef __cplusplus
extern "C" {
#endif
/***************************** Include Files *********************************/
#include "xil_printf.h"
#include "xil_types.h"
/************************** Constant Definitions ****************************/
/**@cond xnvm_internal
* @{
*/
/* Enable client printfs by setting XNVM_DEBUG to 1 */
#define XNVM_DEBUG (0U)
#if (XNVM_DEBUG)
#define XNVM_DEBUG_GENERAL (1U)
#else
#define XNVM_DEBUG_GENERAL (0U)
#endif
/* Key and Iv length definitions for Versal eFuse */
#define XNVM_EFUSE_AES_KEY_LEN_IN_WORDS (8U)
#define XNVM_EFUSE_IV_LEN_IN_WORDS (3U)
#define XNVM_EFUSE_PPK_HASH_LEN_IN_WORDS (8U)
#define XNVM_EFUSE_DNA_IN_WORDS (4U)
#define XNVM_PUF_FORMATTED_SYN_DATA_LEN_IN_WORDS (127U)
#define XNVM_NUM_OF_REVOKE_ID_FUSES (8U)
#define XNVM_NUM_OF_OFFCHIP_ID_FUSES (8U)
#define XNVM_EFUSE_IV_LEN_IN_BITS (96U)
#define XNVM_EFUSE_AES_KEY_LEN_IN_BITS (256U)
#define XNVM_EFUSE_PPK_HASH_LEN_IN_BITS (256U)
#define XNVM_EFUSE_IV_LEN_IN_BYES (12U)
#define XNVM_EFUSE_AES_KEY_LEN_IN_BYTES (32U)
#define XNVM_EFUSE_PPK_HASH_LEN_IN_BYTES (32U)
#define XNVM_IV_STRING_LEN (24U)
/***************** Macros (Inline Functions) Definitions *********************/
#define XNvm_Printf(DebugType, ...) \
if ((DebugType) == 1U) {xil_printf (__VA_ARGS__);}
/* Macro to typecast XILSECURE API ID */
#define XNVM_API(ApiId) ((u32)ApiId)
#define XNVM_API_ID_MASK (0xFFU)
/************************** Variable Definitions *****************************/
/**************************** Type Definitions *******************************/
/* Container for one PPK (primary public key) hash read from/written to eFUSEs. */
typedef struct {
	u32 Hash[XNVM_EFUSE_PPK_HASH_LEN_IN_WORDS];
} XNvm_PpkHash;

/* Container for one 96-bit IV (3 x 32-bit words). */
typedef struct {
	u32 Iv[XNVM_EFUSE_IV_LEN_IN_WORDS];
} XNvm_Iv;

/* Container for the device DNA (unique device identifier). */
typedef struct {
	u32 Dna[XNVM_EFUSE_DNA_IN_WORDS];
} XNvm_Dna;

/* Selects which eFUSE IV a read/write operation targets. */
typedef enum {
	XNVM_EFUSE_META_HEADER_IV_RANGE = 0,
	XNVM_EFUSE_BLACK_IV,
	XNVM_EFUSE_PLM_IV_RANGE,
	XNVM_EFUSE_DATA_PARTITION_IV_RANGE
} XNvm_IvType;

/* Selects which of the three PPK slots an operation targets. */
typedef enum {
	XNVM_EFUSE_PPK0 = 0,
	XNVM_EFUSE_PPK1,
	XNVM_EFUSE_PPK2
} XNvm_PpkType;

/* Indexes the eight on-chip revocation-ID eFUSE words. */
typedef enum {
	XNVM_EFUSE_REVOCATION_ID_0 = 0,
	XNVM_EFUSE_REVOCATION_ID_1,
	XNVM_EFUSE_REVOCATION_ID_2,
	XNVM_EFUSE_REVOCATION_ID_3,
	XNVM_EFUSE_REVOCATION_ID_4,
	XNVM_EFUSE_REVOCATION_ID_5,
	XNVM_EFUSE_REVOCATION_ID_6,
	XNVM_EFUSE_REVOCATION_ID_7
} XNvm_RevocationId;

/* Indexes the eight off-chip revocation-ID eFUSE words; XNVM_EFUSE_INVLD
 * marks an invalid/unset selection. */
typedef enum {
	XNVM_EFUSE_INVLD = -1,
	XNVM_EFUSE_OFFCHIP_REVOKE_ID_0 = 0,
	XNVM_EFUSE_OFFCHIP_REVOKE_ID_1,
	XNVM_EFUSE_OFFCHIP_REVOKE_ID_2,
	XNVM_EFUSE_OFFCHIP_REVOKE_ID_3,
	XNVM_EFUSE_OFFCHIP_REVOKE_ID_4,
	XNVM_EFUSE_OFFCHIP_REVOKE_ID_5,
	XNVM_EFUSE_OFFCHIP_REVOKE_ID_6,
	XNVM_EFUSE_OFFCHIP_REVOKE_ID_7
} XNvm_OffchipId;
/* Security-control eFUSE bits (lock/disable flags for keys, JTAG, debug). */
typedef struct {
	u8 AesDis;
	u8 JtagErrOutDis;
	u8 JtagDis;
	u8 Ppk0WrLk;
	u8 Ppk1WrLk;
	u8 Ppk2WrLk;
	u8 AesCrcLk;
	u8 AesWrLk;
	u8 UserKey0CrcLk;
	u8 UserKey0WrLk;
	u8 UserKey1CrcLk;
	u8 UserKey1WrLk;
	u8 SecDbgDis;
	u8 SecLockDbgDis;
	u8 BootEnvWrLk;
	u8 RegInitDis;
} XNvm_EfuseSecCtrlBits;

/* PUF security-control eFUSE bits. */
typedef struct {
	u8 PufRegenDis;
	u8 PufHdInvalid;
	u8 PufTest2Dis;
	u8 PufDis;
	u8 PufSynLk;
} XNvm_EfusePufSecCtrlBits;

/* Miscellaneous-control eFUSE bits (glitch detection, KAT/LBIST enables,
 * PPK invalidation). */
typedef struct {
	u8 GlitchDetHaltBootEn;
	u8 GlitchDetRomMonitorEn;
	u8 HaltBootError;
	u8 HaltBootEnv;
	u8 CryptoKatEn;
	u8 LbistEn;
	u8 SafetyMissionEn;
	u8 Ppk0Invalid;
	u8 Ppk1Invalid;
	u8 Ppk2Invalid;
} XNvm_EfuseMiscCtrlBits;

/* Security MISC1 eFUSE bits (MBIST / NoC scan / sysmon monitor enables). */
typedef struct {
	u8 LpdMbistEn;
	u8 PmcMbistEn;
	u8 LpdNocScEn;
	u8 SysmonVoltMonEn;
	u8 SysmonTempMonEn;
} XNvm_EfuseSecMisc1Bits;

/* Boot-environment-control eFUSE bits; Prgm* flags select which of the
 * corresponding value fields below are to be programmed. */
typedef struct {
	u8 PrgmSysmonTempHot;
	u8 PrgmSysmonVoltPmc;
	u8 PrgmSysmonVoltPslp;
	u8 PrgmSysmonTempCold;
	u8 SysmonTempEn;
	u8 SysmonVoltEn;
	u8 SysmonVoltSoc;
	u8 SysmonTempHot;
	u8 SysmonVoltPmc;
	u8 SysmonVoltPslp;
	u8 SysmonTempCold;
} XNvm_EfuseBootEnvCtrlBits;

/* Glitch-detection configuration: trim value plus monitor/halt enables. */
typedef struct {
	u8 PrgmGlitch;
	u8 GlitchDetWrLk;
	u32 GlitchDetTrim;
	u8 GdRomMonitorEn;
	u8 GdHaltBootEn;
} XNvm_EfuseGlitchCfgBits;

/* AES key payloads; each Prgm* flag requests programming of its key array. */
typedef struct {
	u8 PrgmAesKey;
	u8 PrgmUserKey0;
	u8 PrgmUserKey1;
	u32 AesKey[XNVM_EFUSE_AES_KEY_LEN_IN_WORDS];
	u32 UserKey0[XNVM_EFUSE_AES_KEY_LEN_IN_WORDS];
	u32 UserKey1[XNVM_EFUSE_AES_KEY_LEN_IN_WORDS];
} XNvm_EfuseAesKeys;

/* PPK hash payloads; each Prgm* flag requests programming of its hash. */
typedef struct {
	u8 PrgmPpk0Hash;
	u8 PrgmPpk1Hash;
	u8 PrgmPpk2Hash;
	u32 Ppk0Hash[XNVM_EFUSE_PPK_HASH_LEN_IN_WORDS];
	u32 Ppk1Hash[XNVM_EFUSE_PPK_HASH_LEN_IN_WORDS];
	u32 Ppk2Hash[XNVM_EFUSE_PPK_HASH_LEN_IN_WORDS];
} XNvm_EfusePpkHash;

/* Request to program the decrypt-only eFUSE. */
typedef struct {
	u8 PrgmDecOnly;
} XNvm_EfuseDecOnly;

/* On-chip revocation-ID payload; PrgmRevokeId requests programming. */
typedef struct {
	u8 PrgmRevokeId;
	u32 RevokeId[XNVM_NUM_OF_REVOKE_ID_FUSES];
} XNvm_EfuseRevokeIds;

/* Off-chip revocation-ID payload; PrgmOffchipId requests programming. */
typedef struct {
	u8 PrgmOffchipId;
	u32 OffChipId[XNVM_NUM_OF_OFFCHIP_ID_FUSES];
} XNvm_EfuseOffChipIds;

/* IV payloads; each Prgm* flag requests programming of its IV array. */
typedef struct {
	u8 PrgmMetaHeaderIv;
	u8 PrgmBlkObfusIv;
	u8 PrgmPlmIv;
	u8 PrgmDataPartitionIv;
	u32 MetaHeaderIv[XNVM_EFUSE_IV_LEN_IN_WORDS];
	u32 BlkObfusIv[XNVM_EFUSE_IV_LEN_IN_WORDS];
	u32 PlmIv[XNVM_EFUSE_IV_LEN_IN_WORDS];
	u32 DataPartitionIv[XNVM_EFUSE_IV_LEN_IN_WORDS];
} XNvm_EfuseIvs;

/* User-eFUSE access request: range of fuses plus the 64-bit address of the
 * caller's data buffer (addresses are passed over IPI, hence u64). */
typedef struct {
	u32 StartUserFuseNum;
	u32 NumOfUserFuses;
	u64 UserFuseDataAddr;
} XNvm_EfuseUserDataAddr;

#ifdef XNVM_ACCESS_PUF_USER_DATA
/* PUF eFUSEs repurposed as general user fuses (compile-time option). */
typedef struct {
	u8 EnvMonitorDis;
	u8 PrgmPufFuse;
	u32 StartPufFuseNum;
	u32 NumOfPufFuses;
	u64 PufFuseDataAddr;
} XNvm_EfusePufFuseAddr;
#else
/* PUF helper-data programming request: syndrome data, Chash and Aux values
 * plus the PUF security-control bits to set. */
typedef struct {
	XNvm_EfusePufSecCtrlBits PufSecCtrlBits;
	u8 PrgmPufHelperData;
	u8 EnvMonitorDis;
	u32 EfuseSynData[XNVM_PUF_FORMATTED_SYN_DATA_LEN_IN_WORDS];
	u32 Chash;
	u32 Aux;
}XNvm_EfusePufHdAddr;
#endif

/* Top-level eFUSE write request: 64-bit addresses of the individual payload
 * structures above (shared between client and server over IPI). */
typedef struct {
	u64 EnvMonDisFlag;
	u64 AesKeyAddr;
	u64 PpkHashAddr;
	u64 DecOnlyAddr;
	u64 SecCtrlAddr;
	u64 MiscCtrlAddr;
	u64 RevokeIdAddr;
	u64 IvAddr;
	u64 UserFuseAddr;
	u64 GlitchCfgAddr;
	u64 BootEnvCtrlAddr;
	u64 Misc1CtrlAddr;
	u64 OffChipIdAddr;
} XNvm_EfuseDataAddr;
/* XilNVM API ids */
/* IPI command identifiers understood by the XilNVM server; shared with the
 * client side. Order is part of the wire protocol — append only. */
typedef enum {
	XNVM_API_FEATURES = 0U,
	XNVM_BBRAM_WRITE_AES_KEY,
	XNVM_BBRAM_ZEROIZE,
	XNVM_BBRAM_WRITE_USER_DATA,
	XNVM_BBRAM_READ_USER_DATA,
	XNVM_BBRAM_LOCK_WRITE_USER_DATA,
	XNVM_EFUSE_WRITE,
	XNVM_EFUSE_WRITE_PUF,
	XNVM_EFUSE_PUF_USER_FUSE_WRITE,
	XNVM_EFUSE_READ_IV,
	XNVM_EFUSE_READ_REVOCATION_ID,
	XNVM_EFUSE_READ_OFFCHIP_REVOCATION_ID,
	XNVM_EFUSE_READ_USER_FUSES,
	XNVM_EFUSE_READ_MISC_CTRL_BITS,
	XNVM_EFUSE_READ_SEC_CTRL_BITS,
	XNVM_EFUSE_READ_SEC_MISC1_BITS,
	XNVM_EFUSE_READ_BOOT_ENV_CTRL_BITS,
	XNVM_EFUSE_READ_PUF_SEC_CTRL_BITS,
	XNVM_EFUSE_READ_PPK_HASH,
	XNVM_EFUSE_READ_DEC_EFUSE_ONLY,
	XNVM_EFUSE_READ_DNA,
	XNVM_EFUSE_READ_PUF_USER_FUSE,
	XNVM_EFUSE_READ_PUF,
	XNVM_API_MAX,  /* Sentinel: number of defined API ids. */
} XNvm_ApiId;
/**
* @}
* @endcond
*/
#ifdef __cplusplus
}
#endif
#endif /* XNVM_DEFS_H */
| 23.236923 | 80 | 0.718485 |
ec3094b2fb43395d6ee3e6b4ea8c3c1af7c3d4ca | 6,822 | h | C | Renderer.h | derekniess/3D-Physics-Engine-Framework | 3a0c2c83813bfa1a433dbed4edfcdaa1082bfd32 | [
"MIT"
] | 3 | 2020-05-21T05:53:59.000Z | 2020-09-26T11:20:33.000Z | Renderer.h | derekniess/3D-Physics-Engine | 3a0c2c83813bfa1a433dbed4edfcdaa1082bfd32 | [
"MIT"
] | null | null | null | Renderer.h | derekniess/3D-Physics-Engine | 3a0c2c83813bfa1a433dbed4edfcdaa1082bfd32 | [
"MIT"
] | null | null | null | #pragma once
#include "Typedefs.h"
// GLEW
#include <GL/glew.h>
// GLFW
#include <GLFW/glfw3.h>
#include "Observer.h"
// Render utilities
#include "ShaderProgram.h"
class Primitive;
class Light;
class Camera;
class Engine;
class Renderer : public Observer
{
/*----------MEMBER VARIABLES----------*/
public:
enum PrimitiveDataType
{
STATIC, // Vertex data is immutable for lifespan of primitive
DYNAMIC, // Vertex data can change over lifespan of primitive
DataTypeCount
};
/*--------------------------- CONSTANTS --------------------------------*/
const static int MAXIMUM_STATIC_RENDER_OBJECTS = 4096;
const static int MAXIMUM_DYNAMIC_RENDER_OBJECTS = 1024;
/*--------------------------- SHADER PROGRAMS --------------------------------*/
ShaderProgram DefaultShader;
ShaderProgram DebugNormalsShader;
ShaderProgram DebugMeshShader;
ShaderProgram BillboardingQuadsShader;
ShaderProgram LightSourceShader;
/*--------------------------- VERTEX ARRAY OBJECTS --------------------------------*/
GLuint * StaticVAOList[MAXIMUM_STATIC_RENDER_OBJECTS];
GLuint * DynamicVAOList[MAXIMUM_DYNAMIC_RENDER_OBJECTS];
/*--------------------------- VERTEX BUFFER OBJECTS --------------------------------*/
GLuint * StaticVBOList[MAXIMUM_STATIC_RENDER_OBJECTS];
GLuint * DynamicVBOList[MAXIMUM_DYNAMIC_RENDER_OBJECTS];
/*--------------------------- ELEMENT BUFFER OBJECTS --------------------------------*/
GLuint * EABList[MAXIMUM_STATIC_RENDER_OBJECTS];
/*--------------------------- TEXTURE BUFFER OBJECTS --------------------------------*/
GLuint * TBOList[MAXIMUM_STATIC_RENDER_OBJECTS];
/*--------------------------- REGISTRIES --------------------------------*/
// Holds the list of slots available for static objects to bind VAO and VBOs - false is 'empty', true is 'currently in use'
bool StaticObjectRegistry[MAXIMUM_STATIC_RENDER_OBJECTS];
// Holds the list of slots available for dynamic objects to bind VAO and VBOs - false is 'empty', true is 'currently in use'
bool DynamicObjectRegistry[MAXIMUM_DYNAMIC_RENDER_OBJECTS];
/*------------------------------- ENGINE REFERENCE -------------------------------*/
Engine & EngineHandle;
/*--------------------------- MATRICES --------------------------------*/
glm::mat4 Projection;
glm::mat4 View;
	// The horizontal Field of View, in degrees : the amount of "zoom". Think "camera lens". Usually between 90° (extra wide) and 30° (quite zoomed in)
float FieldOfView = 45.0f;
// Debug Primitives
Primitive * DebugArrowPrimitive;
Primitive * DebugQuadPrimitive;
// List of render components
std::vector<Primitive *> RenderList;
// List of light components
std::vector<Light *> LightList;
// Holds the number of currently active/bound textures
GLuint TextureCount;
// The thickness of debug wireframe lines
static int WireframeThickness;
// The thickness of debug line loops
static int LineLoopThickness;
// TODO [@Derek] : Later on use an array of unique ptrs to cameras owned by renderer,
// Active camera at any time is pointed to by this pointer
Camera * pActiveCamera;
/*----------MEMBER FUNCTIONS----------*/
public:
	// Constructor: stores a handle to the owning engine and wires each shader
	// program back to this renderer. No GL objects are created here; GL setup
	// happens later in InititalizeRenderer().
	Renderer(Engine & aEngine) :EngineHandle(aEngine),
		DefaultShader(*this),
		DebugNormalsShader(*this),
		DebugMeshShader(*this),
		BillboardingQuadsShader(*this),
		LightSourceShader(*this),
		TextureCount(0)
	{}
	// Destructor: releases the GL buffers, textures and vertex-array objects
	// for every static and dynamic slot.
	// NOTE(review): every slot is deleted, including slots that may never
	// have been registered — this assumes all pointer tables are fully
	// populated (presumably by InititalizeRenderer()); confirm, otherwise
	// uninitialized pointers are passed to glDelete*.
	virtual ~Renderer()
	{
		for (int i = 0; i < MAXIMUM_STATIC_RENDER_OBJECTS; i++)
		{
			// BUFFER DELETION
			glDeleteBuffers(1, StaticVBOList[i]);
			glDeleteBuffers(1, EABList[i]);
			// TEXTURE DELETION
			glDeleteTextures(1, TBOList[i]);
			// VERTEX ARRAY DELETION
			glDeleteVertexArrays(1, StaticVAOList[i]);
		}
		for (int i = 0; i < MAXIMUM_DYNAMIC_RENDER_OBJECTS; i++)
		{
			// BUFFER DELETION
			glDeleteBuffers(1,DynamicVBOList[i]);
			// VERTEX ARRAY DELETION
			glDeleteVertexArrays(1, DynamicVAOList[i]);
		}
	}
// Getters
inline GLuint const & GetActiveShaderProgram() { return DefaultShader.GetShaderProgram(); }
inline int GetRenderListSize() { return (int)RenderList.size(); }
inline int GetTextureCount() { return TextureCount; }
Engine & GetEngine() { return EngineHandle; }
// Setters
inline void SetActiveCamera(Camera * aCameraPtr) { pActiveCamera = aCameraPtr; }
void InititalizeRenderer();
/* --- Primitive handling functions --- */
// Converts static primitive to dynamic primitive
void ConvertStaticToDynamic(Primitive * aPrimitive);
// Gives a primitive a VAO & VBO to use, adds to list of render objects
void RegisterPrimitive(Primitive * aNewPrimitive);
// Called by RegisterPrimitive()
void RegisterStaticPrimitive(Primitive * aNewPrimitive);
void RegisterDynamicPrimitive(Primitive * aNewPrimitive);
// Removes a primitive from Dynamic/Static list
void DeregisterPrimitive(Primitive * aOldPrimitive);
// Called by DeregisterPrimitive()
void DeregisterStaticPrimitive(Primitive * aOldPrimitive);
void DeregisterDynamicPrimitive(Primitive * aOldPrimitive);
// Create the debug arrow primitive and save it for later
void CreateDebugArrowPrimitive();
// Create the debug quad primitive and save it for later
void CreateDebugQuadPrimitive();
/* --- Light handling functions --- */
void RegisterLight(Light * aNewLight);
// Binds texture at requested ID to primitive
bool BindTexture(Primitive * aPrimitive, int aTextureID);
void Render();
// Main pass render functions
void MainRenderPass();
// Debug pass render function
void DebugRenderPass();
void RenderDebugWireframes(GLint aMVPAttributeIndex);
void RenderDebugNormals(GLint aMVPAttributeIndex);
void RenderDebugArrows(GLint aMVPAttributeIndex);
void RenderDebugLineLoops(GLint aMVPAttributeIndex);
void RenderBillboardingQuads(GLint aModelAttributeIndex, GLint aViewAttributeIndex, GLint aProjectionAttributeIndex, GLint aBillboardModeAttributeIndex);
void RenderLightSources(GLint aMVPAttributeIndex);
// Utility functions
inline void SetRenderModeWireframe()
{
glLineWidth((GLfloat)WireframeThickness);
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
}
inline void SetRenderModeFill()
{
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
}
static void check_gl_error_render()
{
GLenum err(glGetError());
using namespace std;
while (err != GL_NO_ERROR) {
string error;
switch (err) {
case GL_INVALID_OPERATION: error = "INVALID_OPERATION"; break;
case GL_INVALID_ENUM: error = "INVALID_ENUM"; break;
case GL_INVALID_VALUE: error = "INVALID_VALUE"; break;
case GL_OUT_OF_MEMORY: error = "OUT_OF_MEMORY"; break;
case GL_INVALID_FRAMEBUFFER_OPERATION: error = "INVALID_FRAMEBUFFER_OPERATION"; break;
}
cerr << "GL_" << error.c_str() << endl;
err = glGetError();
}
}
virtual void OnNotify(Event * aEvent) override;
}; | 34.281407 | 154 | 0.684697 |
b95c785990be15d836de223d9cec0ca87b8be19a | 520 | c | C | d/magic/symbols/old/lathander_symbol.c | facadepapergirl/shadowgate | 2b811671c5c85952ed93767753d72fc0d12819d8 | [
"MIT"
] | null | null | null | d/magic/symbols/old/lathander_symbol.c | facadepapergirl/shadowgate | 2b811671c5c85952ed93767753d72fc0d12819d8 | [
"MIT"
] | null | null | null | d/magic/symbols/old/lathander_symbol.c | facadepapergirl/shadowgate | 2b811671c5c85952ed93767753d72fc0d12819d8 | [
"MIT"
] | null | null | null | // /d/magic/symbols/lathander_symbol.c
#include <std.h>
inherit "/d/magic/symbols/holy_symbol.c";
// Object setup: configures the ids, name and descriptions of this holy
// symbol on top of the inherited holy_symbol behavior.
void create(){
    ::create();
    set_id(({"holy symbol","symbol","lathander_holy_symbol","disk","quartz disk"}));
    set_name("holy symbol");
    set_short("holy symbol");
    set_long("This is a holy symbol.");
}
// Called whenever a living being encounters this object; swaps the generic
// holy-symbol description for the Lathander-specific quartz-disk one.
// NOTE(review): static descriptions are conventionally set once in create();
// setting them here re-applies them on every encounter — confirm this is
// intentional (e.g. to override the base description set in create()).
void init(){
    ::init();
    set_name("quartz disk");
    set_short("a quartz disk");
    set_long("A piece of rose quartz, enchanted to radiate a warm glow. The disk hangs from a brightly-colored sash.");
}
| 24.761905 | 118 | 0.667308 |
57981f0f13392d3e4387d5463e57b746e4e4a43a | 1,425 | h | C | Whip-Jump-Kick-Grab/TransformationProcesses.h | davidwadsworth/whip-jump-kick-grab | 443641a9044d1fd6f8fd09f6c509c67a8092a27c | [
"MIT"
] | 1 | 2021-02-19T00:31:50.000Z | 2021-02-19T00:31:50.000Z | Whip-Jump-Kick-Grab/TransformationProcesses.h | davidwadsworth/whip-jump-kick-grab | 443641a9044d1fd6f8fd09f6c509c67a8092a27c | [
"MIT"
] | null | null | null | Whip-Jump-Kick-Grab/TransformationProcesses.h | davidwadsworth/whip-jump-kick-grab | 443641a9044d1fd6f8fd09f6c509c67a8092a27c | [
"MIT"
] | null | null | null | #pragma once
#include "Process.h"
#include "Game.h"
// Process that asks the asset manager to display one full-screen-width
// prompt sprite across the top 160 pixels of the screen.
class DisplayPrompt : public Process
{
	int sprite_id_;          // Asset id of the prompt sprite to show.
	SDL_Rect* prompt_rect_;  // Destination rect: full width, top 160 px.
public:
	// NOTE(review): prompt_rect_ is heap-allocated and never deleted by this
	// class; presumably ownership transfers to Game::assets through
	// create_prompt() — confirm, otherwise one SDL_Rect leaks per prompt.
	DisplayPrompt(int sprite_id)
		: sprite_id_(sprite_id), prompt_rect_(new SDL_Rect{0, 0, SCREEN_WIDTH, 160})
	{}
	// Creates the prompt entity; completes in a single step.
	bool do_work() override
	{
		Game::assets->create_prompt(sprite_id_, prompt_rect_);
		return true;
	}
	// Always reports full completion (one-shot process).
	float work_done() override
	{
		return 1;
	}
};
// Process that displays a "round" banner followed by a two-digit round
// number (tens digit then ones digit), laid out across the top of the screen.
class DisplayRoundPrompt : public Process
{
	SDL_Rect *round_dest_, *tens_dest_, *ones_dest_;  // Banner / digit rects.
	int round_id_, number_id_, round_number_;  // Banner sprite id, id of digit "0", round to show.
public:
	// NOTE(review): the three SDL_Rects are heap-allocated and never deleted
	// here; presumably Game::assets takes ownership via create_prompt() —
	// confirm, otherwise they leak.
	DisplayRoundPrompt(int round_id, int number_id, int round_number)
		: round_dest_(new SDL_Rect{ 0,0, 600, 160 }), tens_dest_(new SDL_Rect{ 600, 0, 100, 160 }), ones_dest_(new SDL_Rect{700, 0, 100, 160}),
		round_id_(round_id), number_id_(number_id), round_number_(round_number)
	{}
	// Creates the banner and the two digit prompts. Digit sprites are assumed
	// to be consecutive assets starting at number_id_ (id of "0"); the layout
	// only renders two digits, so rounds above 99 display incorrectly.
	bool do_work() override
	{
		Game::assets->create_prompt(round_id_, round_dest_);
		Game::assets->create_prompt(number_id_ + (round_number_ / 10), tens_dest_);
		Game::assets->create_prompt(number_id_ + (round_number_ % 10), ones_dest_);
		return true;
	}
	// Always reports full completion (one-shot process).
	float work_done() override { return 1; }
};
// Process that removes every entity in the prompts group, clearing all
// on-screen prompts.
class ClearPrompts : public Process
{
public:
	ClearPrompts()
	{}
	// Destroys all prompt entities; completes in a single step.
	// NOTE(review): del_group()/destroy() are invoked while range-iterating
	// the group — safe only if get_group() returns a copy or destruction is
	// deferred by the ECS manager; confirm.
	bool do_work() override
	{
		for (auto& p : manager.get_group(Game::group_prompts))
		{
			p->del_group(Game::group_prompts);
			p->destroy();
		}
		return true;
	}
	// Always reports full completion (one-shot process).
	float work_done() override { return 1; }
};
3b2666fe113d024ab5abf4059c279f736669afd9 | 1,378 | h | C | docs/atl/codesnippet/CPP/example-implementing-a-property-page_7.h | bobbrow/cpp-docs | 769b186399141c4ea93400863a7d8463987bf667 | [
"CC-BY-4.0",
"MIT"
] | 965 | 2017-06-25T23:57:11.000Z | 2022-03-31T14:17:32.000Z | docs/atl/codesnippet/CPP/example-implementing-a-property-page_7.h | bobbrow/cpp-docs | 769b186399141c4ea93400863a7d8463987bf667 | [
"CC-BY-4.0",
"MIT"
] | 3,272 | 2017-06-24T00:26:34.000Z | 2022-03-31T22:14:07.000Z | docs/atl/codesnippet/CPP/example-implementing-a-property-page_7.h | bobbrow/cpp-docs | 769b186399141c4ea93400863a7d8463987bf667 | [
"CC-BY-4.0",
"MIT"
] | 951 | 2017-06-25T12:36:14.000Z | 2022-03-26T22:49:06.000Z | STDMETHOD(Apply)(void)
{
// If we don't have any objects, this method should not be called
if (!m_ppUnk)
return E_UNEXPECTED;
// Use Apply to validate the user's settings and update the objects'
// properties
// Check whether we need to update the object
// Quite important since standard property frame calls Apply
// when it doesn't need to
if (!m_bDirty)
return S_OK;
HRESULT hr = E_UNEXPECTED;
// Get a pointer to the document
CComQIPtr<EnvDTE::Document> pDoc(m_ppUnk[0]);
if (!pDoc)
return hr;
// Get the read-only setting
VARIANT_BOOL bReadOnly = IsDlgButtonChecked(IDC_READONLY) ? VARIANT_TRUE : VARIANT_FALSE;
// Get the file name
CComBSTR bstrName;
if (!GetDlgItemText(IDC_NAME, bstrName.m_str))
return E_FAIL;
// Set the read-only property
if (bReadOnly != m_bReadOnly)
{
hr = pDoc->put_ReadOnly(bReadOnly);
if (FAILED(hr))
return hr;
}
// Save the document
if (bstrName != m_bstrFullName)
{
EnvDTE::vsSaveStatus status;
hr = pDoc->Save(bstrName, &status);
if (FAILED(hr))
return hr;
}
// Clear the dirty status of the property page
SetDirty(false);
return S_OK;
} | 26.5 | 95 | 0.582729 |
b6159073a5712faadaaf46cc97cc3d7eb8f369b2 | 392 | h | C | Sorting_Visualizer/Application.h | Nick-Mikrou/Sorting_Visualizer | cf7f5a7610bcb66c3c54e5785b10ede47baecbfa | [
"MIT"
] | null | null | null | Sorting_Visualizer/Application.h | Nick-Mikrou/Sorting_Visualizer | cf7f5a7610bcb66c3c54e5785b10ede47baecbfa | [
"MIT"
] | null | null | null | Sorting_Visualizer/Application.h | Nick-Mikrou/Sorting_Visualizer | cf7f5a7610bcb66c3c54e5785b10ede47baecbfa | [
"MIT"
] | null | null | null | #pragma once
#include <SFML/Graphics.hpp>
#include <iostream>
#include "Menu.h"
#include "Sorter.h"
class Application
{
private:
sf::RenderWindow Window;
unsigned int width, height;
std::string title;
Menu menu;
Sorter sorter;
public:
Application(const std::string&, const unsigned int&, const unsigned int&) noexcept;
// Constructor
void Run() noexcept;
// Render
}; | 17.043478 | 85 | 0.701531 |
d7ed4efb68c83e5687e605674bdf5c68dfd3a217 | 5,592 | c | C | extracted/plugins/SurfacePlugin/src/common/sqManualSurface.c | qbit/opensmalltalk-vm | c729640aa6235e9b9152d24e323c5736d88ee680 | [
"MIT"
] | 59 | 2019-08-12T16:46:24.000Z | 2022-03-04T17:48:06.000Z | extracted/plugins/SurfacePlugin/src/common/sqManualSurface.c | qbit/opensmalltalk-vm | c729640aa6235e9b9152d24e323c5736d88ee680 | [
"MIT"
] | 226 | 2019-08-18T08:33:43.000Z | 2022-03-29T17:42:08.000Z | extracted/plugins/SurfacePlugin/src/common/sqManualSurface.c | qbit/opensmalltalk-vm | c729640aa6235e9b9152d24e323c5736d88ee680 | [
"MIT"
] | 33 | 2019-08-27T22:20:33.000Z | 2022-02-16T18:37:08.000Z | #include "sq.h"
#include "sqVirtualMachine.h"
extern struct VirtualMachine* interpreterProxy;
#ifndef NULL
#define NULL 0
#endif
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
#include "pharovm/debug.h"
/* The following are for creating, manipulating, and destroying
* "manual surfaces". These are surfaces that are managed by Squeak code,
* which has manual control over the memory location where the image data is
* stored (the pointer used may be obtained via FFI calls, or other means).
*
* Upon creation, no memory is allocated for the surface. Squeak code is
* responsible for passing in a pointer to the memory to use. It is OK to set
* the pointer to different values, or to NULL. If the pointer is NULL, then
* BitBlt calls to ioLockSurface() will fail.
*
* createManualFunction() returns a non-negative surface ID if successful, and
* -1 otherwise. The other return true for success, and false for failure.
*/
#include "SurfacePlugin.h"
int createManualSurface(int width, int height, int rowPitch, int depth, int isMSB);
int destroyManualSurface(int surfaceID);
int setManualSurfacePointer(int surfaceID, void* ptr);
EXPORT(int) ioRegisterSurface(sqIntptr_t surfaceHandle, sqSurfaceDispatch *fn, int *surfaceID);
EXPORT(int) ioUnregisterSurface(int surfaceID);
EXPORT(int) ioFindSurface(int surfaceID, sqSurfaceDispatch *fn, sqIntptr_t *surfaceHandle);
/* This is the structure that represents a "manual surface". These are
created/destroyed by new primitives in this plugin. During its life-time,
it may be touched directly from Squeak code to set/clear "ptr", and also
treated as a generic surface via BitBlt's use of the SurfacePlugin. */
typedef struct {
	int width;     /* Surface width in pixels. */
	int height;    /* Surface height in pixels. */
	int rowPitch;  /* Row pitch handed to BitBlt on lock (units set by creator — presumably bytes; confirm). */
	int depth;     /* Pixel depth as reported to the SurfacePlugin. */
	int isMSB;     /* Nonzero for most-significant-byte-first pixel data. */
	void* ptr;     /* Squeak-managed pixel memory; may be NULL (locking then fails). */
	int isLocked;  /* Nonzero while BitBlt holds the surface locked. */
} ManualSurface;
/* Create the dispatch-table that SurfacePlugin will use to interact with
instances of "struct ManualSurface" */
static int manualSurfaceGetFormat(sqIntptr_t, int*, int*, int*, int*);
static sqIntptr_t manualSurfaceLock(sqIntptr_t, int *, int, int, int, int);
static int manualSurfaceUnlock(sqIntptr_t, int, int, int, int);
static int manualSurfaceShow(sqIntptr_t, int, int, int, int);
static sqSurfaceDispatch manualSurfaceDispatch = {
	1, /* presumably the dispatch-table major version — confirm against sqSurfaceDispatch */
	0, /* presumably the minor version */
	manualSurfaceGetFormat,
	manualSurfaceLock,
	manualSurfaceUnlock,
	manualSurfaceShow
};
/* sqSurfaceDispatch functions *****************************************************************************/
/* Report the surface's geometry and pixel format to SurfacePlugin.
   surfaceArg is the ManualSurface handle registered in createManualSurface.
   Always succeeds and returns 1. */
int manualSurfaceGetFormat(sqIntptr_t surfaceArg, int* width, int* height, int* depth, int* isMSB) {
	ManualSurface* surface = (ManualSurface *)surfaceArg;
	*width = surface->width;
	*height = surface->height;
	*depth = surface->depth;
	*isMSB = surface->isMSB;
	/* The out-parameters are plain ints, so print them with %d: the previous
	   %ld conversions were undefined behaviour on LP64 targets. */
	logTrace("Getting Surface Format: %" PRIxSQPTR " %d %d %d %d\n", (sqIntptr_t) surface, *width, *height, *depth, *isMSB);
	return 1;
}
/* Lock the surface for BitBlt access.
   Returns the pixel buffer pointer (as an sqIntptr_t) on success, or 0 if
   the surface is already locked or has no buffer attached.  On success
   *pitch receives the row pitch in bytes; the x/y/w/h rectangle is only
   logged — the whole buffer is exposed to the caller. */
sqIntptr_t manualSurfaceLock(sqIntptr_t surfaceArg, int *pitch, int x, int y, int w, int h) {
	ManualSurface* surface = (ManualSurface *)surfaceArg;
	/* Ideally, this test-and-set would be atomic.  But it doesn't matter for
	   the foreseeable future, since it is only called via BitBlt primitives. */
	int wasLocked = surface->isLocked;
	surface->isLocked = 1;
	/* Can't lock if it was already locked. */
	if (wasLocked) return 0;
	/* If there is no pointer, the lock-attempt fails. */
	if (!surface->ptr) {
		surface->isLocked = 0;
		return 0;
	}
	/* Success! Return the pointer. */
	*pitch = surface->rowPitch;
	/* x/y/w/h and *pitch are ints, so use %d (the old %ld was UB on LP64). */
	logTrace("Locked Surface: %" PRIxSQPTR " Input Rect: %d %d %d %d Row Pitch: %d\n", (sqIntptr_t) surface, x, y, w, h, *pitch);
	return (sqIntptr_t)(surface->ptr);
}
/* Release the lock taken in manualSurfaceLock.  The rectangle is only
   logged.  Always returns 1. */
int manualSurfaceUnlock(sqIntptr_t surfaceArg, int x, int y, int w, int h) {
	ManualSurface* surface = (ManualSurface *)surfaceArg;
	surface->isLocked = 0;
	/* x/y/w/h are ints, so use %d (the old %ld was UB on LP64). */
	logTrace("Unlocked Surface: %" PRIxSQPTR " Rect: %d %d %d %d\n", (sqIntptr_t) surface, x, y, w, h);
	return 1;
}
/* "Show" is not supported for manual surfaces: the client code owns the
   buffer and is responsible for presenting it.  Always returns 0 (failure). */
int manualSurfaceShow(sqIntptr_t surfaceArg, int x, int y, int w, int h) {
	/* Unsupported */
	return 0;
}
/* primitive interface functions (i.e. called from Squeak) *********************************************/
/* Answer non-negative surfaceID if successful, and -1 for failure. */
/* Create and register a new manual surface with the given geometry.
   No pixel memory is allocated; the buffer pointer starts out NULL and must
   be provided later via setManualSurfacePointer().
   Returns a non-negative surfaceID on success, -1 on invalid arguments,
   allocation failure, or registration failure. */
int createManualSurface(int width, int height, int rowPitch, int depth, int isMSB) {
	ManualSurface* newSurface;
	int surfaceID, result;
	/* Validate depth first: it participates in the rowPitch check below,
	   so it must be known-sane before being used. */
	if (depth < 1 || depth > 32) return -1;
	if (width < 0) return -1;
	if (height < 0) return -1;
	/* The pitch must cover at least one full row of pixels. */
	if (rowPitch < (width*depth)/8) return -1;
	newSurface = (ManualSurface*)malloc(sizeof(ManualSurface));
	if (!newSurface) return -1;
	newSurface->width = width;
	newSurface->height = height;
	newSurface->rowPitch = rowPitch;
	newSurface->depth = depth;
	newSurface->isMSB = isMSB;
	newSurface->ptr = NULL;
	newSurface->isLocked = FALSE;
	result = ioRegisterSurface((sqIntptr_t)newSurface, &manualSurfaceDispatch, &surfaceID);
	if (!result) {
		/* Failed to register surface. */
		free(newSurface);
		return -1;
	}
	return surfaceID;
}
/* Unregister the surface from SurfacePlugin.  Returns the result of
   ioUnregisterSurface (non-zero on success).
   NOTE(review): the ManualSurface struct malloc'd in createManualSurface is
   not freed here — this leaks unless ioUnregisterSurface releases the
   handle's memory; confirm against the SurfacePlugin implementation. */
int destroyManualSurface(int surfaceID) {
	return ioUnregisterSurface(surfaceID);
}
/* Attach (or detach, with ptr == NULL) the externally managed pixel buffer
   of an existing manual surface.  The pointer may not be changed while the
   surface is locked by BitBlt.
   Returns TRUE on success, FALSE if the surface is unknown or locked. */
int setManualSurfacePointer(int surfaceID, void* ptr) {
	sqIntptr_t surfaceHandle;
	ManualSurface *surface;
	int result;
	result = ioFindSurface(surfaceID, NULL, &surfaceHandle);
	if (!result) return FALSE; /* failed to find surface */
	surface = (ManualSurface*)surfaceHandle;
	if (surface->isLocked) return FALSE; /* can't set pointer while surface is locked */
	surface->ptr = ptr;
	/* surfaceID is an int, so use %x (the old %lx was UB on LP64). */
	logTrace("Set Surface: %x Pointer: %" PRIxSQPTR "\n", surfaceID, (sqIntptr_t)ptr);
	return TRUE;
}
| 33.285714 | 132 | 0.712268 |
acfa8a7841bf6f54a210ef15187a2384858287af | 3,298 | h | C | SGPLibraryCode/modules/sgp_particle/extension/modifiers/sgp_SPARK_LinearForce.h | phoenixzz/VoronoiMapGen | 5afd852f8bb0212baba9d849178eb135f62df903 | [
"MIT"
] | 11 | 2017-03-03T03:31:15.000Z | 2019-03-01T17:09:12.000Z | SGPLibraryCode/modules/sgp_particle/extension/modifiers/sgp_SPARK_LinearForce.h | phoenixzz/VoronoiMapGen | 5afd852f8bb0212baba9d849178eb135f62df903 | [
"MIT"
] | null | null | null | SGPLibraryCode/modules/sgp_particle/extension/modifiers/sgp_SPARK_LinearForce.h | phoenixzz/VoronoiMapGen | 5afd852f8bb0212baba9d849178eb135f62df903 | [
"MIT"
] | 2 | 2017-03-03T03:31:17.000Z | 2021-05-27T21:50:43.000Z | #ifndef __SGP_SPARKLINEARFORCE_HEADER__
#define __SGP_SPARKLINEARFORCE_HEADER__
// An enum defining the way a factor is applied to a force
enum ForceFactor
{
FACTOR_NONE, /**< No factor is applied */
FACTOR_LINEAR, /**< A linear factor is applied */
FACTOR_SQUARE, /**< A square factor is applied */
};
/**
* A Modifier applying a linear force on particles
*
* The force is identical from any points of the universe (only if the modifier is triggered).
* In that way, particles under the influence of a LinearForce can theorically reach an infinite speed if not under the influence of a friction.
* The force can be multiplied or not by a particle parameter either linearly or squared.
* Note that this Modifier can be used to set a global gravity that can be updated for all groups at a time.
* To do so, the LinearForce has to be used with the param : PARAM_MASS and the FactorType FACTOR_LINEAR.
*/
class SGP_API LinearForce : public Modifier
{
SPARK_IMPLEMENT_REGISTERABLE(LinearForce)
public:
// zone : the Zone of the LinearForce or NULL if no Zone
// trigger : the trigger of the LinearForce
// force : the force of the LinearForce
// type : the type of multiplier for the factor
// param : the parameter used as the factor (if type != FACTOR_NONE)
LinearForce(Zone* zone = NULL,
ModifierTrigger trigger = INSIDE_ZONE,
const Vector3D& force = Vector3D(),
ForceFactor type = FACTOR_NONE,
ModelParam param = PARAM_SIZE);
static LinearForce* create(Zone* zone = NULL,
ModifierTrigger trigger = INSIDE_ZONE,
const Vector3D& force = Vector3D(),
ForceFactor type = FACTOR_NONE,
ModelParam param = PARAM_SIZE);
void setForce(const Vector3D& force);
/**
* Sets the factor type to apply to the force
* type : the type of multiplier for the factor
* param : the parameter of the Particle to use as the factor
*/
void setFactor(ForceFactor type, ModelParam param = PARAM_SIZE);
const Vector3D& getForce() const;
const Vector3D& getTransformedForce() const;
ForceFactor getFactorType() const;
ModelParam getFactorParam() const;
protected:
virtual void innerUpdateTransform();
private:
Vector3D force;
Vector3D tForce;
ForceFactor factorType;
ModelParam factorParam;
virtual void modify(Particle& particle, float deltaTime) const;
};
// Factory: constructs a LinearForce and registers it with the SPARK
// registerable-object system before handing it back to the caller.
inline LinearForce* LinearForce::create(Zone* _zone, ModifierTrigger _trigger, const Vector3D& _force, ForceFactor _type, ModelParam _param)
{
	LinearForce* obj = new LinearForce(_zone, _trigger, _force, _type, _param);
	registerObject(obj);
	return obj;
}
// Sets the force vector.  The cached world-space force is reset to the raw
// value here; notifyForUpdate() schedules innerUpdateTransform() to re-rotate
// it into world coordinates.
inline void LinearForce::setForce(const Vector3D& force)
{
	this->force = tForce = force;
	notifyForUpdate();
}
// Sets the factor type and the particle parameter used as the factor.
// NOTE(review): unlike setForce, this does not call notifyForUpdate();
// presumably the factor does not affect the cached transform — confirm.
inline void LinearForce::setFactor(ForceFactor type, ModelParam param)
{
	factorType = type;
	factorParam = param;
}
// Returns the force vector in local (untransformed) coordinates.
inline const Vector3D& LinearForce::getForce() const
{
	return force;
}

// Returns the force vector rotated into world coordinates
// (kept up to date by innerUpdateTransform()).
inline const Vector3D& LinearForce::getTransformedForce() const
{
	return tForce;
}

// Returns how the factor parameter is applied (none / linear / square).
inline ForceFactor LinearForce::getFactorType() const
{
	return factorType;
}

// Returns the particle parameter used as the factor.
inline ModelParam LinearForce::getFactorParam() const
{
	return factorParam;
}
// Recomputes the world-space force: starts again from the raw local force
// and rotates it by the modifier's current world transform.
inline void LinearForce::innerUpdateTransform()
{
	Modifier::innerUpdateTransform();
	tForce = force;
	tForce.RotateWith(getWorldTransform());
}
#endif // __SGP_SPARKLINEARFORCE_HEADER__ | 24.61194 | 143 | 0.757126 |
a452835ee966f164281cdda646288efd119bd5bb | 85 | h | C | solve.h | chucktilbury/infix-to-rpn | 1be3b65602a669c9b9a766e8ae281c8d125206dd | [
"MIT"
] | null | null | null | solve.h | chucktilbury/infix-to-rpn | 1be3b65602a669c9b9a766e8ae281c8d125206dd | [
"MIT"
] | null | null | null | solve.h | chucktilbury/infix-to-rpn | 1be3b65602a669c9b9a766e8ae281c8d125206dd | [
"MIT"
] | null | null | null | #ifndef __SOLVE_H__
#define __SOLVE_H__
#include "queue.h"
int solve(queue);
#endif | 12.142857 | 19 | 0.764706 |
00a48f479f458adf80b9b94e12cdaa14a17f29e9 | 6,164 | c | C | test/util-endian.c | JoachimDuquesne/lely | cc6bad10ba57e380386622211e603006eeee0fff | [
"Apache-2.0"
] | 4 | 2020-12-27T11:31:57.000Z | 2022-02-09T11:32:08.000Z | test/util-endian.c | DroidDrive/lely-core | 2ec4560f513264a53d2afaedecdae4a49a39023c | [
"Apache-2.0"
] | null | null | null | test/util-endian.c | DroidDrive/lely-core | 2ec4560f513264a53d2afaedecdae4a49a39023c | [
"Apache-2.0"
] | 1 | 2022-01-03T01:41:59.000Z | 2022-01-03T01:41:59.000Z | #include "test.h"
#include <lely/util/endian.h>
#define VALUE UINT64_C(0x0123456789abcdef)
#define BVAL_BE \
{ \
0x00, 0x3c, 0xda, 0xb8, 0x96, 0x74, 0x52, 0x00 \
}
#define BVAL_LE \
{ \
0x00, 0xdc, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x02 \
}
int
main(void)
{
	/* Four test groups below: single-bit and multi-bit copies, for both the
	 * big-endian (bcpybe) and little-endian (bcpyle) bit-copy helpers. */
	tap_plan(4);

	// clang-format off
	/* rev8[i] is byte i with its bit order reversed; it turns a
	 * little-endian bit index into the corresponding MSB-first bit pattern
	 * for the big-endian (bcpybe) test groups. */
	static const uint_least8_t rev8[] = {
		0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
		0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
		0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
		0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
		0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
		0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
		0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
		0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
		0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
		0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
		0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
		0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
		0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
		0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
		0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
		0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
		0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
		0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
		0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
		0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
		0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
		0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
		0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
		0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
		0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
		0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
		0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
		0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
		0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
		0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
		0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
		0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff
	};
	// clang-format on
	int ok;
	/* chk and src carry one spare byte before and after the 8-byte payload
	 * so the same pattern can also be addressed from a base shifted by one
	 * byte (the "+ 1" / "- 8" call variants exercised below). */
	uint_least8_t chk[1 + 8 + 1];
	uint_least8_t src[1 + 8 + 1];
	uint_least8_t dst[8];

	/* Smoke test: a zero-length copy with null pointers must not crash. */
	bcpybe(0, 0, 0, 0, 0);

	/* Group 1 (big-endian, single bit): copy each source bit position to
	 * each destination bit position and compare against the expected
	 * pattern, from all four shifted base/offset combinations. */
	ok = 1;
	for (int dstbit = 0; dstbit < 8 * 8 && ok; dstbit++) {
		memset(chk, 0, sizeof(chk));
		chk[dstbit / 8] = rev8[1 << (dstbit % 8)];
		for (int srcbit = 0; srcbit < 8 * 8 && ok; srcbit++) {
			memset(src, 0, sizeof(src));
			src[srcbit / 8] = rev8[1 << (srcbit % 8)];
			memset(dst, 0, sizeof(dst));
			bcpybe(dst, dstbit, src, srcbit, 1);
			ok = ok && !memcmp(dst, chk, sizeof(dst));
			memset(dst, 0, sizeof(dst));
			bcpybe(dst + 1, dstbit - 8, src, srcbit, 1);
			ok = ok && !memcmp(dst, chk, sizeof(dst));
			memset(dst, 0, sizeof(dst));
			bcpybe(dst, dstbit, src + 1, srcbit - 8, 1);
			ok = ok && !memcmp(dst, chk, sizeof(dst));
			memset(dst, 0, sizeof(dst));
			bcpybe(dst + 1, dstbit - 8, src + 1, srcbit - 8, 1);
			ok = ok && !memcmp(dst, chk, sizeof(dst));
		}
	}
	tap_test(ok);

	/* Group 2 (big-endian, multi-bit): copy (n+1)-bit fields containing a
	 * run of n set bits at every source/destination offset. */
	ok = 1;
	for (int n = 1; n <= 16 && ok; n++) {
		uint_least32_t mask = (1ul << n) - 1;
		for (int dstbit = 0; dstbit < (8 * 8 - n) && ok; dstbit++) {
			memset(chk, 0, sizeof(chk));
			uint_least32_t val = mask << (dstbit % 8);
			chk[(dstbit / 8) + 0] = rev8[val & 0xff];
			chk[(dstbit / 8) + 1] = rev8[(val >> 8) & 0xff];
			chk[(dstbit / 8) + 2] = rev8[(val >> 16) & 0xff];
			for (int srcbit = 0; srcbit < (8 * 8 - n) && ok;
					srcbit++) {
				memset(src, 0, sizeof(src));
				val = mask << (srcbit % 8);
				src[(srcbit / 8) + 0] = rev8[val & 0xff];
				src[(srcbit / 8) + 1] = rev8[(val >> 8) & 0xff];
				src[(srcbit / 8) + 2] =
						rev8[(val >> 16) & 0xff];
				memset(dst, 0, sizeof(dst));
				bcpybe(dst, dstbit, src, srcbit, n + 1);
				ok = ok && !memcmp(dst, chk, sizeof(dst));
				memset(dst, 0, sizeof(dst));
				bcpybe(dst + 1, dstbit - 8, src, srcbit, n + 1);
				ok = ok && !memcmp(dst, chk, sizeof(dst));
				memset(dst, 0, sizeof(dst));
				bcpybe(dst, dstbit, src + 1, srcbit - 8, n + 1);
				ok = ok && !memcmp(dst, chk, sizeof(dst));
				memset(dst, 0, sizeof(dst));
				bcpybe(dst + 1, dstbit - 8, src + 1, srcbit - 8,
						n + 1);
				ok = ok && !memcmp(dst, chk, sizeof(dst));
			}
		}
	}
	tap_test(ok);

	/* Smoke test for the little-endian variant. */
	bcpyle(0, 0, 0, 0, 0);

	/* Group 3 (little-endian, single bit): as group 1, but without the
	 * rev8[] bit reversal since bcpyle uses LSB-first bit numbering. */
	ok = 1;
	for (int dstbit = 0; dstbit < 8 * 8 && ok; dstbit++) {
		memset(chk, 0, sizeof(chk));
		chk[dstbit / 8] = 1 << (dstbit % 8);
		for (int srcbit = 0; srcbit < 8 * 8 && ok; srcbit++) {
			memset(src, 0, sizeof(src));
			src[srcbit / 8] = 1 << (srcbit % 8);
			memset(dst, 0, sizeof(dst));
			bcpyle(dst, dstbit, src, srcbit, 1);
			ok = ok && !memcmp(dst, chk, sizeof(dst));
			memset(dst, 0, sizeof(dst));
			bcpyle(dst + 1, dstbit - 8, src, srcbit, 1);
			ok = ok && !memcmp(dst, chk, sizeof(dst));
			memset(dst, 0, sizeof(dst));
			bcpyle(dst, dstbit, src + 1, srcbit - 8, 1);
			ok = ok && !memcmp(dst, chk, sizeof(dst));
			memset(dst, 0, sizeof(dst));
			bcpyle(dst + 1, dstbit - 8, src + 1, srcbit - 8, 1);
			ok = ok && !memcmp(dst, chk, sizeof(dst));
		}
	}
	tap_test(ok);

	/* Group 4 (little-endian, multi-bit): as group 2, LSB-first. */
	ok = 1;
	for (int n = 1; n <= 16 && ok; ++n) {
		uint_least32_t mask = (1u << n) - 1;
		for (int dstbit = 0; dstbit < (8 * 8 - n) && ok; ++dstbit) {
			memset(chk, 0, sizeof(chk));
			uint_least32_t val = mask << (dstbit % 8);
			chk[(dstbit / 8) + 0] = val & 0xff;
			chk[(dstbit / 8) + 1] = (val >> 8) & 0xff;
			chk[(dstbit / 8) + 2] = (val >> 16) & 0xff;
			for (int srcbit = 0; srcbit < (8 * 8 - n) && ok;
					++srcbit) {
				memset(src, 0, sizeof(src));
				val = mask << (srcbit % 8);
				src[(srcbit / 8) + 0] = val & 0xff;
				src[(srcbit / 8) + 1] = (val >> 8) & 0xff;
				src[(srcbit / 8) + 2] = (val >> 16) & 0xff;
				memset(dst, 0, sizeof(dst));
				bcpyle(dst, dstbit, src, srcbit, n + 1);
				ok = ok && !memcmp(dst, chk, sizeof(dst));
				memset(dst, 0, sizeof(dst));
				bcpyle(dst + 1, dstbit - 8, src, srcbit, n + 1);
				ok = ok && !memcmp(dst, chk, sizeof(dst));
				memset(dst, 0, sizeof(dst));
				bcpyle(dst, dstbit, src + 1, srcbit - 8, n + 1);
				ok = ok && !memcmp(dst, chk, sizeof(dst));
				memset(dst, 0, sizeof(dst));
				bcpyle(dst + 1, dstbit - 8, src + 1, srcbit - 8,
						n + 1);
				ok = ok && !memcmp(dst, chk, sizeof(dst));
			}
		}
	}
	tap_test(ok);

	return 0;
}
| 31.131313 | 62 | 0.555159 |
93f88d091c0e6863ef3aab3be947c17dc2e2ebc1 | 1,832 | h | C | include/pola/utils/WStringImpl.h | lij0511/pandora | 5988618f29d2f1ba418ef54a02e227903c1e7108 | [
"Apache-2.0"
] | null | null | null | include/pola/utils/WStringImpl.h | lij0511/pandora | 5988618f29d2f1ba418ef54a02e227903c1e7108 | [
"Apache-2.0"
] | null | null | null | include/pola/utils/WStringImpl.h | lij0511/pandora | 5988618f29d2f1ba418ef54a02e227903c1e7108 | [
"Apache-2.0"
] | null | null | null | /*
* WStringImpl.h
*
* Created on: 2016年7月13日
* Author: lijing
*/
#ifndef POLA_WSTRINGIMPL_H_
#define POLA_WSTRINGIMPL_H_
#include "pola/utils/RefBase.h"
#include "pola/utils/JenkinsHash.h"
#include "pola/utils/Unicode.h"
size_t wstrlen(const wchar* wstr);
namespace pola {
namespace utils {
void utf8_to_utf16(const char* u8str, size_t u8strlen, wchar*& u16str, size_t& u16strlen);
void utf16_to_utf8(const wchar* u16str, size_t u16strlen, char*& u8str, size_t& u8strlen);
class WString;
/*
*
*/
/*
 * Reference-counted immutable wide-character string body.  Instances are
 * created only through the static factories (construction is private; the
 * WString wrapper class is a friend).
 */
class WStringImpl: public RefBase {
private:
	friend class WString;

	// Construction is private; use the static create()/emptyString() factories.
	WStringImpl();
	WStringImpl(const wchar* chars, size_t length);
	WStringImpl(const wchar* str);
public:

	// Shared empty-string instance.
	static WStringImpl* emptyString();
	// Factory from an explicit character buffer and length.
	static WStringImpl* create(const wchar* chars, size_t length);
	// Factory from a NUL-terminated wide string.
	static WStringImpl* create(const wchar* str);

	~WStringImpl();

	void print() const;

	// Length in wchar units.
	size_t length() const;
	bool isEmpty() const;
	// Character at index; index is validated via checkIndex() — TODO confirm in the .cpp.
	wchar charAt(size_t index) const;
	// Raw character data.
	const wchar* characters() const;

	bool startsWith(const WStringImpl& str, size_t start = 0) const;
	bool endsWith(const WStringImpl& str) const;
	bool contains(const WStringImpl& str) const;

	/**
	 * operator compares (delegate to compare()).
	 */
	bool operator==(const WStringImpl& s) const;
	bool operator!=(const WStringImpl& s) const;
	bool operator>(const WStringImpl& s) const;
	bool operator>=(const WStringImpl& s) const;
	bool operator<(const WStringImpl& s) const;
	bool operator<=(const WStringImpl& s) const;

	// Lazily computed hash (cached in m_hash, hence mutable).
	hash_t hash() const;
private:
	// Three-way lexicographic comparison.
	int32_t compare(const WStringImpl& s) const;

	void checkIndex(size_t index) const {
		// NOTE(review): %d with size_t arguments is a format mismatch on
		// LP64 targets — should presumably be %zu; confirm LOG_FATAL_IF's
		// format semantics before changing.
		LOG_FATAL_IF(index >= length(), "IndexOutOfBounds index=%d, length=%d!\n", index, m_length);
	}

	const wchar* m_data;    // character storage (immutable)
	size_t m_length;        // number of wchar units in m_data
	mutable hash_t m_hash;  // cached hash value
};
} /* namespace utils */
} /* namespace pola */
#endif /* POLA_WSTRINGIMPL_H_ */
| 21.809524 | 94 | 0.725983 |
2f5539d99d73728108cea3fd2e09c68ec71199ba | 2,041 | c | C | source/SoBkgManager.c | JaapSuter/SGADE | 2291fdb51bacb013eecd59d9681b7cf0eaf19f27 | [
"Zlib"
] | 5 | 2017-03-21T08:17:59.000Z | 2020-05-09T23:26:41.000Z | source/SoBkgManager.c | JaapSuter/SGADE | 2291fdb51bacb013eecd59d9681b7cf0eaf19f27 | [
"Zlib"
] | null | null | null | source/SoBkgManager.c | JaapSuter/SGADE | 2291fdb51bacb013eecd59d9681b7cf0eaf19f27 | [
"Zlib"
] | null | null | null | // ----------------------------------------------------------------------------
/*!
Copyright (C) 2002 by the SGADE authors
For conditions of distribution and use, see copyright notice in SoLicense.txt
\file SoBkgManager.c
\author Jaap Suter
\date Feb 5 2002
\ingroup SoBkgManager
See the \a SoBkgManager module for more information.
*/
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// Includes
// ----------------------------------------------------------------------------
#include "SoBkgManager.h"
#include "SoDisplay.h"
#include "SoDebug.h"
// ----------------------------------------------------------------------------
// Function implementations.
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
/*!
\brief Sets the background mosaic values
\param a_HorizontalSize Horizontal Mosaic value, 0 is no mosaic value,
15 is maximum value.
\param a_VerticalSize Vertical Mosaic value, 0 is no mosaic value,
15 is maximum value.
*/
// ----------------------------------------------------------------------------
void SoBkgManagerSetMosaic( u32 a_HorizontalSize, u32 a_VerticalSize )
{
	u32 mosaicBits;

	// Each stretch value occupies only four bits, so 15 is the maximum;
	SO_ASSERT( a_HorizontalSize < 16, "Mosaic size can't be bigger than 15" );
	SO_ASSERT( a_VerticalSize < 16, "Mosaic size can't be bigger than 15" );

	// Pack the horizontal size into bits 0..3 and the vertical size into bits 4..7;
	mosaicBits = a_HorizontalSize | (a_VerticalSize << 4);

	// Replace the background byte of the readable shadow register;
	SO_REG_DISP_MOSAIC_RW = (SO_REG_DISP_MOSAIC_RW & ~(SO_8_BITS)) | mosaicBits;

	// The real register is write-only, so commit the shadow copy to hardware;
	SO_REG_DISP_MOSAIC_W = SO_REG_DISP_MOSAIC_RW;
}
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// EOF
// ----------------------------------------------------------------------------
| 37.109091 | 79 | 0.416463 |
13b366f15bcdf0557041c323b62715c2311ff2fb | 6,962 | c | C | src/ex8_get_cert_and_set_label.c | AktivCo/pkcs11_for_dummies | 650ecb496f7f80ba2bd22659e4a92f63889d2bca | [
"OpenSSL"
] | null | null | null | src/ex8_get_cert_and_set_label.c | AktivCo/pkcs11_for_dummies | 650ecb496f7f80ba2bd22659e4a92f63889d2bca | [
"OpenSSL"
] | null | null | null | src/ex8_get_cert_and_set_label.c | AktivCo/pkcs11_for_dummies | 650ecb496f7f80ba2bd22659e4a92f63889d2bca | [
"OpenSSL"
] | null | null | null | /*************************************************************************
* Rutoken *
* Copyright (c) 2003-2021, Aktiv-Soft JSC. All rights reserved. *
* Подробная информация: http://www.rutoken.ru *
*------------------------------------------------------------------------*
* Пример работы с Рутокен при помощи библиотеки PKCS#11 на языке C *
*------------------------------------------------------------------------*
* Пример чтения и установки атрибутов PKCS#11 объектов *
*************************************************************************/
#include <Common.h>
#include "utils.h"
int get_cert_and_set_label_on_slot(CK_SLOT_ID slot, char* pin);
int get_cert(CK_SESSION_HANDLE session, CK_OBJECT_HANDLE cert);
int set_cert_label(CK_SESSION_HANDLE session, CK_OBJECT_HANDLE cert);
/* Sample entry point: initializes PKCS#11, picks the first slot with a
   token, prints the certificate stored on it, and changes its label.
   Returns 0 on success, 1 on failure. */
int main(void)
{
	CK_SLOT_ID_PTR slots; // Array of slot identifiers
	CK_ULONG slotCount;   // Number of slot identifiers in the array
	char* pin = "12345678";
	int errorCode = 1;    // Error flag
	// (the unused CK_RV local from the original was removed)

	// Initialize the PKCS#11 library
	if (init_pkcs11())
		goto exit;

	// Obtain the list of slots
	if (get_slot_list(&slots, &slotCount))
		goto free_pkcs11;

	if (slotCount == 0) {
		printf("No token found\n");
		goto free_slots;
	}

	// Read the certificate stored on the token and change its label
	if (get_cert_and_set_label_on_slot(slots[0], pin))
		goto free_slots;

	errorCode = 0;

	/*************************************************************************
	* Free the memory allocated for the slot list                            *
	*************************************************************************/
free_slots:
	free(slots);

	/*************************************************************************
	* Deinitialize the library                                               *
	*************************************************************************/
free_pkcs11:
	free_pkcs11();

exit:
	if (errorCode) {
		printf("\n\nSome error occurred. Sample failed.\n");
	} else {
		printf("\n\nSample has been completed successfully.\n");
	}

	return errorCode;
}
/* Opens a read/write session on the given slot, performs user login with
   the given PIN, then prints the first certificate found on the token and
   updates its label.  Returns 0 on success, 1 on failure.
   NOTE(review): pin is passed to C_Login as char*; PKCS#11 declares
   CK_UTF8CHAR_PTR — an explicit cast would silence the conversion warning. */
int get_cert_and_set_label_on_slot(CK_SLOT_ID slot, char* pin)
{
	CK_SESSION_HANDLE session; // Handle of the opened session
	CK_OBJECT_HANDLE certificate;
	CK_RV rv;                  // PKCS#11 return code (only PKCS#11-defined errors can be returned)
	int errorCode = 1;

	/*************************************************************************
	* Open an RW session on the first available slot                         *
	*************************************************************************/
	rv = functionList->C_OpenSession(slot, CKF_SERIAL_SESSION | CKF_RW_SESSION, NULL_PTR, NULL_PTR, &session);
	CHECK_AND_LOG(" C_OpenSession", rv == CKR_OK, rvToStr(rv), exit);

	/*************************************************************************
	* Authenticate as the User                                               *
	*************************************************************************/
	rv = functionList->C_Login(session, CKU_USER, pin, strlen(pin));
	CHECK_AND_LOG(" C_Login (CKU_USER)", rv == CKR_OK, rvToStr(rv), close_session);

	if (find_certificate(session, &certificate)) {
		printf("No certificate found");
		goto logout;
	}

	if (get_cert(session, certificate))
		goto logout;

	if (set_cert_label(session, certificate))
		goto logout;

	errorCode = 0;

	/*************************************************************************
	* Reset the access rights (log out)                                      *
	*************************************************************************/
logout:
	rv = functionList->C_Logout(session);
	CHECK_RELEASE_AND_LOG(" C_Logout", rv == CKR_OK, rvToStr(rv), errorCode);

	/*************************************************************************
	* Close the opened session                                               *
	*************************************************************************/
close_session:
	rv = functionList->C_CloseSession(session);
	CHECK_RELEASE_AND_LOG(" C_CloseSession", rv == CKR_OK, rvToStr(rv), errorCode);

exit:
	return errorCode;
}
/* Reads the DER-encoded body of the given certificate object via the
   standard two-call C_GetAttributeValue pattern (size query, then fetch),
   converts it to PEM, and prints it.  Returns 0 on success, 1 on failure. */
int get_cert(CK_SESSION_HANDLE session, CK_OBJECT_HANDLE cert)
{
	CK_BYTE_PTR body = NULL_PTR;
	CK_ATTRIBUTE template[] = {
		{CKA_VALUE, NULL_PTR, 0}
	};
	char* certPem = NULL; /* initialized so early-failure paths never read garbage */
	CK_RV rv;
	int errorCode = 1;

	/*************************************************************************
	* Query the size of the certificate body                                 *
	*************************************************************************/
	rv = functionList->C_GetAttributeValue(session, cert, template, arraysize(template));
	CHECK_AND_LOG(" C_GetAttributeValue", rv == CKR_OK, rvToStr(rv), exit);

	body = (CK_BYTE_PTR) malloc(template[0].ulValueLen);
	CHECK(" malloc", body != NULL_PTR, exit); /* the original did not check the allocation */
	template[0].pValue = body;

	/*************************************************************************
	* Fetch the certificate body (DER-encoded)                               *
	*************************************************************************/
	rv = functionList->C_GetAttributeValue(session, cert, template, arraysize(template));
	CHECK_AND_LOG(" C_GetAttributeValue", rv == CKR_OK, rvToStr(rv), free_body);

	/*************************************************************************
	* Convert the buffer to PEM format and print it                          *
	*************************************************************************/
	GetCertAsPem(body, template[0].ulValueLen, &certPem);
	CHECK(" Get cert in PEM format", certPem != NULL, free_body);

	printf("\nCertificate request is:\n");
	printf("%s\n", certPem);

	errorCode = 0;
	printf("Getting cert body has been completed successfully.\n");

	free(certPem);
free_body:
	free(body);
exit:
	return errorCode;
}
/* Sets the CKA_LABEL attribute of the given certificate object to a fixed
   sample value.  Returns 0 on success, 1 on failure. */
int set_cert_label(CK_SESSION_HANDLE session, CK_OBJECT_HANDLE cert)
{
	CK_UTF8CHAR label[] = {"GOST certificate"};
	/* Fully braced initializer (the original relied on brace elision);
	   sizeof(label) - 1 excludes the trailing NUL, per PKCS#11 convention. */
	CK_ATTRIBUTE template[] = {
		{CKA_LABEL, label, sizeof(label) - 1}
	};
	CK_RV rv;
	int errorCode = 1;

	/*************************************************************************
	* Update the certificate label                                           *
	*************************************************************************/
	rv = functionList->C_SetAttributeValue(session, cert, template, arraysize(template));
	CHECK_AND_LOG(" C_SetAttributeValue", rv == CKR_OK, rvToStr(rv), exit);

	errorCode = 0;

exit:
	return errorCode;
}
| 36.835979 | 128 | 0.455616 |
4d8974e4d82f0de2cab34a052af8af5d44b7cd5f | 3,570 | c | C | src/postcard/collector.c | brucespang/ifip21-buffer-sizing | 5f7ae08f2bcdac10846316e7b9d0bd44bad12c51 | [
"Apache-2.0"
] | null | null | null | src/postcard/collector.c | brucespang/ifip21-buffer-sizing | 5f7ae08f2bcdac10846316e7b9d0bd44bad12c51 | [
"Apache-2.0"
] | null | null | null | src/postcard/collector.c | brucespang/ifip21-buffer-sizing | 5f7ae08f2bcdac10846316e7b9d0bd44bad12c51 | [
"Apache-2.0"
] | null | null | null | #include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <netdb.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>
/* Wire layout of a telemetry "postcard" datagram.  Packed to match the
 * on-the-wire bytes; multi-byte fields arrive in network byte order and are
 * converted after receipt (see postcard_collector).  The first three bytes
 * carry bit-packed fields extracted manually with shifts and masks. */
typedef struct __attribute__((packed)) {
  uint8_t unaligned_field_1; /* bits 6-4: was_dropped; bits 1-0: high bits of egress_port */
  uint8_t unaligned_field_2; /* bits 7-1: low bits of egress_port; bit 0: high bit of ingress_port */
  uint8_t unaligned_field_3; /* bits 7-1: low bits of ingress_port (used unshifted) */
  struct in_addr ipv4_src_addr;
  struct in_addr ipv4_dst_addr;
  uint16_t src_port;
  uint16_t dst_port;
  uint32_t seq_no;
  uint32_t ack_no;
  uint32_t queue_depth;      /* switch queue depth, in buffer cells */
  uint32_t timestamp;        /* switch-side timestamp — units not evident here; confirm against the data plane */
  uint32_t switch_id;
} postcard_hdr;
static uint64_t num_received;
/* Thread body: binds UDP port 4444, receives postcard datagrams forever,
 * decodes each one, and prints a single line per postcard to stdout.
 * Never returns; exits the process on any socket error.
 * The signature now matches the pthread start-routine type exactly —
 * the original empty-parens declaration made the pthread_create call
 * invoke it through an incompatible function type. */
__attribute__((noreturn)) static void* postcard_collector(void* arg) {
  (void)arg; /* unused; present only to satisfy the pthread signature */

  num_received = 0;

  int sock = socket(AF_INET, SOCK_DGRAM, 0);
  if (sock < 0) {
    perror("socket");
    exit(1);
  }

  int opt = 1;
  setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, (const void *)&opt , sizeof(int));

  struct sockaddr_in addr;
  addr.sin_family = AF_INET;
  addr.sin_addr.s_addr = htonl(INADDR_ANY);
  addr.sin_port = htons(4444);
  if (bind(sock, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
    perror("bind");
    exit(1);
  }

  char buf[1024];
  struct sockaddr_in client;
  socklen_t client_len = sizeof(client); /* recvfrom expects socklen_t, not uint32_t */
  while (true) {
    ssize_t received = recvfrom(sock, buf, 1024, 0, (struct sockaddr*) &client, &client_len);
    if (received < 0) {
      perror("recvfrom");
      exit(1);
    }
    /* Fixed datagram size emitted by the data plane — presumably includes
       bytes beyond the 35-byte postcard_hdr that are not parsed here. */
    assert(received == 43);

    struct timespec time;
    if (clock_gettime(CLOCK_MONOTONIC_RAW, &time) < 0) {
      perror("clock_gettime");
      exit(1);
    }

    num_received += 1;

    postcard_hdr* postcard = (postcard_hdr*)buf;
    /* Unpack the bit fields from the three leading bytes. */
    uint8_t was_dropped = (postcard->unaligned_field_1 & 0b01110000) >> 4;
    uint32_t egress_port = ((uint32_t)(postcard->unaligned_field_1 & 0b00000011) << 7) + ((uint32_t)(postcard->unaligned_field_2 & 0b11111110) >> 1);
    uint32_t ingress_port = ((uint32_t)(postcard->unaligned_field_2 & 0b00000001) << 8) + (uint32_t)(postcard->unaligned_field_3 & 0b11111110);
    /* Convert multi-byte fields from network to host byte order.
       Not converting ipv4 addrs since inet_ntoa requires network byte order. */
    postcard->src_port = ntohs(postcard->src_port);
    postcard->dst_port = ntohs(postcard->dst_port);
    postcard->seq_no = ntohl(postcard->seq_no);
    postcard->ack_no = ntohl(postcard->ack_no);
    postcard->queue_depth = ntohl(postcard->queue_depth);
    postcard->timestamp = ntohl(postcard->timestamp);
    postcard->switch_id = ntohl(postcard->switch_id);

    printf("%ld.%09ld: postcard: ", time.tv_sec, time.tv_nsec);
    printf("ipv4_src_addr=%s ", inet_ntoa(postcard->ipv4_src_addr));
    printf("ipv4_dst_addr=%s ", inet_ntoa(postcard->ipv4_dst_addr));
    printf("src_port=%d ", postcard->src_port);
    printf("dst_port=%u ", postcard->dst_port);
    printf("seq_no=%u ", postcard->seq_no);
    printf("ack_no=%u ", postcard->ack_no);
    printf("queue_depth_cells=%u ", postcard->queue_depth);
    printf("postcard_timestamp=%u ", postcard->timestamp);
    printf("switch_id=%u ", postcard->switch_id);
    printf("was_dropped=%u ", was_dropped);
    printf("egress_port=%u ", egress_port);
    printf("ingress_port=%u\n", ingress_port);
    fflush(stdout);
  }
}
/* Starts the collector thread, then prints the postcard arrival rate to
 * stderr once per second, forever. */
int main() {
  pthread_t collector;

  /* pthread_create reports failure via its return value (an errno-style
   * code) and does NOT set errno, so the original `< 0` check could never
   * fire and perror() would have printed an unrelated message. */
  int err = pthread_create(&collector, NULL, postcard_collector, NULL);
  if (err != 0) {
    fprintf(stderr, "pthread_create: %s\n", strerror(err));
    exit(1);
  }

  uint64_t last_received = 0;
  while (true) {
    /* PRIu64 keeps the format portable (the old %lu assumed LP64). */
    fprintf(stderr, "rate: %" PRIu64 " postcards/sec\n", num_received - last_received);
    last_received = num_received;
    sleep(1);
  }
}
9ef135bfb18b50e61cfc05cca4b20443f51507aa | 4,469 | h | C | elenasrc2/common/common.h | drkameleon/elena-lang | 8585e93a3bc0b19f8d60029ffbe01311d0b711a3 | [
"MIT"
] | null | null | null | elenasrc2/common/common.h | drkameleon/elena-lang | 8585e93a3bc0b19f8d60029ffbe01311d0b711a3 | [
"MIT"
] | null | null | null | elenasrc2/common/common.h | drkameleon/elena-lang | 8585e93a3bc0b19f8d60029ffbe01311d0b711a3 | [
"MIT"
] | null | null | null | //---------------------------------------------------------------------------
// E L E N A P r o j e c t: ELENA Common Library
//
// This file contains the common templates, classes,
// structures, functions and constants
// (C)2005-2017, by Alexei Rakov
//---------------------------------------------------------------------------
#ifndef commonH
#define commonH 1
// --- Common definitions ---
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef _WIN32
#include <wchar.h>
#define PATH_SEPARATOR '\\'
namespace _ELENA_
{
typedef wchar_t path_c;
typedef wchar_t wide_c;
typedef unsigned int unic_c;
typedef unsigned char uident_c;
typedef unsigned int ref_t;
typedef unsigned int pos_t;
typedef unsigned long long ref64_t;
typedef unsigned long long pos64_t;
typedef unsigned __int64 uint64_t;
// --- FileEncoding ---
enum FileEncoding { feAnsi = 0, feRaw = -1, feUTF8 = -2, feUTF16 = -3, feUTF32 = -4 };
}
#else
#include <climits>
#define PATH_SEPARATOR '/'
namespace _ELENA_
{
typedef char path_c;
typedef unsigned short wide_c;
typedef unsigned int unic_c;
typedef unsigned char uident_c;
typedef size_t ref_t;
typedef unsigned int pos_t;
typedef unsigned long long ref64_t;
typedef unsigned long long pos64_t;
typedef unsigned long long uint64_t;
// --- FileEncoding ---
enum FileEncoding { feUTF8 = 0, feRaw = -1, feUTF16 = -2, feUTF32 = -3 };
}
#endif
// --- Common headers ---
#include "tools.h"
#include "altstrings.h"
#include "streams.h"
#include "dump.h"
#include "lists.h"
#include "files.h"
#define DEFAULT_STR (_ELENA_::ident_t)NULL
namespace _ELENA_
{
// --- Common mapping type definitions ---
typedef Dictionary2D<ident_t, ident_t> ConfigSettings;
typedef _Iterator<ConfigSettings::VItem, _MapItem<ident_t, ConfigSettings::VItem>, ident_t> ConfigCategoryIterator;
// --- Base Config File ---
// --- _ConfigFile ---
// Abstract base for configuration files.  Concrete subclasses implement the
// pure-virtual node access; this class layers typed accessors (int / hex /
// bool) over the raw string settings.
class _ConfigFile
{
public:
   // Lightweight, non-owning cursor into the underlying document tree.
   // "owner" is the config file that produced the node; "reference" is an
   // opaque backend-specific handle (NULL means "no node").
   struct Node
   {
      _ConfigFile* owner;
      void* reference;

      // Text content of the node, or NULL if this is an empty/missing node.
      ident_t Content()
      {
         if (reference) {
            return owner->getNodeContent(reference);
         }
         else return NULL;
      }

      // Value of the named attribute on this node.
      ident_t Attribute(ident_t name)
      {
         return owner->getNodeAttribute(reference, name);
      }

      // Collects the child nodes matching key into list; true if any found.
      bool select(ident_t key, Map<ident_t, Node>& list)
      {
         return owner->select(*this, key, list);
      }

      Node(_ConfigFile* owner, void* reference)
      {
         this->owner = owner;
         this->reference = reference;
      }
      Node(_ConfigFile* owner)
      {
         this->owner = owner;
         this->reference = NULL;
      }
      Node()
      {
         owner = NULL;
         reference = NULL;
      }
   };

   typedef Map<ident_t, Node> Nodes;

   // Backend interface implemented by concrete config-file classes.
   virtual Node get(ident_t key) = 0;
   virtual bool select(ident_t key, Nodes& list) = 0;
   virtual bool select(Node root, ident_t subKey, Nodes& list) = 0;

   virtual ident_t getNodeContent(void* reference) = 0;
   virtual ident_t getNodeAttribute(void* reference, ident_t name) = 0;

   // Raw string setting for the key, or NULL if absent.
   virtual ident_t getSetting(ident_t key)
   {
      Node node = get(key);
      return node.Content();
   }

   // Setting parsed as a decimal integer; defaultValue if absent.
   virtual int getIntSetting(ident_t key, int defaultValue = 0)
   {
      ident_t value = getSetting(key);
      return emptystr(value) ? defaultValue : value.toInt();
   }

   // Setting parsed as a base-16 integer; defaultValue if absent.
   virtual int getHexSetting(ident_t key, int defaultValue = 0)
   {
      ident_t value = getSetting(key);
      return emptystr(value) ? defaultValue : value.toLong(16);
   }

   // Boolean setting: a present value is true exactly when it equals "-1"
   // (the project's convention for "true"); defaultValue if absent.
   // Note the int return type: it yields the compare() result for present
   // values and the bool default otherwise.
   virtual int getBoolSetting(ident_t key, bool defaultValue = false)
   {
      ident_t value = getSetting(key);
      if (!emptystr(value)) {
         return value.compare("-1");
      }
      else return defaultValue;
   }

   // Parses the file at path using the given encoding; true on success.
   virtual bool load(path_t path, int encoding) = 0;

   virtual ~_ConfigFile() {}
};
// --- WideString ---
// Fixed-capacity (0x100 wide characters) conversion buffer that builds a
// wide-character string from an identifier string.
class WideString : public String <wide_c, 0x100>
{
public:
   WideString()
   {
   }
   // Converts the whole of `message`.
   // NOTE(review): `length` starts as the capacity (0x100) and is presumably
   // updated by copyTo() to the number of characters written; if a
   // full-capacity result is possible, `_string[length] = 0` writes at index
   // 0x100 - confirm String<> reserves room for the terminator.
   WideString(ident_t message)
   {
      size_t length = 0x100;
      message.copyTo(_string, length);
      _string[length] = 0;
   }
   // Converts at most `length` source characters of `message`.
   WideString(ident_t message, size_t length)
   {
      size_t wideLength = 0x100;
      Convertor::copy(_string, message, length, wideLength);
      _string[wideLength] = 0;
   }
};
} // _ELENA_
#endif // commonH
| 22.457286 | 115 | 0.595659 |
9afa715424ec69ee2447b3467087a86d678a1465 | 623 | h | C | NBA/view/theGameBetterCell.h | Zeng0413/ZDX_NBA | fb76c2e210d82d28268ed8b7d5d721380255ff04 | [
"MIT"
] | null | null | null | NBA/view/theGameBetterCell.h | Zeng0413/ZDX_NBA | fb76c2e210d82d28268ed8b7d5d721380255ff04 | [
"MIT"
] | null | null | null | NBA/view/theGameBetterCell.h | Zeng0413/ZDX_NBA | fb76c2e210d82d28268ed8b7d5d721380255ff04 | [
"MIT"
] | null | null | null | //
// theGameBetterCell.h
// NBA
//
// Created by zdx on 2017/3/18.
// Copyright © 2017年 zdx. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "UITableView+SDAutoTableViewCellHeight.h"
#import "UIView+SDAutoLayout.h"
#import "SDAutoLayout.h"
#import "maxPlayers.h"
#import "theGamePlayer.h"
// Table-view cell showing two sides of a matchup: an avatar image view and a
// button on each half.  Layout is done with SDAutoLayout (imported above).
@interface theGameBetterCell : UITableViewCell
// Left-hand avatar image and its button.
@property (strong, nonatomic) UIImageView *leftImageView;
@property (strong, nonatomic) UIButton *leftBT;
// Right-hand button and its avatar image.
@property (strong, nonatomic) UIButton *rightBT;
@property (strong, nonatomic) UIImageView *rightImageView;
// Backing model; presumably the custom setter in the .m refreshes the
// subviews when assigned - TODO confirm.
@property (strong, nonatomic) maxPlayers *model;
@end
| 23.074074 | 58 | 0.752809 |
b141915571982a72a65edeeb5f3f793080778cf8 | 19,672 | c | C | CWProtocol.c | travelping/openCAPWAP | 00cad1259ccf0555190aa80b86721606468f6a28 | [
"MIT"
] | 1 | 2017-01-11T05:58:21.000Z | 2017-01-11T05:58:21.000Z | CWProtocol.c | travelping/openCAPWAP | 00cad1259ccf0555190aa80b86721606468f6a28 | [
"MIT"
] | null | null | null | CWProtocol.c | travelping/openCAPWAP | 00cad1259ccf0555190aa80b86721606468f6a28 | [
"MIT"
] | null | null | null | /*******************************************************************************************
* Copyright (c) 2006-7 Laboratorio di Sistemi di Elaborazione e Bioingegneria Informatica *
* Universita' Campus BioMedico - Italy *
* *
* This program is free software; you can redistribute it and/or modify it under the terms *
* of the GNU General Public License as published by the Free Software Foundation; either *
* version 2 of the License, or (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, but WITHOUT ANY *
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A *
* PARTICULAR PURPOSE. See the GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License along with this *
* program; if not, write to the: *
* Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, *
* MA 02111-1307, USA. *
* *
* In addition, as a special exception, the copyright holders give permission to link the *
* code of portions of this program with the OpenSSL library under certain conditions as *
* described in each individual source file, and distribute linked combinations including *
* the two. You must obey the GNU General Public License in all respects for all of the *
* code used other than OpenSSL. If you modify file(s) with this exception, you may *
* extend this exception to your version of the file(s), but you are not obligated to do *
* so. If you do not wish to do so, delete this exception statement from your version. *
* If you delete this exception statement from all source files in the program, then also *
* delete it here. *
* *
* --------------------------------------------------------------------------------------- *
* Project: Capwap *
* *
* Author : Ludovico Rossi (ludo@bluepixysw.com) *
* Del Moro Andrea (andrea_delmoro@libero.it) *
* Giovannini Federica (giovannini.federica@gmail.com) *
* Massimo Vellucci (m.vellucci@unicampus.it) *
* Mauro Bisson (mauro.bis@gmail.com) *
*******************************************************************************************/
#include "CWCommon.h"
#include "CWVendorPayloads.h"
#include "WUM.h"
pthread_mutex_t gRADIO_MAC_mutex;
static const int gCWIANATimes256 = CW_IANA_ENTERPRISE_NUMBER * 256;
static const int gMaxDTLSHeaderSize = 25; // see http://crypto.stanford.edu/~nagendra/papers/dtls.pdf
static const int gMaxCAPWAPHeaderSize = 8; // note: this include optional Wireless field
unsigned char gRADIO_MAC[6];	// buffer for the radio MAC address (the previous comment here was copy-pasted from the header-size constant above; presumably filled from the transport header's optional Radio MAC field - confirm)
/*
 * Ensure the message buffer of *pm can take `size` more bytes beyond the
 * current write position.  Grows the buffer (rounded up to MSG_BLOCK_SIZE)
 * and zero-fills the newly added tail.  Returns CW_FALSE only when
 * reallocation fails.
 */
CWBool CWMessageEnsureSpace(const void *ctx, CWProtocolMessage *pm, size_t size)
{
	size_t required = sizeof(CWProtocolMessage) + pm->pos + size;

	if (required <= pm->space)
		return CW_TRUE;

	pm->space = RND_TO(required, MSG_BLOCK_SIZE);
	pm->data = reralloc_size(ctx, pm->data, pm->space);
	if (pm->data == NULL)
		return CW_FALSE;

	/* zero everything beyond the already-written part */
	CW_ZERO_MEMORY(pm->data + pm->pos, pm->space - pm->pos);
	return CW_TRUE;
}
/*
 * Copy `len` bytes starting at the current offset into a freshly allocated
 * NUL-terminated string (allocated from `ctx`) and advance the offset.
 * Returns NULL on allocation failure, in which case the offset is left
 * untouched.
 */
char *CWProtocolRetrieveStr(const void *ctx, CWProtocolMessage *pm, int len)
{
	char *copy = ralloc_strndup(ctx, (char *)pm->data + pm->pos, len);

	if (copy != NULL)
		pm->pos += len;

	return copy;
}
/*
 * Duplicate `len` raw bytes starting at the current offset (allocated from
 * `ctx`) and advance the offset.  Returns NULL on allocation failure, in
 * which case the offset is left untouched.
 */
unsigned char *CWProtocolRetrieveRawBytes(const void *ctx, CWProtocolMessage *pm, int len)
{
	unsigned char *copy = ralloc_memdup(ctx, pm->data + pm->pos, len);

	if (copy != NULL)
		pm->pos += len;

	return copy;
}
/*
 * Copy `len` raw bytes from the current offset into the caller-supplied
 * buffer `dest` and advance the offset.  `dest` must have room for `len`
 * bytes.
 */
void CWProtocolCopyRawBytes(void *dest, CWProtocolMessage *pm, int len)
{
	int offset = pm->pos;

	pm->pos += len;
	CW_COPY_MEMORY(dest, pm->data + offset, len);
}
/*
 * Generic destructor callback for message-element data kept in project
 * containers: releases the object with CW_FREE_OBJECT.
 */
void CWProtocolDestroyMsgElemData(void *f)
{
	CW_FREE_OBJECT(f);
}
/*
 * Append a Result Code message element that additionally carries a
 * vendor-specific payload (UCI or WUM) after the 32-bit result code.
 *
 * Element layout: result code (4 bytes), vendor element type (2), vendor
 * payload type (2), payload length (4), then the payload bytes.
 *
 * NOTE(review): for payload types other than UCI/WUM the element is still
 * sized 4 + 8 bytes but only the 4-byte result code is written - confirm
 * that no other payload types can reach this function.
 */
CWBool CWAssembleVendorMsgElemResultCodeWithPayload(const void *ctx, CWProtocolMessage *pm,
						    CWProtocolResultCode code,
						    CWProtocolVendorSpecificValues * payload)
{
	if (pm == NULL)
		return CWErrorRaise(CW_ERROR_WRONG_ARG, NULL);

	int payloadSize = 0;
	CWVendorUciValues *uciPayload = NULL;
	CWVendorWumValues *wumPayload = NULL;

	/* first pass: compute how many payload bytes follow the sub-header */
	switch (payload->vendorPayloadType) {
	case CW_MSG_ELEMENT_VENDOR_SPEC_PAYLOAD_UCI:
		uciPayload = (CWVendorUciValues *) payload->payload;
		if (uciPayload->response != NULL)
			payloadSize = (strlen(uciPayload->response) * sizeof(unsigned char));
		break;

	case CW_MSG_ELEMENT_VENDOR_SPEC_PAYLOAD_WUM:
		wumPayload = (CWVendorWumValues *) payload->payload;
		payloadSize = sizeof(unsigned char);	/* default, only type */
		if (wumPayload->type == WTP_VERSION_RESPONSE)
			/* type byte + major/minor/revision */
			payloadSize = sizeof(unsigned char) * 4;
		break;
	}

	/* 4 bytes result code + 8 bytes vendor sub-header + payload */
	CWInitMsgElem(ctx, pm, 4 + 8 + payloadSize, CW_MSG_ELEMENT_RESULT_CODE_CW_TYPE);
	CWProtocolStore32(pm, code);
	// CWDebugLog("Result Code: %d", code);

	/* second pass: serialise the vendor sub-header and the payload */
	switch (payload->vendorPayloadType) {
	case CW_MSG_ELEMENT_VENDOR_SPEC_PAYLOAD_UCI:
		/*Store what type of payload we have */
		CWProtocolStore16(pm, CW_MSG_ELEMENT_VENDOR_SPEC_PAYLOAD_CW_TYPE);
		/*Store what type of vendor payload we have */
		CWProtocolStore16(pm, CW_MSG_ELEMENT_VENDOR_SPEC_PAYLOAD_UCI);
		/*Store payload size */
		CWProtocolStore32(pm, payloadSize);
		if (uciPayload->response != NULL)
			/*Store the payload */
			CWProtocolStoreStr(pm, uciPayload->response);
		break;

	case CW_MSG_ELEMENT_VENDOR_SPEC_PAYLOAD_WUM:
		/* Store what type of payload we have */
		CWProtocolStore16(pm, CW_MSG_ELEMENT_VENDOR_SPEC_PAYLOAD_CW_TYPE);
		/* Store what type of vendor payload we have */
		CWProtocolStore16(pm, CW_MSG_ELEMENT_VENDOR_SPEC_PAYLOAD_WUM);
		/* Store payload size */
		CWProtocolStore32(pm, payloadSize);

		CWProtocolStore8(pm, wumPayload->type);
		if (wumPayload->type == WTP_VERSION_RESPONSE) {
			CWProtocolStore8(pm, wumPayload->_major_v_);
			CWProtocolStore8(pm, wumPayload->_minor_v_);
			CWProtocolStore8(pm, wumPayload->_revision_v_);
		}
		break;
	}

	CWFinalizeMsgElem(pm);

	return CW_TRUE;
}
/*
 * Append a plain Result Code message element (a single 32-bit code) to the
 * message under construction.
 */
CWBool CWAssembleMsgElemResultCode(const void *ctx, CWProtocolMessage *pm, CWProtocolResultCode code)
{
	if (!pm)
		return CWErrorRaise(CW_ERROR_WRONG_ARG, NULL);

	CWInitMsgElem(ctx, pm, 4, CW_MSG_ELEMENT_RESULT_CODE_CW_TYPE);
	CWProtocolStore32(pm, code);
	CWFinalizeMsgElem(pm);

	return CW_TRUE;
}
/**
 * Assemble a CAPWAP control packet from the already-built protocol message
 * `msg`: transport and control headers are added and the packet is split
 * into fragments in *tm when it exceeds the path MTU (a single entry when
 * no fragmentation is needed).
 *
 * Thin wrapper around CWAssembleDataMessage() with fixed defaults for
 * control traffic; NOTE(review): the meaning of the literal arguments
 * (1, the two CW_FALSE flags) is defined by CWAssembleDataMessage - confirm
 * there before changing them.
 */
CWBool CWAssembleMessage(CWTransportMessage *tm, int PMTU, CWProtocolMessage *msg)
{
	return CWAssembleDataMessage(tm, PMTU, 1, BINDING_IEEE_802_11, CW_FALSE, CW_FALSE, NULL, NULL, msg);
}
#define THDR_ROOM 64
#define FRGMT_BUFFER (3 * 1024)
#define FRGMT_MAX 16
#define in_range_s(v, start, end) \
(((v) >= (start)) && ((v) < (end)))
#define in_range_e(v, start, end) \
(((v) > (start)) && ((v) <= (end)))
#define overlap(s1, e1, s2, e2) \
(in_range_s(s1, s2, e2) || in_range_e(e1, s2, e2) || \
in_range_s(s2, s1, e1) || in_range_e(e2, s1, e1))
/*
 * Merge one CAPWAP fragment into the reassembly buffer `b`.
 *
 * `start`/`end` are byte offsets of the fragment payload within the
 * reassembled message (the wire carries the offset in 8-byte units).
 * Overlapping fragments are rejected; adjacent ranges are coalesced, so
 * reassembly is complete once b->count == 1 covering [0, b->length).
 *
 * Returns CW_TRUE if the fragment was accepted, CW_FALSE on overlap,
 * part-table overflow, or a payload that does not fit the buffer.
 */
static
CWBool CWAddFragment(CWFragmentBuffer *b, CWProtocolMessage *pm)
{
	int i;
	unsigned int start = CWTransportHeaderFragmentOffset(pm) * 8;
	unsigned int end = start + (pm->space - pm->pos);

	if (end > FRGMT_BUFFER)
		return CW_FALSE;

	printf("CWAddFragment: New start: %d, end: %d\n", start, end);
	for (i = 0; i < b->count; i++)
		printf("  before:[%2d]: %8d/%8d\n", i, b->parts[i].start, b->parts[i].end);
	printf("\nAction: ");

	/* place the new range relative to the (sorted) existing parts */
	for (i = 0; i < b->count; i++) {
		if (overlap(b->parts[i].start, b->parts[i].end, start, end)) {
			printf("skip due to overlap\n");
			return CW_FALSE;
		}

		if (b->parts[i].end == start) {
			/* append to current fragment */
			printf("append to current fragment\n");
			b->parts[i].end = end;

			if (i + 1 < b->count)
				if (b->parts[i].end == b->parts[i + 1].start) {
					/* gap to the next part closed: merge them */
					printf("merge current to next fragment\n");
					b->parts[i].end = b->parts[i + 1].end;
					b->count--;
					if (i + 1 < b->count)
						/*
						 * BUGFIX: with the decremented count there are
						 * (b->count - (i + 1)) entries left to shift down;
						 * the previous (b->count - (i + 2)) dropped the
						 * last entry of the table.
						 */
						memmove(&b->parts[i + 1], &b->parts[i + 2],
							sizeof(b->parts[i]) * (b->count - (i + 1)));
				}
			break;
		}
		else if (b->parts[i].start == end) {
			/* prepend to current fragment */
			printf("prepend to current fragment\n");
			b->parts[i].start = start;
			break;
		}
		else if (b->parts[i].start > start) {
			/* insert a new entry before the current one */
			printf("insert before current fragment\n");
			if (b->count >= FRGMT_MAX)
				return CW_FALSE;

			memmove(&b->parts[i + 1], &b->parts[i], sizeof(b->parts[i]) * (b->count - i));
			b->parts[i].start = start;
			b->parts[i].end = end;
			b->count++;
			break;
		}
	}
	if (i == b->count) {
		/* range lies after every known part: append a new entry */
		printf("append to list\n");
		if (b->count >= FRGMT_MAX)
			return CW_FALSE;

		b->parts[i].start = start;
		b->parts[i].end = end;
		b->count++;
	}

	printf("\n");
	for (i = 0; i < b->count; i++)
		/* label fixed: this dump shows the state AFTER the insert */
		printf("  after:[%2d]: %8d/%8d\n", i, b->parts[i].start, b->parts[i].end);
	printf("\n");

	/* the last fragment fixes the total reassembled length */
	if (CWTransportHeaderIsLast(pm))
		b->length = end;

	if (start == 0) {
		if (pm->pos > THDR_ROOM)
			/* make sure the transport header fits the reserved space */
			return CW_FALSE;

		/* first packet - take everything, including the transport header */
		b->start = THDR_ROOM - pm->pos;
		memcpy(b->data + b->start, pm->data, pm->space);
	} else
		/* fragment - only take the payload */
		memcpy(b->data + THDR_ROOM + start, pm->data + pm->pos, end - start);

	return CW_TRUE;
}
/*
 * Consume one transport-layer packet.  A complete (possibly reassembled)
 * message is returned in *pm.  Fragments are accumulated in `frag_buffer`;
 * while more fragments are needed the function raises
 * CW_ERROR_NEED_RESOURCE and the caller retries with the next packet.
 */
CWBool CWProtocolParseFragment(CWProtocolMessage *msg, CWFragmentBufferList* frag_buffer, CWProtocolMessage *pm)
{
	assert(msg != NULL);
	assert(frag_buffer != NULL);
	assert(pm != NULL);

	if (!CWParseInitTransportHeader(msg)) {
		CWDebugLog("CWParseTransportHeader failed");
		return CW_FALSE;
	}

	if (!CWTransportHeaderIsFragment(msg)) {	// single fragment
		/* consume msg: hand its buffer over to *pm without copying when
		 * we own it, otherwise duplicate the static buffer */
		if (!msg->is_static) {
			ralloc_steal(NULL, msg->data);
			CWInitTransportMessage(pm, msg->data, msg->space, 0);
		} else {
			unsigned char *buf;

			buf = ralloc_memdup(NULL, msg->data, msg->space);
			CWInitTransportMessage(pm, buf, msg->space, 0);
		}

#ifdef HAVE_VALGRIND_MEMCHECK_H
		VALGRIND_MAKE_MEM_UNDEFINED(msg, sizeof(CWProtocolMessage));
#endif
	} else {
		CWFragmentBuffer *b;
		CWBool done;
		unsigned int completed_id;
		unsigned int base = frag_buffer->base;
		unsigned int frag_id = CWTransportHeaderFragmentId(msg);

		CWDebugLog("Received Fragment ID:%d, offset:%d, notLast:%d",
			   CWTransportHeaderFragmentId(msg),
			   CWTransportHeaderFragmentOffset(msg) * 8,
			   CWTransportHeaderIsLast(msg));

		if (base > 0x8000 && frag_id < (base - 0x8000))
			/* 16bit wrap */
			frag_id += 0x10000;

		CWDebugLog("Fragment Buffer: base: %d, Id: %d", base, frag_id);

		if (frag_id < base) {
			/* fragment to old */
			CWDebugLog("Fragment too old");
			return CW_FALSE;
		}

		/* NOTE(review): this only adjusts the local copy of `base`;
		 * frag_buffer->base itself is not advanced here - confirm
		 * whether that is intended. */
		if (frag_id - base > MAX_FRAGMENTS)
			base = frag_id - MAX_FRAGMENTS;

		b = frag_buffer->slot + frag_id % MAX_FRAGMENTS;
		if (b->fragment_id != CWTransportHeaderFragmentId(msg)) {
			/* slot belonged to a different message: recycle it */
			ralloc_free(b->data);
			CW_ZERO_MEMORY(b, sizeof(CWFragmentBuffer));
		}
		if (!b->data)
			if (!(b->data = rzalloc_size(NULL, FRGMT_BUFFER)))
				/* BUGFIX: propagate the failure - the old code fell
				 * through and dereferenced the NULL buffer */
				return CWErrorRaise(CW_ERROR_OUT_OF_MEMORY, NULL);

		b->fragment_id = CWTransportHeaderFragmentId(msg);
		done = CWAddFragment(b, msg);
		CWReleaseMessage(msg);

		if (!done || b->length == 0 || b->count != 1 ||
		    b->parts[0].start != 0 || b->parts[0].end != b->length)
			/* we need at least one more fragment */
			return CWErrorRaise(CW_ERROR_NEED_RESOURCE, NULL);

		CWInitTransportMessage(pm, b->data, b->length + THDR_ROOM, 0);
		pm->pos = b->start;
		ralloc_steal(NULL, b->data);

		/* nuke the old buffer and advance the base fragment id.
		 * BUGFIX: remember the completed id before wiping the buffer -
		 * the old code compared frag_buffer->base against b->fragment_id
		 * AFTER CW_ZERO_MEMORY(), i.e. always against 0. */
		completed_id = b->fragment_id;
		CW_ZERO_MEMORY(b, sizeof(CWFragmentBuffer));
		if (frag_buffer->base == completed_id)
			frag_buffer->base++;
	}

	return CW_TRUE;
}
/*
 * Parse the CAPWAP transport header at the current position of *pm.
 * On success the position points at the first byte after the header,
 * the optional Radio MAC is copied into RadioMAC (when non-NULL) and
 * any wireless-specific binding data is stored in th->bindingValuesPtr.
 */
CWBool CWParseTransportHeader(CWProtocolMessage *pm, CWProtocolTransportHeaderValues *th, unsigned char *RadioMAC)
{
	assert(pm != NULL);
	assert(th != NULL);

	/* need at least the fixed part of the header */
	if (pm->space - pm->pos < sizeof(CWTransportHeader))
		return CW_FALSE;

	/* remember where this header starts (used for the length check below) */
	pm->start[pm->level++] = pm->pos;
	pm->pos += sizeof(CWTransportHeader); /* skip fixed header part */

	if (CWTransportHeaderVersion(pm) != CW_PROTOCOL_VERSION)
		return CWErrorRaise(CW_ERROR_INVALID_FORMAT, "Wrong Protocol Version");

	CWDebugLog("VERSION: %d", CWTransportHeaderVersion(pm));

	/* optional Radio MAC Address field (M flag set) */
	if (CWTransportHeaderMFlag(pm)) {
		unsigned char length;

		length = CWProtocolRetrieve8(pm);
		if (length != 6)
			return CWErrorRaise(CW_ERROR_INVALID_FORMAT, "Invalid MAC length");

		if (RadioMAC)
			CWProtocolCopyRawBytes(RadioMAC, pm, length);
		else
			pm->pos += length; /* caller not interested: just skip it */

		/* the MAC field is padded to a 32-bit boundary */
		CWMessageAlignTo(pm, 4);
	}

	/* optional wireless-specific information (W flag set) */
	if (CWTransportHeaderWFlag(pm))
		th->bindingValuesPtr = CWParseTransportHeaderBinding(pm);

	/* the HLEN field (in 32-bit words) must match what we consumed.
	 * NOTE(review): compares against pm->start[0], i.e. assumes the
	 * transport header is always parsed at nesting level 0 - confirm. */
	if (pm->pos - pm->start[0] != CWTransportHeaderHeaderLen(pm) * 4)
		return CWErrorRaise(CW_ERROR_INVALID_FORMAT, "Invalid Header");

	return CW_TRUE;
}
/*
 * Parse the CAPWAP control header at the current position of *pm into
 * *valPtr.  Fields are read in wire order: 32-bit message type, 8-bit
 * sequence number, 16-bit message-element length, 8-bit flags (reserved,
 * expected to be zero).
 */
CWBool CWParseControlHeader(CWProtocolMessage *pm, CWControlHeaderValues * valPtr)
{
	unsigned char reserved;

	if (!pm || !valPtr)
		return CWErrorRaise(CW_ERROR_WRONG_ARG, NULL);

	valPtr->messageTypeValue = CWProtocolRetrieve32(pm);
	valPtr->seqNum = CWProtocolRetrieve8(pm);
	valPtr->msgElemsLen = CWProtocolRetrieve16(pm);

	/* the flags octet is reserved; only warn on a non-zero value */
	reserved = CWProtocolRetrieve8(pm);
	if (reserved != 0)
		CWLog("CWParseControlHeader, Flags should be 0 (zero), actual value: %02x", reserved);

	return CW_TRUE;
}
/*
 * Assemble a response for an unrecognized request: a message of type
 * `msgType` echoing `seqNum` and carrying a single Result Code element
 * with CW_PROTOCOL_FAILURE_UNRECOGNIZED_REQ.
 */
CWBool CWAssembleUnrecognizedMessageResponse(CWTransportMessage *tm, int PMTU,
					     int seqNum, int msgType)
{
	CWProtocolMessage msg;

	assert(tm);

	CWLog("Assembling Unrecognized Message Response...");

	if (CWInitMessage(NULL, &msg, msgType, seqNum) &&
	    CWAssembleMsgElemResultCode(NULL, &msg, CW_PROTOCOL_FAILURE_UNRECOGNIZED_REQ)) {
		CWFinalizeMessage(&msg);

		if (CWAssembleMessage(tm, PMTU, &msg)) {
			CWLog("Unrecognized Message Response Assembled");
			return CW_TRUE;
		}
	}

	/* any failure path: release the partially built message */
	CWReleaseMessage(&msg);
	return CW_FALSE;
}
/*
 * Append a Session ID message element.  `sessionID` must point at the
 * fixed 16-byte (128-bit) session identifier.
 */
CWBool CWAssembleMsgElemSessionID(const void *ctx, CWProtocolMessage *pm, unsigned char *sessionID)
{
	if (!pm)
		return CWErrorRaise(CW_ERROR_WRONG_ARG, NULL);

	CWInitMsgElem(ctx, pm, 16, CW_MSG_ELEMENT_SESSION_ID_CW_TYPE);
	CWProtocolStoreRawBytes(pm, sessionID, 16);
	CWFinalizeMsgElem(pm);

	return CW_TRUE;
}
/*
 * Parse an AC Name message element of `len` bytes into a freshly
 * allocated NUL-terminated string (*valPtr, allocated from `ctx`).
 */
CWBool CWParseACName(const void *ctx, CWProtocolMessage *pm, int len, char **valPtr)
{
	CWParseMessageElementStart(pm);

	*valPtr = CWProtocolRetrieveStr(ctx, pm, len);
	/* BUGFIX: check the retrieved string, not the out-parameter itself -
	 * the old `valPtr == NULL` test could never fire, so allocation
	 * failures went unreported. */
	if (*valPtr == NULL)
		return CWErrorRaise(CW_ERROR_OUT_OF_MEMORY, NULL);
	// CWDebugLog("AC Name:%s", *valPtr);

	return CWParseMessageElementEnd(pm, len);
}
/*
 * Parse a WTP Radio Administrative State element: radio ID (1 byte)
 * followed by the admin state (1 byte).
 */
CWBool CWParseWTPRadioAdminState(CWProtocolMessage *pm, int len, CWRadioAdminInfoValues * valPtr)
{
	CWParseMessageElementStart(pm);

	valPtr->ID = CWProtocolRetrieve8(pm);
	valPtr->state = CWProtocolRetrieve8(pm);
	/* NOTE(review): the `cause` byte is deliberately not consumed here
	 * (see the disabled line) - confirm the admin-state element really
	 * carries only two bytes on the wire. */
	//valPtr->cause = CWProtocolRetrieve8(pm);
	// CWDebugLog("WTP Radio Admin State: %d - %d - %d", valPtr->ID, valPtr->state, valPtr->cause);

	return CWParseMessageElementEnd(pm, len);
}
/*
 * Parse a WTP Radio Operational State element: radio ID (1 byte),
 * operational state (1 byte) and failure cause (1 byte).
 */
CWBool CWParseWTPRadioOperationalState(CWProtocolMessage *pm, int len, CWRadioOperationalInfoValues * valPtr)
{
	CWParseMessageElementStart(pm);

	valPtr->ID = CWProtocolRetrieve8(pm);
	valPtr->state = CWProtocolRetrieve8(pm);
	valPtr->cause = CWProtocolRetrieve8(pm);
	// CWDebugLog("WTP Radio Operational State: %d - %d - %d", valPtr->ID, valPtr->state, valPtr->cause);

	return CWParseMessageElementEnd(pm, len);
}
/*
 * Read a message-element header at the current position: 16-bit element
 * type followed by the 16-bit length of the element body.
 */
CWBool CWParseFormatMsgElem(CWProtocolMessage * completeMsg, unsigned short int *type, unsigned short int *len)
{
	*type = CWProtocolRetrieve16(completeMsg);
	*len = CWProtocolRetrieve16(completeMsg);

	return CW_TRUE;
}
/*
 * Parse a Result Code message element: a single 32-bit code.
 */
CWBool CWParseResultCode(CWProtocolMessage *pm, int len, CWProtocolResultCode * valPtr)
{
	CWParseMessageElementStart(pm);

	*valPtr = CWProtocolRetrieve32(pm);
	// CWDebugLog("Result Code: %d", *valPtr);

	return CWParseMessageElementEnd(pm, len);
}
/*
 * Reset all per-radio statistics counters to their initial state.
 */
void CWWTPResetRadioStatistics(WTPRadioStatisticsInfo * radioStatistics)
{
	/* failure bookkeeping */
	radioStatistics->lastFailureType = UNKNOWN_TYPE;
	radioStatistics->resetCount = 0;
	radioStatistics->SWFailureCount = 0;
	radioStatistics->HWFailuireCount = 0;
	radioStatistics->otherFailureCount = 0;
	radioStatistics->unknownFailureCount = 0;
	/* configuration / radio change counters */
	radioStatistics->configUpdateCount = 0;
	radioStatistics->channelChangeCount = 0;
	radioStatistics->bandChangeCount = 0;
	radioStatistics->currentNoiseFloor = 0;
}
/*
 * Retrieve the 16-byte (128-bit) Session ID from the message; returns a
 * freshly allocated copy, or NULL on allocation failure.
 * NOTE(review): `len` is ignored and a fixed 16 bytes are consumed -
 * callers should ensure len == 16.
 */
unsigned char *CWParseSessionID(CWProtocolMessage *pm, int len)
{
	return CWProtocolRetrieveRawBytes(NULL, pm, 16);
}
/*
 * Parse an IEEE 802.11 WLAN Hold Time element: radio ID and WLAN ID
 * (one byte each, discarded) followed by the 16-bit hold time.
 */
CWBool CWParseTPIEEE80211WLanHoldTime(CWProtocolMessage *pm, int len, unsigned short int * valPtr)
{
	CWParseMessageElementStart(pm);

	CWProtocolRetrieve8(pm);	// skip RADIO Id (not reported to the caller)
	CWProtocolRetrieve8(pm);	// skip WLAN Id (not reported to the caller)
	*valPtr = CWProtocolRetrieve16(pm);

	return CWParseMessageElementEnd(pm, len);
}
/*
 * Parse a Data Channel Dead Interval element: a single 16-bit value.
 */
CWBool CWParseTPDataChannelDeadInterval(CWProtocolMessage *pm, int len, unsigned short int * valPtr)
{
	CWParseMessageElementStart(pm);

	*valPtr = CWProtocolRetrieve16(pm);

	return CWParseMessageElementEnd(pm, len);
}
/*
 * Parse an AC Join Timeout element: a single 16-bit value.
 */
CWBool CWParseTPACJoinTimeout(CWProtocolMessage *pm, int len, unsigned short int * valPtr)
{
	CWParseMessageElementStart(pm);

	*valPtr = CWProtocolRetrieve16(pm);

	return CWParseMessageElementEnd(pm, len);
}
| 33.285956 | 124 | 0.655551 |
7f75f19a14bc07bfad034c4fed01609fb0cb655a | 1,746 | h | C | webkit/browser/appcache/appcache_executable_handler.h | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2020-05-03T06:33:56.000Z | 2021-11-14T18:39:42.000Z | webkit/browser/appcache/appcache_executable_handler.h | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | webkit/browser/appcache/appcache_executable_handler.h | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | // Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef WEBKIT_BROWSER_APPCACHE_APPCACHE_EXECUTABLE_HANDLER_H_
#define WEBKIT_BROWSER_APPCACHE_APPCACHE_EXECUTABLE_HANDLER_H_
#include "base/callback.h"
#include "base/memory/scoped_ptr.h"
#include "googleurl/src/gurl.h"
#include "webkit/browser/webkit_storage_browser_export.h"
namespace net {
class IOBuffer;
class URLRequest;
}
namespace appcache {
// An interface that must be provided by the embedder to support this feature.
// An interface that must be provided by the embedder to support this feature.
// Implementations receive the URLRequest for an executable appcache resource
// and asynchronously report how to answer it via the callback.
class WEBKIT_STORAGE_BROWSER_EXPORT AppCacheExecutableHandler {
 public:
  // A handler can respond in one of 4 ways, if each of the GURL fields
  // in 'Response' are empty and use_network is false, an error response is
  // synthesized.
  struct Response {
    GURL cached_resource_url;  // serve this resource from the cache
    GURL redirect_url;         // or redirect the request here
    bool use_network;          // or let the request hit the network
    // TODO: blob + headers would be a good one to provide as well, as it would
    // make templating possible.
  };
  typedef base::Callback<void(const Response&)> ResponseCallback;

  // Deletion of the handler cancels all pending callbacks.
  virtual ~AppCacheExecutableHandler() {}

  // Invoked for each request; `callback` must eventually be run (unless the
  // handler is destroyed first, which cancels it).
  virtual void HandleRequest(net::URLRequest* req,
                             ResponseCallback callback) = 0;
};
};
// A factory to produce instances.
// A factory to produce instances.  `handler_source` carries the script /
// payload the handler is built from; `handler_url` identifies it.
class WEBKIT_STORAGE_BROWSER_EXPORT AppCacheExecutableHandlerFactory {
 public:
  virtual scoped_ptr<AppCacheExecutableHandler> CreateHandler(
      const GURL& handler_url, net::IOBuffer* handler_source) = 0;

 protected:
  // Not deleted through this interface.
  virtual ~AppCacheExecutableHandlerFactory() {}
};
} // namespace appcache
#endif // WEBKIT_BROWSER_APPCACHE_APPCACHE_EXECUTABLE_HANDLER_H_
| 31.745455 | 79 | 0.762887 |
7f975f21c54d0437fd1d8982e89fa868bd66e783 | 2,093 | h | C | PrivateFrameworks/Memories.framework/MiroVPMetadataRange.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | 36 | 2016-04-20T04:19:04.000Z | 2018-10-08T04:12:25.000Z | PrivateFrameworks/Memories.framework/MiroVPMetadataRange.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | null | null | null | PrivateFrameworks/Memories.framework/MiroVPMetadataRange.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | 10 | 2016-06-16T02:40:44.000Z | 2019-01-15T03:31:45.000Z | /* Generated by RuntimeBrowser
Image: /System/Library/PrivateFrameworks/Memories.framework/Memories
*/
// Reverse-engineered (RuntimeBrowser-generated) declaration of the private
// MiroVPMetadataRange class: a typed, scored time range measured in frames,
// optionally carrying nested child ranges.  This file mirrors the binary's
// runtime metadata - keep declarations in sync with the dump; comments only.
@interface MiroVPMetadataRange : NSObject <NSCopying> {
    NSDictionary * _analysisDict;
    int  _durationInFrames;
    unsigned long long  _flags;
    int  _startTimeInFrames;
    unsigned long long  _type;
    NSArray * childRanges;
    NSString * memoryIdentifier;
    unsigned long long  rating;
    double  score;
}

// Immutable range attributes exposed by the binary.
@property (nonatomic, readonly) NSDictionary *analysisDict;
@property (nonatomic, retain) NSArray *childRanges;
@property (nonatomic, readonly) NSDictionary *dictionary;
@property (nonatomic, readonly) int durationInFrames;
@property (nonatomic, readonly) unsigned long long flags;
@property (nonatomic, retain) NSString *memoryIdentifier;
@property (nonatomic) unsigned long long rating;
@property (nonatomic) double score;
@property (nonatomic, readonly) int startTimeInFrames;
@property (nonatomic, readonly) unsigned long long type;

// Class factories / helpers (signatures as observed at runtime).
+ (id)descriptionForFlags:(unsigned long long)arg1;
+ (id)descriptionForType:(unsigned long long)arg1;
+ (void)enumerateByAscendingTypes:(id)arg1 handler:(id /* block */)arg2;
+ (id)mergeRanges:(id)arg1 withRanges:(id)arg2;
+ (id)rangeWithType:(unsigned long long)arg1;
+ (id)rangeWithType:(unsigned long long)arg1 flags:(unsigned long long)arg2 startTimeInFrames:(int)arg3 durationInFrames:(int)arg4 analysisDict:(id)arg5;

- (void).cxx_destruct;
- (id)_description;
- (id)analysisDict;
- (id)childRanges;
- (id)copyWithZone:(struct _NSZone { }*)arg1;
- (id)description;
- (id)dictionary;
- (int)durationInFrames;
- (int)durationOfOverlapWithRange:(id)arg1;
- (int)durationOfOverlapWithStart:(int)arg1 duration:(int)arg2;
- (unsigned long long)flags;
- (id)init;
- (id)initWithDictionary:(id)arg1;
- (bool)isEqual:(id)arg1;
- (id)memoryIdentifier;
- (unsigned long long)rating;
- (double)score;
- (void)setChildRanges:(id)arg1;
- (void)setMemoryIdentifier:(id)arg1;
- (void)setRating:(unsigned long long)arg1;
- (void)setScore:(double)arg1;
- (int)startTimeInFrames;
- (id)transientCopy;
- (unsigned long long)type;

@end
| 34.311475 | 153 | 0.754419 |
fa0a2658a7d7a3bc00e9c618ee3f41ec821e5494 | 241 | h | C | torch-test/mpich-3.4.3/src/mpid/ch4/shm/src/shm_init.h | alchemy315/NoPFS | f3901e963e2301e8a6f1c7aac0511d0cf9a1889d | [
"BSD-3-Clause"
] | null | null | null | torch-test/mpich-3.4.3/src/mpid/ch4/shm/src/shm_init.h | alchemy315/NoPFS | f3901e963e2301e8a6f1c7aac0511d0cf9a1889d | [
"BSD-3-Clause"
] | null | null | null | torch-test/mpich-3.4.3/src/mpid/ch4/shm/src/shm_init.h | alchemy315/NoPFS | f3901e963e2301e8a6f1c7aac0511d0cf9a1889d | [
"BSD-3-Clause"
] | null | null | null | /*
* Copyright (C) by Argonne National Laboratory
* See COPYRIGHT in top-level directory
*/
#ifndef SHM_INIT_H_INCLUDED
#define SHM_INIT_H_INCLUDED
#include <shm.h>
#include "../posix/shm_inline.h"
#endif /* SHM_INIT_H_INCLUDED */
| 18.538462 | 47 | 0.73029 |
9ab134857807e8e6e6a8cb61eed4523026453cbb | 2,402 | h | C | core/src/db/Utils.h | MXDA/milvus | db17edab04ce518de7164c9f0b1bf8ca0747f285 | [
"Apache-2.0"
] | 1 | 2020-08-10T18:28:59.000Z | 2020-08-10T18:28:59.000Z | core/src/db/Utils.h | MXDA/milvus | db17edab04ce518de7164c9f0b1bf8ca0747f285 | [
"Apache-2.0"
] | null | null | null | core/src/db/Utils.h | MXDA/milvus | db17edab04ce518de7164c9f0b1bf8ca0747f285 | [
"Apache-2.0"
] | null | null | null | // Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#pragma once
#include <ctime>
#include <string>
#include "Options.h"
#include "db/Types.h"
#include "db/meta/MetaTypes.h"
namespace milvus {
namespace engine {
namespace snapshot {
class Segment;
class Partition;
class Collection;
} // namespace snapshot
namespace utils {
int64_t
GetMicroSecTimeStamp();
std::string
ConstructCollectionRootPath(const std::string& root_path);
Status
CreateCollectionPath(const DBMetaOptions& options, const std::string& collection_id);
Status
DeleteCollectionPath(const DBMetaOptions& options, const std::string& collection_id, bool force = true);
Status
CreateCollectionFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file);
Status
GetCollectionFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file);
Status
DeleteCollectionFilePath(const DBMetaOptions& options, meta::SegmentSchema& table_file);
Status
DeleteSegment(const DBMetaOptions& options, meta::SegmentSchema& table_file);
Status
GetParentPath(const std::string& path, std::string& parent_path);
bool
IsSameIndex(const CollectionIndex& index1, const CollectionIndex& index2);
bool
IsRawIndexType(int32_t type);
bool
IsBinaryMetricType(int32_t metric_type);
meta::DateT
GetDate(const std::time_t& t, int day_delta = 0);
meta::DateT
GetDate();
meta::DateT
GetDateWithDelta(int day_delta);
// Parsed components of a meta-service connection URI of the form
// dialect://username:password@host:port/db_name, filled by ParseMetaUri().
struct MetaUriInfo {
    std::string dialect_;
    std::string username_;
    std::string password_;
    std::string host_;
    std::string port_;
    std::string db_name_;
};
Status
ParseMetaUri(const std::string& uri, MetaUriInfo& info);
std::string
GetIndexName(int32_t index_type);
void
SendExitSignal();
void
ExitOnWriteError(Status& status);
void
EraseFromCache(const std::string& item_key);
} // namespace utils
} // namespace engine
} // namespace milvus
| 25.284211 | 113 | 0.774355 |
3c39284a80aae91d0e28632bddf5ee8bcfff20c2 | 7,565 | h | C | tensorflow/compiler/xla/service/instruction_fusion.h | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 56 | 2018-06-21T13:47:23.000Z | 2020-05-13T09:31:47.000Z | tensorflow/compiler/xla/service/instruction_fusion.h | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 6 | 2022-01-15T07:17:47.000Z | 2022-02-14T15:28:22.000Z | tensorflow/compiler/xla/service/instruction_fusion.h | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 15 | 2018-09-06T14:18:32.000Z | 2020-05-14T06:35:30.000Z | #include "absl/container/flat_hash_map.h"
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_INSTRUCTION_FUSION_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_INSTRUCTION_FUSION_H_
#include "tensorflow/compiler/xla/service/fusion_queue.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
#include "tensorflow/compiler/xla/service/hlo_reachability.h"
#include "tensorflow/core/platform/macros.h"
namespace xla {
// HLO pass which performs instruction fusion. Instructions are fused
// "vertically", meaning producing instructions are fused into their consumers
// with the intent that the loops which compute their values will be fused in
// code generation. Derived classes define ShouldFuse method to select which
// instructions to fuse.
class InstructionFusion : public HloModulePass {
 public:
  // `is_expensive`: predicate deciding whether an instruction is too costly
  //     to duplicate when fusing (IsExpensive below is the usual choice).
  // `may_duplicate`: whether a producer with several users may be duplicated
  //     into each consumer.
  // `config_collection_mode`: whether per-instruction fusion decisions are
  //     recorded (kOff disables collection).
  explicit InstructionFusion(
      std::function<bool(const HloInstruction& instruction)> is_expensive,
      bool may_duplicate = true,
      FusionConfigCollection config_collection_mode =
          FusionConfigCollection::kOff)
      : is_expensive_(is_expensive),
        may_duplicate_(may_duplicate),
        config_collection_mode_(config_collection_mode) {}
  ~InstructionFusion() override = default;
  absl::string_view name() const override { return "fusion"; }

  // Run instruction fusion on the given computation. Returns whether the
  // computation was changed (instructions were fused).
  StatusOr<bool> Run(HloModule* module) override;

  // Returns true if the computation of the given instruction is significantly
  // more expensive than just writing all the values of the instructions' result
  // array. Expensive operations will not be duplicated.
  static bool IsExpensive(const HloInstruction& instruction);

 protected:
  // Returns a FusionQueue that implements custom order of instructions being
  // fused. The default implementation processes consumers in reverse post
  // order.
  virtual std::unique_ptr<FusionQueue> GetFusionQueue(
      HloComputation* computation);

  // Returns whether the given producer instruction should be fused into the
  // given consumer instruction. producer is necessarily an operand of consumer.
  // Derived classes should define this method to specify which instructions
  // should be fused. `operand_index` is which operand of the consumer the
  // producer is.
  //
  // Instructions are traversed in reverse post order (computation root to
  // leaves). This method is called for each operand of the instruction (where
  // the operand is 'producer' and the instruction is 'consumer')
  //
  // Subtypes can override this with target-specific heuristics.
  virtual bool ShouldFuse(HloInstruction* consumer, int64 operand_index);

  // Returns whether multi-output fusion can be applied to fuse `producer` into
  // `consumer`. In contrast to "regular" fusion, the `producer` is not
  // duplicated by multi-output fusion.
  virtual bool ShouldFuseIntoMultiOutput(HloInstruction* consumer,
                                         int64 operand_index) {
    return false;
  }

  // Chooses a fusion kind for `producer` and `consumer`.
  // Default method chooses `kLoop`.
  virtual HloInstruction::FusionKind ChooseKind(const HloInstruction* producer,
                                                const HloInstruction* consumer);

  // Fuses producer into consumer.
  virtual HloInstruction* Fuse(HloInstruction* producer,
                               HloInstruction* consumer);

  // Creates a new fusion instruction containing `producer` and `consumer`. A
  // tuple is added as the fusion instruction's root, which consumes from both,
  // `producer` and `consumer`. This style of fusion is referred to as
  // multi-output fusion.
  virtual HloInstruction* FuseIntoMultiOutput(HloInstruction* producer,
                                              HloInstruction* consumer);

  // An "effectively unary" operation is one that has at most one "large"
  // input with the others being negligible in terms of memory usage.
  // We use "has a smaller true rank than the output" as a heuristic
  // for "negligible" memory usage.
  bool EffectivelyAtMostUnary(HloInstruction* hlo);

  // Returns true if fusing producer into consumer would cause producer to be
  // duplicated. This is the case if producer has uses other than consumer.
  bool FusionWouldDuplicate(const HloInstruction& producer,
                            const HloInstruction& consumer) {
    return !(producer.users().size() == 1 && consumer.IsUserOf(&producer));
  }

  // Forwards to the `is_expensive` predicate supplied at construction.
  bool is_expensive(const HloInstruction& instruction) {
    return is_expensive_(instruction);
  }

  // Whether multi-output fusion would introduce a cycle into the HLO graph.
  bool MultiOutputFusionCreatesCycle(HloInstruction* producer,
                                     HloInstruction* consumer);

  // Current HloComputation instance the loop fuser is traversing.
  HloComputation* computation_;
  // Module owning computation_ (presumably assigned during Run() — confirm
  // against the .cc).
  HloModule* module_;

  // Reachability information for the current computation.
  std::unique_ptr<HloReachabilityMap> reachability_;

  FusionConfigCollection config_collection_mode() {
    return config_collection_mode_;
  }

 private:
  // The set of producers whose consumers we cannot fuse into.
  using HloInstructionSet = std::unordered_set<HloInstruction*>;

  HloInstruction* AddFusionInstruction(HloInstruction* producer,
                                       HloInstruction* consumer);

  // Whether or not we can fuse producer into consumer on all paths
  // from the producer to the consumer where nodes are HLOs and edges are uses.
  //
  // A map from <producer, consumer> to a bool is required as the result cache
  // to store and query the results of calls to this function, in order to avoid
  // repeated computations.
  bool CanFuseOnAllPaths(
      HloInstruction* producer, HloInstruction* consumer,
      const HloInstructionSet& do_not_fuse,
      absl::flat_hash_map<std::pair<HloInstruction*, HloInstruction*>, bool>*
          result_cache);

  // Computes the set of nodes that we do not want to fuse into any of their
  // consumers based on a global analysis of the HLO graph.
  HloInstructionSet ComputeGloballyUnfusible(
      absl::Span<HloInstruction* const> post_order);

  // Used to determine if an HLO is expensive. Expensive operations will not be
  // duplicated.
  std::function<bool(const HloInstruction& instruction)> is_expensive_;

  // Returns whether we may duplicate an instruction if we want to fuse it.
  bool may_duplicate_;

  // Configuration mode.
  FusionConfigCollection config_collection_mode_;

  TF_DISALLOW_COPY_AND_ASSIGN(InstructionFusion);
};
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_INSTRUCTION_FUSION_H_
| 43.982558 | 80 | 0.735096 |
2cc597b04f54141cdcea0f7bab69e0b0590f47df | 572 | c | C | gcc-gcc-7_3_0-release/gcc/testsuite/gcc.target/i386/pr37434-3.c | best08618/asylo | 5a520a9f5c461ede0f32acc284017b737a43898c | [
"Apache-2.0"
] | 7 | 2020-05-02T17:34:05.000Z | 2021-10-17T10:15:18.000Z | gcc-gcc-7_3_0-release/gcc/testsuite/gcc.target/i386/pr37434-3.c | best08618/asylo | 5a520a9f5c461ede0f32acc284017b737a43898c | [
"Apache-2.0"
] | null | null | null | gcc-gcc-7_3_0-release/gcc/testsuite/gcc.target/i386/pr37434-3.c | best08618/asylo | 5a520a9f5c461ede0f32acc284017b737a43898c | [
"Apache-2.0"
] | 2 | 2020-07-27T00:22:36.000Z | 2021-04-01T09:41:02.000Z | /* { dg-do compile } */
/* { dg-options "-O2 -msse4.1" } */
typedef char __v16qi __attribute__ ((__vector_size__ (16)));
typedef long long __m128i __attribute__ ((__vector_size__ (16)));
/* Builds a 16-byte vector whose two 8-byte halves each contain the seven
   consecutive bytes read from `val` followed by a zero pad byte.
   The D20xx names mirror the GIMPLE temporaries this testcase was reduced
   from (GCC PR target/37434) — do not "clean them up". */
__m128i Set_AC4R_SETUP_I( const char *val ) {
  char D2073 = *val;
  char D2074 = *(val + 1);
  char D2075 = *(val + 2);
  char D2076 = *(val + 3);
  char D2077 = *(val + 4);
  char D2078 = *(val + 5);
  char D2079 = *(val + 6);
  /* Same 7 bytes + zero in both the low and high halves of the vector. */
  __v16qi D2094 = {D2073, D2074, D2075, D2076, D2077, D2078, D2079, 0,
                   D2073, D2074, D2075, D2076, D2077, D2078, D2079, 0};
  return (__m128i)D2094;
}
| 31.777778 | 71 | 0.620629 |
aacf3afebca913f0ce0a390b6053ba7cd717912e | 1,406 | h | C | inc/core/Span.h | duhone/core | 16b1880a79ce53f8e132a9d576d5c0b64788d865 | [
"MIT"
] | null | null | null | inc/core/Span.h | duhone/core | 16b1880a79ce53f8e132a9d576d5c0b64788d865 | [
"MIT"
] | null | null | null | inc/core/Span.h | duhone/core | 16b1880a79ce53f8e132a9d576d5c0b64788d865 | [
"MIT"
] | null | null | null | #pragma once
#include "core/Log.h"
#include <type_traits>
namespace CR::Core {
// once std::span is available(C++20), get rid of this.
template<typename T>
class Span final {
public:
Span() = default;
template<size_t N>
Span(T (&a_data)[N]) : m_data(a_data), m_size(N) {}
Span(T* a_data, size_t a_size) : m_data(a_data), m_size(a_size) {}
~Span() = default;
Span(const Span&) = default;
Span(Span&&) noexcept = default;
Span& operator=(const Span&) = default;
Span& operator=(Span&&) noexcept = default;
T& operator[](size_t a_index) {
Log::Assert(a_index < m_size, "span: out of bounds");
return m_data[a_index];
}
const T& operator[](size_t a_index) const {
Log::Assert(a_index < m_size, "span: out of bounds");
return m_data[a_index];
}
[[nodiscard]] T* data() { return m_data; }
[[nodiscard]] const T* data() const { return m_data; }
[[nodiscard]] size_t size() const { return m_size; }
[[nodiscard]] T* begin() { return m_data; }
[[nodiscard]] const T* begin() const { return m_data; }
[[nodiscard]] const T* cbegin() const { return m_data; }
[[nodiscard]] T* end() { return m_data + m_size; }
[[nodiscard]] const T* end() const { return m_data + m_size; }
[[nodiscard]] const T* cend() const { return m_data + m_size; }
private:
T* m_data{nullptr};
size_t m_size{0};
};
} // namespace CR::Core
| 26.037037 | 68 | 0.624467 |
5ce66e94a820ffc3a1cb7ab85a1737b1ae5eee76 | 1,790 | h | C | hamonize-admin/core/include/CryptoCore.h | bsairline/hamonize | 6632d93b0149ed300d12c4eeb06cfc4fb01fce92 | [
"Apache-2.0"
] | null | null | null | hamonize-admin/core/include/CryptoCore.h | bsairline/hamonize | 6632d93b0149ed300d12c4eeb06cfc4fb01fce92 | [
"Apache-2.0"
] | 1 | 2022-03-25T19:24:44.000Z | 2022-03-25T19:24:44.000Z | hamonize-admin/core/include/CryptoCore.h | gon1942/hamonize | 0456d934569ad664e9f71c6355424426654caabf | [
"Apache-2.0",
"MIT"
] | null | null | null | /*
* CryptoCore.h - core functions for crypto features
*
* Copyright (c) 2017-2021 Tobias Junghans <tobydox@veyon.io>
*
* This file is part of Veyon - https://veyon.io
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program (see COPYING); if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
*/
#pragma once
#include "VeyonCore.h"
#include <QtCrypto>
// clazy:excludeall=rule-of-three
class VEYON_CORE_EXPORT CryptoCore
{
public:
	// Aliases so callers do not have to name QCA types directly.
	using KeyGenerator = QCA::KeyGenerator;
	using PrivateKey = QCA::PrivateKey;
	using PublicKey = QCA::PublicKey;
	using SecureArray = QCA::SecureArray;
	using PlaintextPassword = SecureArray;

	enum {
		RsaKeySize = 4096,     // RSA key length (bits, per QCA convention)
		ChallengeSize = 128,   // size of a generated challenge — presumably bytes; confirm in the .cpp
	};

	static constexpr QCA::EncryptionAlgorithm DefaultEncryptionAlgorithm = QCA::EME_PKCS1_OAEP;
	static constexpr QCA::SignatureAlgorithm DefaultSignatureAlgorithm = QCA::EMSA3_SHA512;

	CryptoCore();
	~CryptoCore();

	// Returns a fresh random challenge blob of ChallengeSize.
	static QByteArray generateChallenge();

	// Password round-trip; presumably uses m_defaultPrivateKey — confirm in
	// the implementation before relying on key selection.
	QString encryptPassword( const PlaintextPassword& password ) const;
	PlaintextPassword decryptPassword( const QString& encryptedPassword ) const;

private:
	// Keeps the QCA crypto backend initialized for this object's lifetime.
	QCA::Initializer m_qcaInitializer;
	PrivateKey m_defaultPrivateKey;

};
| 28.412698 | 92 | 0.756983 |
d6654edeb8626e30ca6c7d2a0dc2c3fc1f8a67b4 | 2,712 | h | C | com/win32comext/authorization/src/PyGSecurityInformation.h | huanyin88/Mod-Pywin32-For-Python3.x-DDE | 992931aa534357d54aaac34077f0128d3a740e5e | [
"Apache-2.0"
] | 3 | 2020-06-18T16:57:44.000Z | 2020-07-21T17:52:06.000Z | com/win32comext/authorization/src/PyGSecurityInformation.h | huanyin88/Mod-Pywin32-For-Python3.x-DDE | 992931aa534357d54aaac34077f0128d3a740e5e | [
"Apache-2.0"
] | null | null | null | com/win32comext/authorization/src/PyGSecurityInformation.h | huanyin88/Mod-Pywin32-For-Python3.x-DDE | 992931aa534357d54aaac34077f0128d3a740e5e | [
"Apache-2.0"
] | null | null | null | // @doc
// This file declares the ISecurityInformation Interface and Gateway for Python.
// Generated by makegw.py
// ---------------------------------------------------
//
// Interface Declaration
#include "aclui.h"
// ---------------------------------------------------
//
// Gateway Declaration
// @object PyGSecurityInformation|Gateway wrapper for the implement-only ISecurityInformation interface
class PyGSecurityInformation : private PyGatewayBase, public ISecurityInformation {
   protected:
	PyGSecurityInformation(PyObject *instance) : PyGatewayBase(instance)
	{
		// Start with empty cached tables; per the note at the bottom of the
		// class they are allocated later and released by the destructor via
		// the Free* helpers.
		ZeroMemory(&ObjectInfo, sizeof(ObjectInfo));
		AccessRights = NULL;
		cAccessRights = 0;
		InheritTypes = NULL;
		cInheritTypes = 0;
		ObjectInfoAcquired = FALSE;
	}
	PYGATEWAY_MAKE_SUPPORT2(PyGSecurityInformation, ISecurityInformation, IID_ISecurityInformation, PyGatewayBase)

	// Fix: a member declared inside its own class must not carry the class-name
	// qualifier ("extra qualification" is ill-formed C++; GCC/Clang reject it,
	// older MSVC merely tolerated it).
	~PyGSecurityInformation(void);

	// ISecurityInformation
	// @pymeth GetObjectInformation|Returns information identifying the object
	STDMETHOD(GetObjectInformation)(PSI_OBJECT_INFO pObjectInfo);
	// @pymeth GetSecurity|Requests the object's current security descriptor
	STDMETHOD(GetSecurity)
	(SECURITY_INFORMATION RequestedInformation, PSECURITY_DESCRIPTOR *ppSecurityDescriptor, BOOL fDefault);
	// @pymeth SetSecurity|Applies the modified security information to the object
	STDMETHOD(SetSecurity)(SECURITY_INFORMATION SecurityInformation, PSECURITY_DESCRIPTOR pSecurityDescriptor);
	// @pymeth GetAccessRights|Requests the permission flags that will be available for user to set
	STDMETHOD(GetAccessRights)
	(const GUID *pguidObjectType, DWORD dwFlags, PSI_ACCESS *ppAccess, ULONG *pcAccesses, ULONG *piDefaultAccess);
	// @pymeth MapGeneric|Translates generic permission flags into specific flags
	STDMETHOD(MapGeneric)(const GUID *pguidObjectType, UCHAR *pAceFlags, ACCESS_MASK *pMask);
	// @pymeth GetInheritTypes|Retrieves inheritance flags that will be shown in dialog for containers
	STDMETHOD(GetInheritTypes)(PSI_INHERIT_TYPE *ppInheritTypes, ULONG *pcInheritTypes);
	// @pymeth PropertySheetPageCallback|Invoked each time a property sheet page is created or destroyed
	STDMETHOD(PropertySheetPageCallback)(HWND hwnd, UINT uMsg, SI_PAGE_TYPE uPage);

	// Release helpers for the cached tables below.
	void FreeObjectInfo(void);
	void FreeAccessRights(void);
	void FreeInheritTypes(void);

	// Whether ObjectInfo has been populated (see GetObjectInformation).
	BOOL ObjectInfoAcquired;
	// allocated memory for info passed to system, structs and members freed by class destructor
	SI_OBJECT_INFO ObjectInfo;
	PSI_ACCESS AccessRights;
	PSI_INHERIT_TYPE InheritTypes;
	ULONG cAccessRights, cInheritTypes;
};
| 45.2 | 114 | 0.745206 |
65346760318fd92c21d67fd2155672ea2378fecc | 430 | h | C | libs/common/include/bitmap.h | yashrajkakkad/resea | 985ddb887b460f422b06a081a878598c79e8d8e0 | [
"CC0-1.0",
"MIT"
] | null | null | null | libs/common/include/bitmap.h | yashrajkakkad/resea | 985ddb887b460f422b06a081a878598c79e8d8e0 | [
"CC0-1.0",
"MIT"
] | null | null | null | libs/common/include/bitmap.h | yashrajkakkad/resea | 985ddb887b460f422b06a081a878598c79e8d8e0 | [
"CC0-1.0",
"MIT"
] | null | null | null | #ifndef __BITMAP_H__
#define __BITMAP_H__
#include <types.h>
#include <string.h>
/* Bits stored in one byte of bitmap backing storage. */
#define BITS_PER_BYTE 8
/* Bytes needed to hold an n-bit bitmap (n rounded up to a whole byte). */
#define BITMAP_SIZE(n) (ALIGN_UP(n, BITS_PER_BYTE) / BITS_PER_BYTE)

/* NOTE(review): `size` in the prototypes below is presumably the bitmap
   length in bits — confirm against the implementations before relying on it.
   set/clear/get/fill semantics are inferred from the names; see bitmap.c. */
void bitmap_fill(uint8_t *bitmap, size_t size, int value);
int bitmap_get(uint8_t *bitmap, size_t size, size_t index);
void bitmap_set(uint8_t *bitmap, size_t size, size_t index);
void bitmap_clear(uint8_t *bitmap, size_t size, size_t index);
#endif
| 26.875 | 68 | 0.772093 |
ca91f2765284026a372b0f8447a1aa1027a6df9d | 1,059 | h | C | C_Piscine/day11/ex01/ft_list.h | ReemEzeddin/42_Silicon_Valley-Projects | 448b37e455e92abb2f1ac3cf11345a76200ca32c | [
"MIT"
] | null | null | null | C_Piscine/day11/ex01/ft_list.h | ReemEzeddin/42_Silicon_Valley-Projects | 448b37e455e92abb2f1ac3cf11345a76200ca32c | [
"MIT"
] | null | null | null | C_Piscine/day11/ex01/ft_list.h | ReemEzeddin/42_Silicon_Valley-Projects | 448b37e455e92abb2f1ac3cf11345a76200ca32c | [
"MIT"
] | 1 | 2021-05-13T07:58:52.000Z | 2021-05-13T07:58:52.000Z | /* ************************************************************************** */
/* */
/* ::: :::::::: */
/* ft_list.h :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: reezeddi <marvin@42.f> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2020/09/18 07:10:33 by reezeddi #+# #+# */
/* Updated: 2020/09/18 07:10:45 by reezeddi ### ########.fr */
/* */
/* ************************************************************************** */
#ifndef FT_LIST_H
# define FT_LIST_H

/*
** Singly linked list node: `next` chains to the following element and
** `data` points to a caller-owned, untyped payload.
*/
typedef struct s_list
{
	struct s_list	*next;
	void			*data;
}					t_list;

/*
** Allocates and returns a new element wrapping `data`.
** NOTE(review): the initial value of `next` is set by the implementation
** (ft_create_elem.c) — confirm it is NULL before relying on it.
*/
t_list	*ft_create_elem(void *data);

#endif
| 42.36 | 80 | 0.19169 |
aa1d0670838f7ac9d5582de1ad254bd41d9e2604 | 63 | h | C | gen_version.h | spider-pcn/lightning | d20435807840070270688f93a71c6c6e03c4caa5 | [
"MIT"
] | null | null | null | gen_version.h | spider-pcn/lightning | d20435807840070270688f93a71c6c6e03c4caa5 | [
"MIT"
] | null | null | null | gen_version.h | spider-pcn/lightning | d20435807840070270688f93a71c6c6e03c4caa5 | [
"MIT"
] | null | null | null | #define VERSION "v0.8.0-35-g8611354"
#define BUILD_FEATURES ""
| 21 | 36 | 0.746032 |
2c28fb96d1146042aa7f329f1033ad70a186f267 | 7,447 | h | C | ios/web/navigation/navigation_item_impl.h | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ios/web/navigation/navigation_item_impl.h | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ios/web/navigation/navigation_item_impl.h | Ron423c/chromium | 2edf7b980065b648f8b2a6e52193d83832fe36b7 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-03-07T14:20:02.000Z | 2021-03-07T14:20:02.000Z | // Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_WEB_NAVIGATION_NAVIGATION_ITEM_IMPL_H_
#define IOS_WEB_NAVIGATION_NAVIGATION_ITEM_IMPL_H_
#import <Foundation/Foundation.h>
#include <memory>
#include "base/strings/string16.h"
#include "ios/web/navigation/error_retry_state_machine.h"
#include "ios/web/public/favicon/favicon_status.h"
#import "ios/web/public/navigation/navigation_item.h"
#include "ios/web/public/navigation/referrer.h"
#include "ios/web/public/security/ssl_status.h"
#include "url/gurl.h"
namespace web {
class NavigationItemStorageBuilder;
enum class NavigationInitiationType;
// Implementation of NavigationItem.
class NavigationItemImpl : public web::NavigationItem {
 public:
  // Creates a default NavigationItemImpl.
  NavigationItemImpl();
  ~NavigationItemImpl() override;

  // Since NavigationItemImpls own their facade delegates, there is no implicit
  // copy constructor (scoped_ptrs can't be copied), so one is defined here.
  NavigationItemImpl(const NavigationItemImpl& item);

  // NavigationItem implementation:
  int GetUniqueID() const override;
  void SetOriginalRequestURL(const GURL& url) override;
  const GURL& GetOriginalRequestURL() const override;
  void SetURL(const GURL& url) override;
  const GURL& GetURL() const override;
  void SetReferrer(const web::Referrer& referrer) override;
  const web::Referrer& GetReferrer() const override;
  void SetVirtualURL(const GURL& url) override;
  const GURL& GetVirtualURL() const override;
  void SetTitle(const base::string16& title) override;
  const base::string16& GetTitle() const override;
  void SetPageDisplayState(const PageDisplayState& display_state) override;
  const PageDisplayState& GetPageDisplayState() const override;
  const base::string16& GetTitleForDisplay() const override;
  void SetTransitionType(ui::PageTransition transition_type) override;
  ui::PageTransition GetTransitionType() const override;
  const FaviconStatus& GetFavicon() const override;
  FaviconStatus& GetFavicon() override;
  const SSLStatus& GetSSL() const override;
  SSLStatus& GetSSL() override;
  void SetTimestamp(base::Time timestamp) override;
  base::Time GetTimestamp() const override;
  void SetUserAgentType(UserAgentType type) override;
  UserAgentType GetUserAgentType() const override;
  bool HasPostData() const override;
  NSDictionary* GetHttpRequestHeaders() const override;
  void AddHttpRequestHeaders(NSDictionary* additional_headers) override;

  // Serialized representation of the state object that was used in conjunction
  // with a JavaScript window.history.pushState() or
  // window.history.replaceState() call that created or modified this
  // NavigationItem. Intended to be used for JavaScript history operations and
  // will be nil in most cases.
  void SetSerializedStateObject(NSString* serialized_state_object);
  NSString* GetSerializedStateObject() const;

  // Whether or not this item was created by calling history.pushState().
  void SetIsCreatedFromPushState(bool push_state);
  bool IsCreatedFromPushState() const;

  // Whether the state for this navigation has been changed by
  // history.replaceState().
  // TODO(crbug.com/659816): This state is only tracked because of flaky early
  // page script injection. Once the root cause of this flake is found, this
  // can be removed.
  void SetHasStateBeenReplaced(bool replace_state);
  bool HasStateBeenReplaced() const;

  // Whether this navigation is the result of a hash change.
  void SetIsCreatedFromHashChange(bool hash_change);
  bool IsCreatedFromHashChange() const;

  // Initiation type of this pending navigation. Resets to NONE after commit.
  void SetNavigationInitiationType(
      web::NavigationInitiationType navigation_initiation_type);
  web::NavigationInitiationType NavigationInitiationType() const;

  // Whether or not to bypass showing the repost form confirmation when loading
  // a POST request. Set to YES for browser-generated POST requests.
  void SetShouldSkipRepostFormConfirmation(bool skip);
  bool ShouldSkipRepostFormConfirmation() const;

  // Whether or not to bypass serializing this item to session storage. Set to
  // YES to skip saving this page (and therefore restoring this page).
  void SetShouldSkipSerialization(bool skip);
  bool ShouldSkipSerialization() const;

  // Data submitted with a POST request, persisted for resubmits.
  void SetPostData(NSData* post_data);
  NSData* GetPostData() const;

  // Removes the header for |key| from |http_request_headers_|.
  void RemoveHttpRequestHeaderForKey(NSString* key);

  // Removes all http headers from |http_request_headers_|.
  void ResetHttpRequestHeaders();

  // Once a navigation item is committed, we should no longer track
  // non-persisted state, as documented on the members below.
  void ResetForCommit();

  // Returns the state machine that manages the displaying and retrying of load
  // error for this item.
  ErrorRetryStateMachine& error_retry_state_machine();

  // Returns the title string to be used for a page with |url| if that page
  // doesn't specify a title.
  static base::string16 GetDisplayTitleForURL(const GURL& url);

  // Used only by NavigationManagerImpl. SetUntrusted() is only used for
  // Visible or LastCommitted NavigationItems where the |url_| may be incorrect
  // due to timining problems or bugs in WKWebView.
  void SetUntrusted();
  bool IsUntrusted();

  // Restores the state of the |other| navigation item in this item.
  void RestoreStateFromItem(NavigationItem* other);

#ifndef NDEBUG
  // Returns a human-readable description of the state for debugging purposes.
  NSString* GetDescription() const;
#endif

 private:
  // The NavigationManItemStorageBuilder functions require access to
  // private variables of NavigationItemImpl.
  friend NavigationItemStorageBuilder;

  // Identifier returned by GetUniqueID().
  int unique_id_;

  // Backing storage for the NavigationItem accessors declared above.
  GURL original_request_url_;
  GURL url_;
  Referrer referrer_;
  GURL virtual_url_;
  base::string16 title_;
  PageDisplayState page_display_state_;
  ui::PageTransition transition_type_;
  FaviconStatus favicon_;
  SSLStatus ssl_;
  base::Time timestamp_;
  UserAgentType user_agent_type_;
  NSMutableDictionary* http_request_headers_;

  // Backing storage for the pushState/replaceState/hash-change/POST state
  // documented on the corresponding accessors above.
  NSString* serialized_state_object_;
  bool is_created_from_push_state_;
  bool has_state_been_replaced_;
  bool is_created_from_hash_change_;
  bool should_skip_repost_form_confirmation_;
  bool should_skip_serialization_;
  NSData* post_data_;

  // See error_retry_state_machine().
  ErrorRetryStateMachine error_retry_state_machine_;

  // The navigation initiation type of the item. This decides whether the URL
  // should be displayed before the navigation commits. It is cleared in
  // |ResetForCommit| and not persisted.
  web::NavigationInitiationType navigation_initiation_type_;

  // Used only by NavigationManagerImpl. |is_untrusted_| is only |true| for
  // Visible or LastCommitted NavigationItems where the |url_| may be incorrect
  // due to timining problems or bugs in WKWebView.
  bool is_untrusted_;

  // This is a cached version of the result of GetTitleForDisplay. When the URL,
  // virtual URL, or title is set, this should be cleared to force a refresh.
  mutable base::string16 cached_display_title_;

  // Copy and assignment is explicitly allowed for this class.
};
} // namespace web
#endif // IOS_WEB_NAVIGATION_NAVIGATION_ITEM_IMPL_H_
| 39.611702 | 80 | 0.782328 |
c8fb6c474df5b027b20bd20f467978588bba39eb | 2,440 | h | C | node_modules/sharp/vendor/8.10.6/include/libgsf-1/gsf/gsf-input-impl.h | DABOZE/Queen-Alexa | d24743fd7b48e6e68d08b74c56f00eb73035d1c0 | [
"MIT"
] | 134 | 2019-07-24T20:25:17.000Z | 2022-03-14T03:33:48.000Z | node_modules/sharp/vendor/8.10.6/include/libgsf-1/gsf/gsf-input-impl.h | DABOZE/Queen-Alexa | d24743fd7b48e6e68d08b74c56f00eb73035d1c0 | [
"MIT"
] | 309 | 2021-06-11T06:35:53.000Z | 2022-03-31T13:26:41.000Z | node_modules/sharp/vendor/8.10.6/include/libgsf-1/gsf/gsf-input-impl.h | DABOZE/Queen-Alexa | d24743fd7b48e6e68d08b74c56f00eb73035d1c0 | [
"MIT"
] | 67 | 2018-10-29T09:50:49.000Z | 2022-01-06T07:35:56.000Z | /* vim: set sw=8: -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */
/*
* gsf-input-impl.h: implementation details of GsfInput
*
* Copyright (C) 2002-2006 Jody Goldberg (jody@gnome.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2.1 of the GNU Lesser General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
* USA
*/
#ifndef GSF_INPUT_IMPL_H
#define GSF_INPUT_IMPL_H
#include <sys/stat.h>
#include <gsf/gsf-fwd.h>
G_BEGIN_DECLS
struct _GsfInput {
	GObject g_object;

	/* Stream length and current read position (gsf_off_t byte offsets —
	 * presumably; confirm against gsf-input.c). */
	gsf_off_t size, cur_offset;
	/* Display name; may be derived from a filename (see setters below). */
	char *name;
	/* Enclosing compound document, if any (see gsf_input_set_container). */
	GsfInfile *container;
};

typedef struct {
	GObjectClass g_object_class;

	/* Virtual table filled in by concrete input sources. Semantics are
	 * inferred from the public GsfInput API — consult the libgsf docs. */
	GsfInput *(*Dup) (GsfInput *input, GError **err);
	const guint8 *(*Read) (GsfInput *input, size_t num_bytes,
			       guint8 *optional_buffer);
	gboolean (*Seek) (GsfInput *input, gsf_off_t offset,
			  GSeekType whence);
	GsfInput *(*OpenSibling) (GsfInput const *input,
				  char const *name, GError **err);

	/* Padding for future expansion */
	void (*_gsf_reserved0) (void);
	void (*_gsf_reserved1) (void);
	void (*_gsf_reserved2) (void);
	void (*_gsf_reserved3) (void);
} GsfInputClass;

#define GSF_INPUT_CLASS(k) (G_TYPE_CHECK_CLASS_CAST ((k), GSF_INPUT_TYPE, GsfInputClass))
#define GSF_IS_INPUT_CLASS(k) (G_TYPE_CHECK_CLASS_TYPE ((k), GSF_INPUT_TYPE))

/* protected -- setters for the fields above, for use by subclasses: */
gboolean gsf_input_set_name (GsfInput *input, char const *name);
gboolean gsf_input_set_name_from_filename (GsfInput *input, char const *filename);
gboolean gsf_input_set_container (GsfInput *input, GsfInfile *container);
gboolean gsf_input_set_size (GsfInput *input, gsf_off_t size);
gboolean gsf_input_set_modtime (GsfInput *input, GDateTime *modtime);
/* Fallback Seek implementation that emulates seeking by reading forward. */
gboolean gsf_input_seek_emulate (GsfInput *input, gsf_off_t pos);
gboolean gsf_input_set_modtime_from_stat (GsfInput *input,
					  const struct stat *st);
G_END_DECLS
#endif /* GSF_INPUT_IMPL_H */
| 32.972973 | 92 | 0.730738 |
08033ba1eabc8f83a1b33b0242cb89c0f5e21aca | 24,837 | c | C | src/monk.c | DikuMUDOmnibus/Fallen-Empires | 8f0ab16e922427ca12dca09851997edf91a6bd83 | [
"DOC"
] | 1 | 2018-08-05T01:49:09.000Z | 2018-08-05T01:49:09.000Z | src/monk.c | DikuMUDOmnibus/Fallen-Empires | 8f0ab16e922427ca12dca09851997edf91a6bd83 | [
"DOC"
] | null | null | null | src/monk.c | DikuMUDOmnibus/Fallen-Empires | 8f0ab16e922427ca12dca09851997edf91a6bd83 | [
"DOC"
] | null | null | null | /***************************************************************************
* Original Diku Mud copyright (C) 1990, 1991 by Sebastian Hammer, *
* Michael Seifert, Hans Henrik St{rfeldt, Tom Madsen, and Katja Nyboe. *
* *
* Merc Diku Mud improvments copyright (C) 1992, 1993 by Michael *
* Chastain, Michael Quan, and Mitchell Tse. *
* *
* In order to use any part of this Merc Diku Mud, you must comply with *
* both the original Diku license in 'license.doc' as well the Merc *
* license in 'license.txt'. In particular, you may not remove either of *
* these copyright notices. *
* *
* Much time and thought has gone into this software and you are *
* benefitting. We hope that you share your changes too. What goes *
* around, comes around. *
***************************************************************************/
#if defined(macintosh)
#include <types.h>
#else
#include <sys/types.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "merc.h"
#include "player.h"
/*
 * Monk command: recruit a willing mortal avatar into the monk class.
 *
 * The target must be a player avatar in the same room, must not already
 * belong to any other supernatural class, and must have toggled vampirism
 * immunity on (IMM_VAMPIRE doubles as the "willing to be turned" consent
 * flag, as shown by the "unwilling person" message below).  Both parties
 * pay 50000 exp; on success the victim becomes a monk one generation below
 * the guide and inherits the guide's clan and lordship.
 *
 * Fixes in this revision are message-text only (no logic change):
 * grammar of the third-person act() strings and two typos.
 */
void do_guide( CHAR_DATA *ch, char *argument )
{
    CHAR_DATA *victim;
    char arg [MAX_INPUT_LENGTH];

    argument = one_argument( argument, arg );

    if (IS_NPC(ch)) return;
    if (!IS_CLASS(ch, CLASS_MONK) )
    {
	send_to_char("Huh?\n\r",ch);
	return;
    }
    if ( arg[0] == '\0' )
    {
	send_to_char( "Guide whom?\n\r", ch );
	return;
    }
    if ( ( victim = get_char_room( ch, arg ) ) == NULL )
    {
	send_to_char( "They aren't here.\n\r", ch );
	return;
    }
    if ( IS_NPC(victim) )
    {
	send_to_char( "Not on NPC's.\n\r", ch );
	return;
    }
    if ( IS_IMMORTAL(victim) )
    {
	send_to_char( "Not on Immortals.\n\r", ch );
	return;
    }
    if ( ch == victim )
    {
	send_to_char( "You cannot guide yourself.\n\r", ch );
	return;
    }
    if (IS_CLASS(victim, CLASS_MONK))
    {
	send_to_char( "They are already guided.\n\r", ch );
	return;
    }
    /* Note: the !IS_IMMORTAL(victim) clause is redundant here — immortals
     * were already rejected above — but it is kept to preserve behavior. */
    if ( victim->level != LEVEL_AVATAR && !IS_IMMORTAL(victim) )
    {
	send_to_char( "You can only guide avatars.\n\r", ch );
	return;
    }
    if (IS_CLASS(victim, CLASS_VAMPIRE) || IS_SET(victim->pcdata->stats[UNI_AFF], VAM_MORTAL))
    {
	send_to_char( "You are unable to guide vampires!\n\r", ch );
	return;
    }
    if (IS_CLASS(victim, CLASS_WEREWOLF))
    {
	send_to_char( "You are unable to guide werewolves!\n\r", ch );
	return;
    }
    if (IS_CLASS(victim, CLASS_DEMON) || IS_SET(victim->special, SPC_CHAMPION))
    {
	send_to_char( "You are unable to guide demons!\n\r", ch );
	return;
    }
    if (IS_CLASS(victim, CLASS_HIGHLANDER))
    {
	send_to_char( "You are unable to guide highlanders.\n\r", ch );
	return;
    }
    if (IS_CLASS(victim, CLASS_NINJA))
    {
	send_to_char( "You are unable to guide ninjas.\n\r", ch);
	return;
    }
    if (!IS_IMMUNE(victim,IMM_VAMPIRE))
    {
	send_to_char( "You cannot guide an unwilling person.\n\r", ch );
	return;
    }
    /* Both sides must be able to pay before anything is deducted. */
    if (ch->exp < 50000)
    {
	send_to_char("You cannot afford the 50000 exp required to guide them.\n\r",ch);
	return;
    }
    if (victim->exp < 50000)
    {
	send_to_char("They cannot afford the 50000 exp required to be guided by you.\n\r",ch);
	return;
    }
    ch->exp -= 50000;
    victim->exp -= 50000;

    /* Grammar fix: the $n (third-person) messages previously read "guide". */
    act("You guide $N in the ways of god.", ch, NULL, victim, TO_CHAR);
    act("$n guides $N in the ways of god.", ch, NULL, victim, TO_NOTVICT);
    act("$n guides you in the ways of god.", ch, NULL, victim, TO_VICT);

    victim->level = LEVEL_MONK;
    victim->trust = LEVEL_MONK;
    send_to_char( "You are now a monk.\n\r", victim );

    /* Release old allegiances before inheriting the guide's clan/lord. */
    free_string(victim->lord);
    free_string(victim->clan);
    victim->clan=str_dup(ch->clan);
    victim->lord = str_dup(ch->name);
    victim->class = CLASS_MONK;
    victim->pcdata->stats[UNI_GEN] = ch->pcdata->stats[UNI_GEN]+1;
    save_char_obj(ch);
    save_char_obj(victim);
    return;
}
/* One row per mantra, in learning order.  Index i is unlocked when the
 * monk's powers[PMONK] counter equals i; learning it raises the counter
 * to i + 1.  Separate list/name strings preserve the original output
 * exactly ("Trio Mantra" in the list, "Trio" in the messages). */
static const struct
{
    const char *keyword;   /* argument the player must type (quoted) */
    const char *list;      /* name shown in the mantra list */
    const char *name;      /* name used in the learn/refuse messages */
    int         cost;      /* primal practice cost */
} mantra_table[] =
{
    { "eyes of god",        "Eyes of God",        "Eyes of God",        10  },
    { "trio mantra",        "Trio Mantra",        "Trio",               20  },
    { "sacred invis",       "Sacred Invis",       "Sacred Invis",       30  },
    { "heart of oak",       "Heart of Oak",       "Heart of Oak",       40  },
    { "adamantium hands",   "Adamantium Hands",   "Adamantium Hands",   50  },
    { "steel shield",       "Steel Shield",       "Steel Shield",       60  },
    { "almighty favor",     "Almighty Favor",     "Almighty Favor",     70  },
    { "dark blaze",         "Dark Blaze",         "Dark Blaze",         80  },
    { "celestial path",     "Celestial Path",     "Celestial Path",     90  },
    { "prayer of the ages", "Prayer of the Ages", "Prayer of the Ages", 100 },
    { "cloak of life",      "Cloak of Life",      "Cloak of Life",      100 }
};

#define MAX_MANTRA ((int)(sizeof(mantra_table) / sizeof(mantra_table[0])))

/*
 * do_mantra - monk mantra trainer.
 *
 * With no argument: list the mantras not yet learned.  With a (quoted)
 * mantra name: learn it, provided all earlier mantras are known and the
 * monk can pay the primal cost.  With any other argument: report how
 * many mantras are known.  Behavior is identical to the old eleven
 * copy-pasted branches; they are now driven by mantra_table above.
 */
void do_mantra( CHAR_DATA *ch, char *argument )
{
    char arg1 [MAX_INPUT_LENGTH];
    char buf[MAX_STRING_LENGTH];
    int i;

    argument = one_argument( argument, arg1 );
    if (IS_NPC(ch)) return;
    if (!IS_CLASS(ch, CLASS_MONK))
    {
        send_to_char("Huh?\n\r",ch);
        return;
    }
    if (arg1[0] == '\0')
    {
        send_to_char(" Mantras.\n\r",ch);
        send_to_char("- Remember to put single quotes ( ' ) around the power.\n\r", ch);
        /* Show every mantra the monk has not learned yet (index >= powers). */
        for ( i = 0; i < MAX_MANTRA; i++ )
        {
            if ( ch->pcdata->powers[PMONK] <= i )
            {
                sprintf( buf, "%s\n\r", mantra_table[i].list );
                send_to_char( buf, ch );
            }
        }
        return;
    }
    for ( i = 0; i < MAX_MANTRA; i++ )
    {
        if ( str_cmp(arg1, mantra_table[i].keyword) ) continue;

        if ( ch->pcdata->powers[PMONK] > i )
        {
            sprintf( buf, "You already have the %s Mantra.\n\r", mantra_table[i].name );
            send_to_char( buf, ch );
        }
        else if ( i > 0 && ch->pcdata->powers[PMONK] < i )
        {
            /* Mantras must be learned strictly in order (no check for the first). */
            send_to_char("You need all the mantras before this one to receive this power.\n\r", ch);
        }
        else if ( ch->practice < mantra_table[i].cost )
        {
            sprintf( buf, "You don't have the %d required primal.\n\r", mantra_table[i].cost );
            send_to_char( buf, ch );
        }
        else
        {
            ch->pcdata->powers[PMONK] += 1;
            ch->practice -= mantra_table[i].cost;
            sprintf( buf, "You master the %s Mantra.\n\r", mantra_table[i].name );
            send_to_char( buf, ch );
        }
        return;
    }
    /* Unrecognized argument: report the count of learned mantras. */
    sprintf(buf,"Mantras Learned (%d).\n\r", ch->pcdata->powers[PMONK]);
    send_to_char(buf,ch);
    return;
}
/*
 * do_eyesofgod - toggle the monk's holy sight (PLR_HOLYLIGHT).
 * Requires the first mantra (powers[PMONK] >= 1).  No cost.
 */
void do_eyesofgod( CHAR_DATA *ch, char *argument )
{
    if ( IS_NPC(ch) )
        return;
    if ( !IS_CLASS(ch, CLASS_MONK) )
    {
        send_to_char("Huh?\n\r", ch);
        return;
    }
    if ( ch->pcdata->powers[PMONK] < 1 )
    {
        send_to_char("You have not learned the Eyes of God Mantra.\n\r", ch);
        return;
    }

    /* Flip the flag, announcing the new state. */
    if ( !IS_SET(ch->act, PLR_HOLYLIGHT) )
    {
        send_to_char( "God answers your prayers and gives you his vision.\n\r", ch );
        SET_BIT(ch->act, PLR_HOLYLIGHT);
    }
    else
    {
        REMOVE_BIT(ch->act, PLR_HOLYLIGHT);
        send_to_char( "Your view shimmers into mortal vision.\n\r", ch );
    }
    return;
}
/*
 * do_steelshield - toggle the AFF_STEELSHIELD affect on the monk.
 * Requires the sixth mantra (powers[PMONK] >= 6).  The affect itself is
 * removed elsewhere; see handler.c.
 */
void do_steelshield( CHAR_DATA *ch, char *argument )
{
    if ( IS_NPC(ch) )
        return;
    if ( !IS_CLASS(ch, CLASS_MONK) )
    {
        send_to_char("Huh?\n\r", ch);
        return;
    }
    if ( ch->pcdata->powers[PMONK] < 6 )
    {
        send_to_char("You have not learned the Steel Shield Mantra.\n\r", ch);
        return;
    }

    /* Toggle: raise the shield if it is down, drop it if it is up. */
    if ( !IS_AFFECTED(ch, AFF_STEELSHIELD) )
    {
        send_to_char( "You are surrounded by a steel shield\n\r", ch );
        SET_BIT(ch->affected_by, AFF_STEELSHIELD);
    }
    else
    {
        REMOVE_BIT( ch->affected_by, AFF_STEELSHIELD );
        send_to_char( "Your steel shield disappears.\n\r", ch );
    }
    return;
}
/* Monk Targeting in ninja.c */
/*
 * do_almightyfavour - cast the "godbless" spell on the monk himself.
 * Requires the seventh mantra (powers[PMONK] >= 7) and 1000 mana.
 * The spell is cast at half the monk's spell-circle level and imposes
 * a 12-pulse wait state.
 */
void do_almightyfavour( CHAR_DATA *ch, char *argument )
{
    int sn;
    int level;
    int spelltype;

    if (IS_NPC(ch)) return;
    if (!IS_CLASS(ch, CLASS_MONK) )
    {
        send_to_char("Huh?\n\r", ch );
        return;
    }
    /* Monk class is already guaranteed here; the old extra IS_CLASS
     * test on this branch was always true and has been dropped. */
    if ( ch->pcdata->powers[PMONK] < 7 )
    {
        send_to_char("You have not learned the Almighty Favor Mantra.\n\r", ch);
        return;
    }
    if (ch->mana < 1000)
    {
        send_to_char("You don't have enough mana.\n\r", ch );
        return;
    }
    if ( ( sn = skill_lookup( "godbless" ) ) < 0 ) return;
    spelltype = skill_table[sn].target;
    /* Half level; integer division truncates exactly like the old
     * float "* 0.5" assignment did for non-negative values. */
    level = ch->spl[spelltype] / 2;
    (*skill_table[sn].spell_fun) ( sn, level, ch, ch );
    WAIT_STATE( ch, 12 );
    ch->mana -= 1000;
    return;
}
/*
 * do_darkblaze - totally blind a target in the room (AFF_TOTALBLIND)
 * for 1000 mana.  Requires the eighth mantra (powers[PMONK] >= 8).
 * Note: blazing yourself is allowed - the self-target guard was
 * deliberately commented out in the original source.
 */
void do_darkblaze( CHAR_DATA *ch, char *argument )
{
    CHAR_DATA *victim;
    char arg [MAX_INPUT_LENGTH];

    argument = one_argument( argument, arg );
    if ( arg[0] == '\0' )
    {
        send_to_char("DarkBlaze who?\n\r", ch);
        return;
    }
    if ( !IS_CLASS(ch, CLASS_MONK) )
    {
        send_to_char("Huh?\n\r", ch);
        return;
    }
    /* Class is guaranteed monk at this point. */
    if ( ch->pcdata->powers[PMONK] < 8 )
    {
        send_to_char("You have not learned the Dark Blaze Mantra.\n\r", ch);
        return;
    }
    if ( ch->mana < 1000 )
    {
        send_to_char("You don't have enough mana.\n\r", ch );
        return;
    }
    if ( ( victim = get_char_room( ch, arg ) ) == NULL )
    {
        send_to_char( "They aren't here.\n\r", ch );
        return;
    }
    if ( IS_SET(victim->affected_by, AFF_TOTALBLIND) )
    {
        send_to_char("They are already blinded!\n\r", ch);
        return;
    }

    act("You pray to God and a flash of light blinds $N.",ch,NULL,victim,TO_CHAR);
    act("$n prays to God and a flash of light blinds $N",ch,NULL,victim,TO_NOTVICT);
    act("$n prays to God and a flash of light blinds you.",ch,NULL,victim,TO_VICT);
    SET_BIT(victim->affected_by, AFF_TOTALBLIND);
    ch->mana -= 1000;
    return;
}
/*
 * do_mclan - list every monk in the caller's clan with their current
 * hit/mana/move values and percentages of maximum.
 *
 * Fix: the percentage math is now guarded against zero max values,
 * which previously caused a division-by-zero crash for any listed
 * character with max_hit, max_mana or max_move of 0.
 */
void do_mclan( CHAR_DATA *ch, char *argument )
{
    char buf[MAX_STRING_LENGTH];
    CHAR_DATA *gch;

    if (IS_NPC(ch)) return;
    if (!IS_CLASS(ch, CLASS_MONK) )
    {
        send_to_char("Huh?\n\r",ch);
        return;
    }
    /* A clan name shorter than two characters means no clan at all. */
    if (strlen(ch->clan) < 2)
    {
        send_to_char("You are an outcast!\n\r",ch);
        return;
    }
    sprintf( buf, "%s Monks:\n\r", ch->clan );
    send_to_char( buf, ch );
    send_to_char("[ Name ] [ Hits % ] [ Mana % ] [ Move % ]\n\r", ch );
    for ( gch = char_list; gch != NULL; gch = gch->next )
    {
        if ( IS_NPC(gch) ) continue;
        if ( !IS_CLASS(gch, CLASS_MONK) ) continue;
        if ( !str_cmp(ch->clan,gch->clan) )
        {
            sprintf( buf,
                "[%-16s] [%-6d%3d] [%-6d%3d] [%-6d%3d]\n\r",
                capitalize( gch->name ),
                gch->hit,  gch->max_hit  > 0 ? gch->hit  * 100 / gch->max_hit  : 0,
                gch->mana, gch->max_mana > 0 ? gch->mana * 100 / gch->max_mana : 0,
                gch->move, gch->max_move > 0 ? gch->move * 100 / gch->max_move : 0 );
            send_to_char( buf, ch );
        }
    }
    return;
}
/*
 * do_sacredinvisibility - toggle wizard-style invisibility
 * (PLR_WIZINVIS) on the monk.  Requires the third mantra
 * (powers[PMONK] >= 3).  Turning it on costs 500 move.
 * NOTE(review): the 500 move is deducted without checking that the
 * monk actually has that much - TODO confirm this is intended.
 */
void do_sacredinvisibility( CHAR_DATA *ch, char *argument )
{
    if (IS_NPC(ch)) return;
    if (!IS_CLASS(ch, CLASS_MONK) )
    {
        send_to_char("Huh?\n\r", ch);
        return;
    }
    /* Class is guaranteed monk at this point. */
    if ( ch->pcdata->powers[PMONK] < 3 )
    {
        send_to_char("You have not learned the Sacred Invisibility Mantra.\n\r", ch);
        return;
    }

    if ( !IS_SET(ch->act, PLR_WIZINVIS) )
    {
        send_to_char( "You shield yourself in a shroud of light.\n\r", ch );
        act( "$n is shielded in a shroud of light.", ch, NULL, NULL, TO_ROOM );
        SET_BIT(ch->act, PLR_WIZINVIS);
        ch->move -= 500;
    }
    else
    {
        REMOVE_BIT(ch->act, PLR_WIZINVIS);
        send_to_char( "You appear from a shroud of light.\n\r", ch );
        act( "$n appears from a shroud of light.", ch, NULL, NULL, TO_ROOM );
    }
    return;
}
/*
 * do_celestialpath - teleport the monk to a named character anywhere in
 * the world for 500 move.  Requires the ninth mantra
 * (powers[PMONK] >= 9).  Player targets must have IMM_SUMMON set to
 * accept the arrival.
 *
 * Fixes:
 *  - The follow-up mount move used victim->mount and moved it into the
 *    victim's own room - a no-op.  It now brings the MONK's mount along
 *    to the destination, which is clearly what the code intended.
 *  - Added the IS_NPC guard present in the sibling monk commands, since
 *    ch->pcdata is dereferenced below.
 */
void do_celestialpath( CHAR_DATA *ch, char *argument )
{
    char arg[MAX_INPUT_LENGTH];
    CHAR_DATA *victim;
    CHAR_DATA *mount;

    one_argument( argument, arg );
    if ( IS_NPC(ch) )
        return;
    if ( !IS_CLASS(ch, CLASS_MONK) )
    {
        send_to_char("Huh?\n\r", ch);
        return;
    }
    /* Class is guaranteed monk at this point. */
    if ( ch->pcdata->powers[PMONK] < 9 )
    {
        send_to_char("You have not learned the Celestial Path Mantra.\n\r", ch);
        return;
    }
    if ( arg[0] == '\0' )
    {
        send_to_char( "Celestial path to where?\n\r", ch );
        return;
    }
    if ( ch->position == POS_FIGHTING )
    {
        send_to_char( "No way!  You are fighting.\n\r", ch );
        return;
    }
    if ( ( victim = get_char_world( ch, arg ) ) == NULL )
    {
        send_to_char( "Nobody by that name.\n\r", ch );
        return;
    }
    if ( ch->move < 500 )
    {
        send_to_char( "You don't have the required 500 move.\n\r", ch);
        return;
    }
    if ( !IS_IMMUNE(victim, IMM_SUMMON) && !IS_NPC(victim))
    {
        send_to_char("They do not wish to be astraled to!\n\r", ch);
        return;
    }
    act( "You disappear into the celestial plane", ch, NULL, NULL, TO_CHAR );
    act( "$n disappears into the celestial plane.", ch, NULL, NULL, TO_ROOM );
    char_from_room( ch );
    char_to_room( ch, get_room_index(victim->in_room->vnum) );
    act( "You appear infront of $N", ch, NULL, victim, TO_CHAR );
    act( "$n appears infront of $N.", ch, NULL, victim, TO_ROOM );
    ch->move -= 500;
    do_look( ch, "auto" );
    /* Bring the monk's own mount along to the destination room. */
    if ( (mount = ch->mount) == NULL ) return;
    char_from_room( mount );
    char_to_room( mount, ch->in_room );
    do_look( mount, "auto" );
    return;
}
/*
 * do_palmthrust - monk melee special attack: up to five thrusts, each
 * rolling 0..50 damage, with a spell side effect chosen by the damage
 * band.  Costs 100 mana and 100 move (deducted once, after the loop)
 * and imposes the kick wait state.
 *
 * Damage bands and side effects (grounded in the branches below):
 *    0      miss, no effect
 *    1-5    blindness on the victim
 *    6-10   heal cast on the MONK (note the (ch, ch) target - the weak
 *           hit heals the attacker, not the victim)
 *   11-25   magic missile on the victim
 *   26-30   harm on the victim
 *   31-40   faerie fire on the victim
 *   41-50   curse on the victim
 * The rolled dam is also applied directly to victim->hit each pass.
 */
void do_palmthrust( CHAR_DATA *ch, char *argument )
{
    char arg[MAX_INPUT_LENGTH];
    CHAR_DATA *victim;
    int dam;
    int loop;

    one_argument( argument, arg );
    /* Monks only.  NOTE(review): no IS_NPC guard here, unlike the other
     * monk commands - TODO confirm NPCs can never reach this. */
    if ( !IS_CLASS(ch, CLASS_MONK) )
    {
        send_to_char("Huh?\n\r", ch);
        return;
    }
    if ( arg[0] == '\0' )
    {
        send_to_char( "Thrust what?\n\r", ch );
        return;
    }
    if ( ( victim = get_char_room( ch, arg ) ) == NULL )
    {
        send_to_char( "They aren't here.\n\r", ch );
        return;
    }
    if ( IS_SET(ch->in_room->room_flags,ROOM_SAFE) )
    {
        send_to_char("Not in a safe room!\n\r", ch);
        return;
    }
    if ( ch->move < 100 )
    {
        send_to_char( "You don't have the required 100 move.\n\r", ch);
        return;
    }
    if ( ch->mana < 100 )
    {
        send_to_char( "You don't have the required 100 mana.\n\r", ch);
        return;
    }
    loop = 0;
    /* Up to five thrusts; stop early once the victim's position is no
     * longer above POS_DEAD (updated by update_pos below). */
    while ( loop < 5 && victim->position > POS_DEAD)
    {
        loop=loop+1;
        /* One roll selects both the damage and the side effect band. */
        dam=number_range(0,50);
        if ( dam <= 0)
        {
            act("Your palm thrust misses $N.", ch, NULL, victim, TO_CHAR);
            act("$n's palm thrust misses you.", ch,NULL, victim, TO_VICT);
            act("$n's palm thrust misses $N.", ch,NULL, victim, TO_NOTVICT);
        }
        else if ( dam <= 5 && dam >= 1 )
        {
            act("Your palm thrust barely hits $N.", ch, NULL, victim, TO_CHAR);
            act("$n's palm thrust barely hits you.", ch,NULL, victim, TO_VICT);
            act("$n's palm thrust barely hits $N.", ch,NULL, victim, TO_NOTVICT);
            act("$n's eyes glow bright red for a moment.",ch,NULL,NULL,TO_ROOM);
            spell_blindness( skill_lookup( "blindness" ), ch->level, ch, victim );
        }
        else if ( dam <= 10 && dam >= 6 )
        {
            act("Your palm thrust thrusts $N weakly.", ch, NULL, victim, TO_CHAR);
            act("$n's palm thrust thrusts you weakly.", ch,NULL, victim, TO_VICT);
            act("$n's palm thrust thrusts $N weakly.", ch,NULL, victim, TO_NOTVICT);
            act("$n's eyes glow bright blue for a moment.",ch,NULL,NULL,TO_ROOM);
            /* Heals the monk himself - both caster and target are ch. */
            spell_heal( skill_lookup( "heal" ), ch->level, ch, ch );
        }
        else if ( dam <= 25 && dam >= 11 )
        {
            act("Your palm thrust thrusts $N.", ch, NULL, victim, TO_CHAR);
            act("$n's palm thrust thrusts you.", ch,NULL, victim, TO_VICT);
            act("$n's palm thrust thrusts $N.", ch,NULL, victim, TO_NOTVICT);
            act("$n's eyes glow bright red for a moment.",ch,NULL,NULL,TO_ROOM);
            spell_magic_missile( skill_lookup( "magic missile" ), ch->level, ch, victim );
        }
        else if ( dam <= 30 && dam >= 26 )
        {
            act("Your palm thrust thrusts $N hard.", ch, NULL, victim, TO_CHAR);
            act("$n's palm thrust thrusts you hard.", ch,NULL, victim, TO_VICT);
            act("$n's palm thrust thrusts $N hard.", ch,NULL, victim, TO_NOTVICT);
            act("$n's eyes glow bright red for a moment.",ch,NULL,NULL,TO_ROOM);
            spell_harm( skill_lookup( "harm" ), ch->level, ch, victim );
        }
        else if ( dam <= 40 && dam >= 31 )
        {
            act("Your palm thrust thrusts $N deeply.", ch, NULL, victim, TO_CHAR);
            act("$n's palm thrust thrusts you deeply.", ch,NULL, victim, TO_VICT);
            act("$n's palm thrust thrusts $N deeply.", ch,NULL, victim, TO_NOTVICT);
            act("$n's eyes glow bright red for a moment.",ch,NULL,NULL,TO_ROOM);
            spell_faerie_fire( skill_lookup( "faerie fire" ), ch->level, ch, victim );
        }
        else if ( dam <= 50 && dam >= 41 )
        {
            act("Your palm thrust blasts through $N's armor.", ch, NULL, victim, TO_CHAR);
            act("$n's palm thrust blasts through your armor.", ch,NULL, victim, TO_VICT);
            act("$n's palm thrust blasts through $N's armor.", ch,NULL, victim, TO_NOTVICT);
            act("$n's eyes glow bright red for a moment.",ch,NULL,NULL,TO_ROOM);
            spell_curse( skill_lookup( "curse" ), ch->level, ch, victim );
        }
        else
            /* Unreachable given the 0..50 roll; kept as a tripwire. */
            send_to_char("Bug:  Please inform Kai.\n\r", ch);
        /* Direct damage in addition to the spell effect. */
        victim->hit -= dam;
        update_pos(victim);
    }
    ch->mana -= 100;
    ch->move -= 100;
    WAIT_STATE( ch, skill_table[gsn_kick].beats );
    return;
}
/*
 * do_prayofages - teleport the monk to another monk anywhere in the
 * world.  Requires the tenth mantra (powers[PMONK] >= 10).  The target
 * must be a standing player monk.  No cost.
 */
void do_prayofages( CHAR_DATA *ch, char *argument )
{
    char arg [MAX_INPUT_LENGTH];
    CHAR_DATA *target;

    argument = one_argument( argument, arg );
    if (IS_NPC(ch)) return;
    if (!IS_CLASS(ch, CLASS_MONK))
    {
        send_to_char("Huh?\n\r",ch);
        return;
    }
    if (ch->pcdata->powers[PMONK] < 10)
    {
        send_to_char("You haven't learned the Prayer of the Ages mantra.\n\r",ch);
        return;
    }
    if (arg[0] == '\0')
    {
        send_to_char("Who do you wish to be teleported to?\n\r",ch);
        return;
    }
    if ( ( target = get_char_world( ch, arg ) ) == NULL )
    {
        send_to_char( "Nobody by that name.\n\r", ch );
        return;
    }
    /* The destination must be another standing player monk with a room. */
    if ( IS_NPC(target) )
    {
        send_to_char( "Not on NPC's.\n\r", ch );
        return;
    }
    if ( target == ch )
    {
        send_to_char( "Nothing happens.\n\r", ch);
        return;
    }
    if (!IS_CLASS(target, CLASS_MONK))
    {
        send_to_char( "Nothing happens.\n\r", ch );
        return;
    }
    if (target->in_room == NULL)
    {
        send_to_char( "Nothing happens.\n\r", ch );
        return;
    }
    if ( target->position != POS_STANDING )
    {
        send_to_char( "You are unable to focus on their location.\n\r", ch );
        return;
    }

    send_to_char("You pray to god and vanish in a flash of light.\n\r",ch);
    act("$n utters a prayer to god and vanishes in a flash of light.",ch,NULL,NULL,TO_ROOM);
    char_from_room(ch);
    char_to_room(ch,target->in_room);
    do_look(ch,"");
    send_to_char("You emerge from a portal in the sky.\n\r",ch);
    act("$n appears from a portal in the sky.",ch,NULL,NULL,TO_ROOM);
    return;
}
| 28.84669 | 94 | 0.575593 |
0843478a5aa82cee24ea9e306b4d2f9257d7b4c9 | 5,153 | h | C | src/3rdparty/Chipmunk2D/include/chipmunk/chipmunk.h | libretro-mirrors/clove | 070706761faaae8c12be0dca8df98f4d128cc903 | [
"MIT"
] | 1 | 2020-07-15T01:58:07.000Z | 2020-07-15T01:58:07.000Z | src/3rdparty/Chipmunk2D/include/chipmunk/chipmunk.h | libretro-mirrors/clove | 070706761faaae8c12be0dca8df98f4d128cc903 | [
"MIT"
] | null | null | null | src/3rdparty/Chipmunk2D/include/chipmunk/chipmunk.h | libretro-mirrors/clove | 070706761faaae8c12be0dca8df98f4d128cc903 | [
"MIT"
] | null | null | null | /* Copyright (c) 2007 Scott Lembcke
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/// @defgroup misc Misc
/// @{
#ifndef CHIPMUNK_HEADER
#define CHIPMUNK_HEADER
#ifdef __cplusplus
extern "C" {
#endif
#ifndef CP_ALLOW_PRIVATE_ACCESS
#define CP_ALLOW_PRIVATE_ACCESS 0
#endif
#if CP_ALLOW_PRIVATE_ACCESS == 1
#define CP_PRIVATE(symbol) symbol
#else
#define CP_PRIVATE(symbol) symbol##_private
#endif
void cpMessage(const char *message, const char *condition, const char *file, int line, int isError);
#ifdef NDEBUG
#define cpAssertWarn(condition, message)
#else
#define cpAssertWarn(condition, message) if(!(condition)) cpMessage(message, #condition, __FILE__, __LINE__, 0)
#endif
// Hard assertions are important and cheap to execute. They are not disabled by compiling as debug.
#define cpAssertHard(condition, message) if(!(condition)) cpMessage(message, #condition, __FILE__, __LINE__, 1)
#ifdef NDEBUG
#define cpAssertSoft(condition, message)
#else
#define cpAssertSoft(condition, message) cpAssertHard(condition, message)
#endif
#include "chipmunk_types.h"
// Allocated size for various Chipmunk buffers
#ifndef CP_BUFFER_BYTES
#define CP_BUFFER_BYTES (32*1024)
#endif
// Chipmunk memory function aliases.
#ifndef cpcalloc
#define cpcalloc calloc
#endif
#ifndef cprealloc
#define cprealloc realloc
#endif
#ifndef cpfree
#define cpfree free
#endif
typedef struct cpArray cpArray;
typedef struct cpHashSet cpHashSet;
typedef struct cpBody cpBody;
typedef struct cpShape cpShape;
typedef struct cpConstraint cpConstraint;
typedef struct cpCollisionHandler cpCollisionHandler;
typedef struct cpArbiter cpArbiter;
typedef struct cpSpace cpSpace;
#include "cpVect.h"
#include "cpBB.h"
#include "cpSpatialIndex.h"
#include "cpBody.h"
#include "cpShape.h"
#include "cpPolyShape.h"
#include "cpArbiter.h"
#include "constraints/cpConstraint.h"
#include "cpSpace.h"
#define CP_VERSION_MAJOR 6
#define CP_VERSION_MINOR 0
#define CP_VERSION_RELEASE 0
/// Version string.
extern const char *cpVersionString;
/// @deprecated
void cpInitChipmunk(void);
/// Calculate the moment of inertia for a circle.
/// @c r1 and @c r2 are the inner and outer diameters. A solid circle has an inner diameter of 0.
cpFloat cpMomentForCircle(cpFloat m, cpFloat r1, cpFloat r2, cpVect offset);
/// Calculate area of a hollow circle.
/// @c r1 and @c r2 are the inner and outer diameters. A solid circle has an inner diameter of 0.
cpFloat cpAreaForCircle(cpFloat r1, cpFloat r2);
/// Calculate the moment of inertia for a line segment.
/// Beveling radius is not supported.
cpFloat cpMomentForSegment(cpFloat m, cpVect a, cpVect b);
/// Calculate the area of a fattened (capsule shaped) line segment.
cpFloat cpAreaForSegment(cpVect a, cpVect b, cpFloat r);
/// Calculate the moment of inertia for a solid polygon shape assuming it's center of gravity is at it's centroid. The offset is added to each vertex.
cpFloat cpMomentForPoly(cpFloat m, int numVerts, const cpVect *verts, cpVect offset);
/// Calculate the signed area of a polygon. A Clockwise winding gives positive area.
/// This is probably backwards from what you expect, but matches Chipmunk's the winding for poly shapes.
cpFloat cpAreaForPoly(const int numVerts, const cpVect *verts);
/// Calculate the natural centroid of a polygon.
cpVect cpCentroidForPoly(const int numVerts, const cpVect *verts);
/// Center the polygon on the origin. (Subtracts the centroid of the polygon from each vertex)
void cpRecenterPoly(const int numVerts, cpVect *verts);
/// Calculate the moment of inertia for a solid box.
cpFloat cpMomentForBox(cpFloat m, cpFloat width, cpFloat height);
#ifdef __cplusplus
}

/* Convenience C++ operator overloads for cpVect, visible only to C++
 * translation units.  Each forwards to the corresponding C function. */
static inline cpVect operator *(const cpVect v, const cpFloat s){return cpvmult(v, s);}    /* scalar scale */
static inline cpVect operator +(const cpVect v1, const cpVect v2){return cpvadd(v1, v2);}  /* component-wise add */
static inline cpVect operator -(const cpVect v1, const cpVect v2){return cpvsub(v1, v2);}  /* component-wise subtract */
static inline cpBool operator ==(const cpVect v1, const cpVect v2){return cpveql(v1, v2);} /* equality via cpveql */
static inline cpVect operator -(const cpVect v){return cpvneg(v);}                         /* negation */
#endif
#endif
//@}
| 32.821656 | 150 | 0.770813 |
ec4d8727acd7520a5ce1f66ced48979d772d9ebe | 13,079 | c | C | apps/sysdaemon/sysdaemon_netmonitor.c | f4grx/hn70ap | c40daa07c5147e47b0fcbc3042632371d6674413 | [
"BSD-3-Clause"
] | 28 | 2017-09-29T20:22:43.000Z | 2022-02-14T15:38:37.000Z | apps/sysdaemon/sysdaemon_netmonitor.c | f4grx/hn70ap | c40daa07c5147e47b0fcbc3042632371d6674413 | [
"BSD-3-Clause"
] | null | null | null | apps/sysdaemon/sysdaemon_netmonitor.c | f4grx/hn70ap | c40daa07c5147e47b0fcbc3042632371d6674413 | [
"BSD-3-Clause"
] | 3 | 2018-03-09T12:17:20.000Z | 2020-11-04T02:19:40.000Z | /****************************************************************************
* hn70ap/apps/sysdaemon/sysdaemon_netmonitor.c
*
* Copyright (C) 2018 Sebastien Lorquet. All rights reserved.
* Author: Sebastien Lorquet <sebastien@lorquet.fr>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <pthread.h>
#include <fcntl.h>
#include <errno.h>
#include <semaphore.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <nuttx/net/mii.h>
#include <nuttx/net/ioctl.h>
#include <nuttx/leds/userled.h>
#include <netutils/netlib.h>
#include <netutils/dhcpc.h>
#include <hn70ap/leds.h>
#define NET_DEVNAME "eth0"
#define NETMONITOR_RETRYMSEC 1000
#define SHORT_TIME_SEC 1
#define LONG_TIME_SEC 10
static sem_t g_notify_sem;
/* DHCP stuff */
struct dhcpc_state g_dhcp_state;
/*----------------------------------------------------------------------------*/
/*
 * Run one DHCP negotiation on NET_DEVNAME ("eth0") and apply the lease.
 *
 * On success the values returned in g_dhcp_state (address, netmask,
 * default router and, when DNS support is compiled in, DNS server) are
 * pushed into the network stack via the netlib_* setters; fields the
 * server left at zero are skipped.  Failures are logged and the
 * function returns without retrying.
 */
static void dhcp_negociate(void)
{
  void *handle;
  uint8_t mac[IFHWADDRLEN];
  int ret;

  /* The DHCP client identifies itself by the interface MAC address.
   * NOTE(review): netlib_getmacaddr() return value is not checked. */

  netlib_getmacaddr(NET_DEVNAME, mac);
  handle = dhcpc_open(NET_DEVNAME, &mac, IFHWADDRLEN);
  if(!handle)
    {
      syslog(LOG_ERR, "DHCP client failed\n");
      return;
    }

  syslog(LOG_INFO, "Starting DHCP request\n");
  ret = dhcpc_request(handle, &g_dhcp_state);
  if(ret != 0)
    {
      syslog(LOG_INFO, "DHCP request failed: %d errno %d\n", ret, errno);
      goto close;
    }

  /* Apply the result.  The dotted-quad logging below extracts bytes
   * from s_addr least-significant first. */

  syslog(LOG_INFO, "IP addr: %d.%d.%d.%d\n",
          g_dhcp_state.ipaddr.s_addr & 0xff,
         (g_dhcp_state.ipaddr.s_addr >> 8) & 0xff,
         (g_dhcp_state.ipaddr.s_addr >> 16) & 0xff,
          g_dhcp_state.ipaddr.s_addr >> 24);
  netlib_set_ipv4addr(NET_DEVNAME, &g_dhcp_state.ipaddr);

  if (g_dhcp_state.netmask.s_addr != 0)
    {
      syslog(LOG_INFO, "Net mask: %d.%d.%d.%d\n",
              g_dhcp_state.netmask.s_addr & 0xff,
             (g_dhcp_state.netmask.s_addr >> 8) & 0xff,
             (g_dhcp_state.netmask.s_addr >> 16) & 0xff,
              g_dhcp_state.netmask.s_addr >> 24);
      netlib_set_ipv4netmask(NET_DEVNAME, &g_dhcp_state.netmask);
    }

  if (g_dhcp_state.default_router.s_addr != 0)
    {
      syslog(LOG_INFO, "Default router: %d.%d.%d.%d\n",
              g_dhcp_state.default_router.s_addr & 0xff,
             (g_dhcp_state.default_router.s_addr >> 8) & 0xff,
             (g_dhcp_state.default_router.s_addr >> 16) & 0xff,
              g_dhcp_state.default_router.s_addr >> 24);
      netlib_set_dripv4addr(NET_DEVNAME, &g_dhcp_state.default_router);
    }

#if defined(CONFIG_NET_IPv4) && defined(CONFIG_NETDB_DNSCLIENT)
  if (g_dhcp_state.dnsaddr.s_addr != 0)
    {
      syslog(LOG_INFO, "DNS: %d.%d.%d.%d\n",
              g_dhcp_state.dnsaddr.s_addr & 0xff,
             (g_dhcp_state.dnsaddr.s_addr >> 8) & 0xff,
             (g_dhcp_state.dnsaddr.s_addr >> 16) & 0xff,
              g_dhcp_state.dnsaddr.s_addr >> 24);
      netlib_set_ipv4dnsaddr(&g_dhcp_state.dnsaddr);
    }
#endif

close:
  dhcpc_close(handle);
}
/*----------------------------------------------------------------------------*/
/* Link-up handler: turn on the MAC link LED and acquire an IP lease
 * via DHCP.  Called from the monitor loop when the PHY reports the
 * link transitioning from down to up; dhcp_negociate() blocks in this
 * thread for the duration of the exchange. */

static void netmonitor_ifup(void* arg)
{
  syslog(LOG_INFO, "Interface is going UP\n");

  /* Enable the link LED */

  hn70ap_leds_state(LED_MACLINK, LED_STATE_ON);

  dhcp_negociate();
}
/*----------------------------------------------------------------------------*/
/* Link-down handler: log the transition and extinguish the MAC link
 * LED.  No network-stack teardown is performed here; the caller has
 * already taken the interface down via SIOCSIFFLAGS. */

static void netmonitor_ifdown(void)
{
  syslog(LOG_INFO, "Interface is going DOWN\n");

  /* Disable the link LED */

  hn70ap_leds_state(LED_MACLINK, LED_STATE_OFF);
}
/*----------------------------------------------------------------------------*/
/*
Description: Signal handler to be notified of link status change
*/
static void netmonitor_signal(int signo, FAR siginfo_t *siginfo,
FAR void * context)
{
int semcount;
int ret;
/* What is the count on the semaphore? Don't over-post */
ret = sem_getvalue(&g_notify_sem, &semcount);
if (ret == OK && semcount <= 0)
{
sem_post(&g_notify_sem);
}
}
/*----------------------------------------------------------------------------*/
/*
* Description:
* Monitor link status, gracefully taking the link up and down as the
* link becomes available or as the link is lost.
*/
static void* netmonitor_thread(void *arg)
{
struct timespec abstime;
struct timespec reltime;
struct ifreq ifr;
struct sigaction act;
struct sigaction oact;
bool devup;
int ret;
int sd;
syslog(LOG_INFO, "Entry\n");
/* Initialize the notification semaphore */
DEBUGVERIFY(sem_init(&g_notify_sem, 0, 0));
/* Get a socket descriptor that we can use to communicate with the network
* interface driver.
*/
sd = socket(AF_INET, SOCK_DGRAM, 0);
if (sd < 0)
{
ret = -errno;
DEBUGASSERT(ret < 0);
syslog(LOG_ERR, "Failed to create a socket: %d\n", ret);
goto errout;
}
/* Attach a signal handler so that we do not lose PHY events */
act.sa_sigaction = netmonitor_signal;
act.sa_flags = SA_SIGINFO;
ret = sigaction(SIGUSR2, &act, &oact);
if (ret < 0)
{
ret = -errno;
DEBUGASSERT(ret < 0);
syslog(LOG_ERR, "sigaction() failed: %d\n", ret);
goto errout_with_socket;
}
/* Now loop, waiting for changes in link status */
for (;;)
{
/* Configure to receive a signal on changes in link status */
strncpy(ifr.ifr_name, NET_DEVNAME, IFNAMSIZ);
ifr.ifr_mii_notify_pid = 0; /* PID=0 means this task */
ifr.ifr_mii_notify_signo = SIGUSR2;
ifr.ifr_mii_notify_arg = NULL;
ret = ioctl(sd, SIOCMIINOTIFY, (unsigned long)&ifr);
if (ret < 0)
{
ret = -errno;
DEBUGASSERT(ret < 0);
syslog(LOG_ERR, "ioctl(SIOCMIINOTIFY) failed: %d\n", ret);
goto errout_with_sigaction;
}
/* Does the driver think that the link is up or down? */
ret = ioctl(sd, SIOCGIFFLAGS, (unsigned long)&ifr);
if (ret < 0)
{
ret = -errno;
DEBUGASSERT(ret < 0);
syslog(LOG_ERR, "ioctl(SIOCGIFFLAGS) failed: %d\n", ret);
goto errout_with_notification;
}
devup = ((ifr.ifr_flags & IFF_UP) != 0);
/* Get the current PHY address in use. This probably does not change,
* but just in case...
*
* NOTE: We are assuming that the network device name is preserved in
* the ifr structure.
*/
ret = ioctl(sd, SIOCGMIIPHY, (unsigned long)&ifr);
if (ret < 0)
{
ret = -errno;
syslog(LOG_ERR, "ioctl(SIOCGMIIPHY) failed: %d\n", ret);
goto errout_with_notification;
}
/* Read the PHY status register */
ifr.ifr_mii_reg_num = MII_MSR;
ret = ioctl(sd, SIOCGMIIREG, (unsigned long)&ifr);
if (ret < 0)
{
ret = -errno;
DEBUGASSERT(ret < 0);
syslog(LOG_ERR, "ioctl(SIOCGMIIREG) failed: %d\n", ret);
goto errout_with_notification;
}
//syslog(LOG_INFO, "%s: devup=%d PHY address=%02x MSR=%04x",
// ifr.ifr_name, devup, ifr.ifr_mii_phy_id, ifr.ifr_mii_val_out);
/* Check for link up or down */
if ((ifr.ifr_mii_val_out & MII_MSR_LINKSTATUS) != 0)
{
/* Link up... does the drive think that the link is up? */
if (!devup)
{
/* No... We just transitioned from link down to link up.
* Bring the link up.
*/
syslog(LOG_INFO, "Bringing the link up\n");
ifr.ifr_flags = IFF_UP;
ret = ioctl(sd, SIOCSIFFLAGS, (unsigned long)&ifr);
if (ret < 0)
{
ret = -errno;
DEBUGASSERT(ret < 0);
syslog(LOG_ERR, "ioctl(SIOCSIFFLAGS) failed: %d\n", ret);
goto errout_with_notification;
}
netmonitor_ifup(arg);
/* And wait for a short delay. We will want to recheck the
* link status again soon.
*/
reltime.tv_sec = SHORT_TIME_SEC;
reltime.tv_nsec = 0;
}
else
{
/* The link is still up. Take a long, well-deserved rest */
reltime.tv_sec = LONG_TIME_SEC;
reltime.tv_nsec = 0;
}
}
else
{
/* Link down... Was the driver link state already down? */
if (devup)
{
/* No... we just transitioned from link up to link down. Take
* the link down.
*/
syslog(LOG_INFO, "Taking the link down\n");
ifr.ifr_flags = IFF_DOWN;
ret = ioctl(sd, SIOCSIFFLAGS, (unsigned long)&ifr);
if (ret < 0)
{
ret = -errno;
DEBUGASSERT(ret < 0);
syslog(LOG_ERR, "ioctl(SIOCSIFFLAGS) failed: %d\n", ret);
goto errout_with_notification;
}
netmonitor_ifdown();
}
/* In either case, wait for the short, configurable delay */
reltime.tv_sec = NETMONITOR_RETRYMSEC / 1000;
reltime.tv_nsec = (NETMONITOR_RETRYMSEC % 1000) * 1000000;
}
/* Now wait for either the semaphore to be posted for a timed-out to
* occur.
*/
sched_lock();
DEBUGVERIFY(clock_gettime(CLOCK_REALTIME, &abstime));
abstime.tv_sec += reltime.tv_sec;
abstime.tv_nsec += reltime.tv_nsec;
if (abstime.tv_nsec >= 1000000000)
{
abstime.tv_sec++;
abstime.tv_nsec -= 1000000000;
}
(void)sem_timedwait(&g_notify_sem, &abstime);
sched_unlock();
}
/* TODO: Stop the PHY notifications and remove the signal handler. */
errout_with_notification:
# warning Missing logic
errout_with_sigaction:
(void)sigaction(SIGUSR2, &oact, NULL);
errout_with_socket:
close(sd);
errout:
syslog(LOG_ERR, "Aborting\n");
return (void*)ret;
}
/*----------------------------------------------------------------------------*/
/*
  Description: Initialize the network monitor
  - Set the MAC address (read from EEPROM, with a fixed fallback) and hostname
  - Launch the netmonitor thread to set up the connection and start communication
*/
/* Initialize the network monitor:
 *  - Read the board MAC address from EEPROM (offset 0xFA). If the EEPROM
 *    cannot be opened, fall back to a fixed placeholder address instead of
 *    failing (the original intent of the memcpy fallback).
 *  - Program the MAC address into the Ethernet interface.
 *  - Spawn the netmonitor thread that tracks PHY link status.
 *
 * Returns OK (0) on success, ERROR or a pthread_create() error code on
 * failure.
 *
 * Fixes vs. previous version:
 *  - no longer calls lseek()/read() on an invalid fd after open() failure,
 *    so the fallback MAC is actually used;
 *  - closes the EEPROM fd on the lseek()/read() error paths (leak);
 *  - passes a valid pthread_t* to pthread_create() (NULL is undefined
 *    behavior per POSIX).
 */
int hn70ap_netmonitor_init(void)
{
  uint8_t mac[IFHWADDRLEN];
  pthread_t tid;
  int ret;
  int fd;

  fd = open("/dev/eeprom", O_RDONLY);
  if(fd < 0)
    {
      syslog(LOG_ERR, "failed to open EEPROM to read MAC address\n");

      /* EEPROM unavailable: use a fixed fallback MAC address */

      memcpy(mac, "123456", 6);
    }
  else
    {
      /* The 6-byte MAC address is stored at EEPROM offset 0xFA */

      ret = lseek(fd, 0xFA, SEEK_SET);
      if(ret < 0)
        {
          syslog(LOG_ERR, "failed to seek the EEPROM\n");
          close(fd);
          return ERROR;
        }

      ret = read(fd, mac, 6);
      if(ret < 0)
        {
          syslog(LOG_ERR, "failed to read the EEPROM\n");
          close(fd);
          return ERROR;
        }

      close(fd);
    }

  syslog(LOG_INFO, "Set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
         mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

  ret = netlib_setmacaddr(NET_DEVNAME, mac);
  if(ret)
    {
      syslog(LOG_ERR, "Set MAC on ethernet interface FAILED\n");
      return ret;
    }

  /* pthread_create() requires valid storage for the thread ID; the thread
   * itself is detached from our point of view (ID not used afterwards).
   */

  ret = pthread_create(&tid, NULL, netmonitor_thread, NULL);
  return ret;
}
| 28.556769 | 105 | 0.575732 |
260831c6432e689443f95dc372bcbcd12856d4da | 1,042 | h | C | rrQt/Widgets/TableModel/TableView.h | afoolsbag/rrCnCxx | 1e673bd4edac43d8406a0c726138cba194d17f48 | [
"Unlicense"
] | 2 | 2019-03-20T01:14:10.000Z | 2021-12-08T15:39:32.000Z | rrQt/Widgets/TableModel/TableView.h | afoolsbag/rrCnCxx | 1e673bd4edac43d8406a0c726138cba194d17f48 | [
"Unlicense"
] | null | null | null | rrQt/Widgets/TableModel/TableView.h | afoolsbag/rrCnCxx | 1e673bd4edac43d8406a0c726138cba194d17f48 | [
"Unlicense"
] | null | null | null | //===-- TableView Widget ----------------------------------------*- C++ -*-===//
//!
//! \file
//! \brief Qt 表视图小部件。
//!
//! \version 2021-10-18
//! \since 2019-12-12
//! \authors zhengrr
//! \copyright Unlicense
//!
//===----------------------------------------------------------------------===//
#pragma once
#ifndef TABLE_VIEW_H_
#define TABLE_VIEW_H_
#include <QtWidgets/QTableView>
QT_BEGIN_NAMESPACE
namespace Ui { class TableView; }
QT_END_NAMESPACE
/// Qt table-view widget: subclasses QTableView and handles its own
/// interaction signals through Qt's auto-connection slot naming
/// (on_<objectName>_<signalName>, wired by QMetaObject::connectSlotsByName
/// from the generated UI).
/// NOTE(review): slot bodies live in the .cpp and are not visible here.
class TableView : public QTableView {
    Q_OBJECT

public:
    /// Constructor/destructor are defined in the .cpp (not visible here).
    explicit TableView(QWidget *parent = nullptr);
    ~TableView();

private slots:
    // Auto-connected handlers for the view's own interaction signals.
    void on_TableView_activated(const QModelIndex &index);
    void on_TableView_clicked(const QModelIndex &index);
    void on_TableView_doubleClicked(const QModelIndex &index);
    void on_TableView_entered(const QModelIndex &index);
    void on_TableView_iconSizeChanged(const QSize &size);
    void on_TableView_pressed(const QModelIndex &index);
    void on_TableView_viewportEntered();

private:
    Ui::TableView *ui;  ///< Designer-generated UI; presumably released in the destructor -- confirm in .cpp
};
#endif
| 23.681818 | 80 | 0.637236 |
4d2556b8c7dbe2f58b0252b3ce94c8a1c2b98a60 | 542 | h | C | checkerlib/TyperConsumer.h | lucasreis1/accept | a843321ec7bcf7fb8924a6f345406cce3cfd7819 | [
"MIT"
] | 38 | 2015-02-26T18:45:17.000Z | 2022-03-22T11:28:45.000Z | checkerlib/TyperConsumer.h | uwsampa/accept | 4a8132ce5922c2b21696e1a33387117d44b7ffb9 | [
"MIT"
] | 26 | 2015-01-30T22:29:58.000Z | 2020-05-14T01:37:58.000Z | checkerlib/TyperConsumer.h | lucasreis1/accept | a843321ec7bcf7fb8924a6f345406cce3cfd7819 | [
"MIT"
] | 16 | 2015-03-05T19:24:45.000Z | 2021-11-25T23:53:27.000Z | #ifndef TYPERCONSUMER_H
#define TYPERCONSUMER_H
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/Decl.h"
#include "clang/Frontend/CompilerInstance.h"
/// Clang ASTConsumer whose HandleTranslationUnit override runs once the
/// whole translation unit has been parsed (implementation in the .cpp).
class TyperConsumer : public clang::ASTConsumer {
public:
    /// Keeps a reference to the compiler instance for later use.
    TyperConsumer(clang::CompilerInstance& _ci) :
        ci(_ci) {}

    /// Compiler instance; held by reference, so it must outlive this consumer.
    clang::CompilerInstance& ci;

    /// ASTConsumer entry point, invoked after the translation unit is parsed.
    virtual void HandleTranslationUnit(clang::ASTContext &Ctx);

    /// Report an error anchored at a statement or declaration.
    /// NOTE(review): presumably emitted via ci's diagnostics engine -- confirm in .cpp.
    void reportError(clang::Stmt* stmt, llvm::StringRef message);
    void reportError(clang::Decl* decl, llvm::StringRef message);
};
#endif
| 23.565217 | 63 | 0.752768 |
c1320182521efdecac457c645c05e4ce10008d62 | 4,975 | h | C | prj_def.h | yuri-tnk/samd21_tnkernel3_usb | 355540e80e9376c780c15dfa8077c9b11215cc44 | [
"BSD-3-Clause"
] | null | null | null | prj_def.h | yuri-tnk/samd21_tnkernel3_usb | 355540e80e9376c780c15dfa8077c9b11215cc44 | [
"BSD-3-Clause"
] | null | null | null | prj_def.h | yuri-tnk/samd21_tnkernel3_usb | 355540e80e9376c780c15dfa8077c9b11215cc44 | [
"BSD-3-Clause"
] | null | null | null | /**
*
* Copyright (c) 2013,2021 Yuri Tiomkin
* All Rights Reserved
*
*
* Permission to use, copy, modify, and distribute this software in source
* and binary forms and its documentation for any purpose and without fee
* is hereby granted, provided that the above copyright notice appear
* in all copies and that both that copyright notice and this permission
* notice appear in supporting documentation.
*
*
* THIS SOFTWARE IS PROVIDED BY YURI TIOMKIN "AS IS" AND ANY EXPRESSED OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL YURI TIOMKIN OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/**
* \file
*
* Copyright (c) 2016-2018 Microchip Technology Inc. and its subsidiaries.
*
* \asf_license_start
*
* \page License
*
* Subject to your compliance with these terms, you may use Microchip
* software and any derivatives exclusively with Microchip products.
* It is your responsibility to comply with third party license terms applicable
* to your use of third party software (including open source software) that
* may accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES,
* WHETHER EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE,
* INCLUDING ANY IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY,
* AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT WILL MICROCHIP BE
* LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE, INCIDENTAL OR CONSEQUENTIAL
* LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND WHATSOEVER RELATED TO THE
* SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS BEEN ADVISED OF THE
* POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE FULLEST EXTENT
* ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN ANY WAY
* RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*
* \asf_license_stop
*
*/
#ifndef PRJ_DEF_H_
#define PRJ_DEF_H_
//================ DMA
/* Logical DMA channel identifiers used by the BSP.
 * NOTE(review): the channel-to-peripheral assignment is made in the
 * bsp_dma_*_init() implementations (not visible here) -- confirm before reuse.
 */
typedef enum
{
    DMAC_CHANNEL_0 = 0,
    DMAC_CHANNEL_1 = 1,
} DMAC_CHANNEL;

/* Outcome reported to a channel callback when a transfer finishes. */
typedef enum
{
    DMAC_TRANSFER_EVENT_COMPLETE, // Data was transferred successfully.
    DMAC_TRANSFER_EVENT_ERROR // Error while processing the request
} DMAC_TRANSFER_EVENT;

/* Raw 32-bit channel configuration word. */
typedef uint32_t DMAC_CHANNEL_CONFIG;

/* Transfer-completion callback: receives the outcome and the opaque context
 * handle registered at setup time.
 */
typedef void (*DMAC_CHANNEL_CALLBACK) (DMAC_TRANSFER_EVENT event, uintptr_t contextHandle);

/* One-time DMA controller initialization. */
int bsp_dma_init(void);

/* Configure UART0 RX DMA: receives 'num_items' items of 'item_size' bytes
 * into 'dstAddr'.
 */
int bsp_dma_uart0_rx_init(const void * dstAddr,
                          int num_items,
                          int item_size);

/* Configure UART0 TX DMA from 'srcAddr'; 'eventHandler' is invoked on
 * completion or error.
 */
int bsp_dma_uart0_tx_init(const void * srcAddr,
                          int num_items,
                          int item_size,
                          const DMAC_CHANNEL_CALLBACK eventHandler);

/* Disable the UART0 TX DMA channel. */
void bsp_dma_uart0_tx_ch_disable(void);

/* Amount of data received so far on UART0 RX.
 * NOTE(review): unit (bytes vs items) not visible here -- confirm in the driver.
 */
int bsp_dma_uart0_get_rx_count(void);

/* DMAC interrupt service routine entry point. */
void bsp_dma_interrupt_handler(void);

/* Current remaining transfer count of the UART0 channel (BTCNT register
 * value on the SAMD21 DMAC -- presumably; confirm in the driver).
 */
int bsp_dma_uart0_get_btcnt(void);

/* Dummy DMA channel; purpose inferred from the name only -- see the driver. */
int bsp_dma_dummy_init(void);
int bsp_dma_dummy_start(void);
//================ UART
/* Per-UART driver state shared between the API functions and the ISR/DMA
 * paths. Field semantics below are partly inferred from names -- the
 * authoritative usage is in the driver .c file.
 */
typedef struct _UARTINFO
{
    TN_SEM tx_str_sem;           /* TNKernel semaphore; presumably serializes transmits -- confirm */
    TN_SEM tx_rdy_sem;           /* TNKernel semaphore; presumably signaled on TX completion -- confirm */
    unsigned int rx_buf_items;   /* capacity of rx_buf (items) */
    uint8_t * rx_buf;            /* receive buffer */
    unsigned int tx_buf_items;   /* capacity of tx_buf (items) */
    uint8_t * tx_buf;            /* transmit buffer */
    unsigned int rx_timeout_cnt; /* NOTE(review): semantics not visible here -- confirm in driver */
    unsigned int rx_timeout;     /* NOTE(review): semantics not visible here -- confirm in driver */
    unsigned int rx_tail;        /* consumer index into rx_buf (presumably) */
    int state;                   /* driver state flag(s) */
} UARTINFO;

/* Supported baud rates; the enum value equals the rate in bits/second. */
typedef enum _UARTBAUDRATE
{
    UART_BAUD_9600 = 9600,
    UART_BAUD_19200 = 19200,
    UART_BAUD_38400 = 38400,
    UART_BAUD_115200 = 115200,
} UARTBAUDRATE;

/* Accessor for the UART0 driver state singleton. */
UARTINFO * get_uart0_info(void);

/* Open/configure UART0 with caller-supplied RX/TX buffers. */
int bsp_uart0_open(uint8_t * rx_buf,
                   unsigned int rx_buf_items,
                   uint8_t * tx_buf,
                   unsigned int tx_buf_items,
                   UARTBAUDRATE baud_rate);

/* Transmit a raw data buffer on UART0. */
int bsp_uart0_transmit(uint8_t * data,
                       int data_size);

/* Convenience transmit helpers: buffer of given length, NUL-terminated
 * string, and single character.
 */
int uart0_tx_buf(uint8_t * str, int len);
int uart0_tx_str(char * str);
void uart0_tx_char(unsigned char ch);

/* Read up to 'max_len' bytes of received data into 'buf'. */
int uart0_read(unsigned char * buf,
               unsigned int max_len);

void uart_puts(char *s);
unsigned char uart_read_byte(void);

/* Bounded snprintf/itoa replacements provided by the project. */
int tn_snprintf( char *outStr, int maxLen, const char *fmt, ... );
void do_itoa(int val, char * buf, int buf_len);

/* Default console output routing: map generic names to UART0. */
#define uart_tx_str_func uart0_tx_str
#define uart_tx_char_func uart0_tx_char
#define uart_tx_buf_func uart0_tx_buf
#endif /* #define PRJ_DEF_H_ */
| 33.166667 | 92 | 0.697889 |
c15c7e71f6faac140a86e3678b93760b28176662 | 3,408 | h | C | dist/linux/genometools/src/match/sfx-diffcov.h | thmourikis/erv_annotator | 448b2dfc2972f9bf4e743df36fe0232813590fec | [
"MIT"
] | null | null | null | dist/linux/genometools/src/match/sfx-diffcov.h | thmourikis/erv_annotator | 448b2dfc2972f9bf4e743df36fe0232813590fec | [
"MIT"
] | 1 | 2018-09-30T00:45:55.000Z | 2018-09-30T00:45:55.000Z | dist/linux/genometools/src/match/sfx-diffcov.h | thmourikis/erv_annotator | 448b2dfc2972f9bf4e743df36fe0232813590fec | [
"MIT"
] | null | null | null | /*
Copyright (c) 2009 Stefan Kurtz <kurtz@zbh.uni-hamburg.de>
Copyright (c) 2009 Center for Bioinformatics, University of Hamburg
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef SFX_DIFFCOV_H
#define SFX_DIFFCOV_H
#include "core/encseq.h"
#include "core/unused_api.h"
#include "core/readmode.h"
#include "core/logger.h"
#include "core/timer_api.h"
#include "core/error_api.h"
#include "sfx-strategy.h"
#include "sfx-lcpvalues.h"
#include "sfx-suffixgetset.h"
/* Opaque difference-cover sample structure (definition in the .c file). */
typedef struct GtDifferencecover GtDifferencecover;

/* The following function is used for test purposes only */
void gt_differencecover_check(const GtEncseq *encseq,
                              GtReadmode readmode);

/* Create a difference cover with parameter 'vparam' over the encoded
 * sequence (period/modulus semantics per the difference-cover construction
 * -- confirm in the implementation).
 */
GtDifferencecover *gt_differencecover_new(unsigned int vparam,
                                          const GtEncseq *encseq,
                                          GtReadmode readmode,
                                          unsigned int outerprefixlength,
                                          GtLogger *logger);

/* Number of sampled suffix positions in the cover. */
unsigned long gt_differencecover_samplesize(const GtDifferencecover *dcov);

/* Build and pre-sort the sample used for suffix sorting.
 * NOTE(review): presumably returns NULL on failure with 'err' set --
 * confirm the exact error contract in the .c file.
 */
GtDifferencecover *gt_differencecover_prepare_sample(
                                     unsigned int vparam,
                                     const GtEncseq *encseq,
                                     GtReadmode readmode,
                                     unsigned int prefixlength,
                                     const Sfxstrategy *sfxstrategy,
                                     GtOutlcpinfo *outlcpinfosample,
                                     GtLogger *logger,
                                     GtTimer *sfxprogress,
                                     GtError *err);

/* True if the cover contains no sampled positions. */
bool gt_differencecover_is_empty(const GtDifferencecover *dcov);

/* Release the cover and all associated memory. */
void gt_differencecover_delete(GtDifferencecover *dcov);

/* Memory footprint of the structure, in bytes. */
size_t gt_differencecover_requiredspace(const GtDifferencecover *dcov);

/* Callback signature used by the suffix sorter to finish sorting a bucket
 * that plain prefix sorting left unsorted ('data' is the cover).
 */
void gt_differencecover_sortunsortedbucket(void *data,
                                           unsigned long blisbl,
                                           unsigned long width,
                                           GT_UNUSED unsigned long depth);

/* Callback to fill in LCP values that exceed the direct-comparison depth. */
void gt_differencecover_completelargelcpvalues(void *data,
                                               const GtSuffixsortspace *sssp,
                                               GtLcpvalues *tableoflcpvalues,
                                               unsigned long width,
                                               unsigned long posoffset);

/* Attach the suffix-sort space and LCP output to the cover. */
void gt_differencecover_set_sssp_lcp(GtDifferencecover *dcov,
                                     GtSuffixsortspace *sssp,
                                     GtOutlcpinfo *outlcpinfo);
#endif
| 43.139241 | 77 | 0.586561 |
6cad45a129b14301d67b8f7406a72fa9b997f7fe | 916 | h | C | pubg/PrivateHeaders/FBSDKErrorRecoveryAttempter.h | cara-ksa-so4/PUBC | 1065b983d7bb1c4ad2104c2c4943487c7f17fa44 | [
"MIT"
] | 14 | 2019-07-23T20:33:14.000Z | 2022-03-09T23:29:36.000Z | fc/PrivateHeaders/FBSDKErrorRecoveryAttempter.h | lechium/FControl | 1203a3d6345b5ce9c738d238e0e7075e27c4d21c | [
"MIT"
] | 5 | 2019-07-22T03:59:20.000Z | 2020-03-02T14:50:48.000Z | fc/PrivateHeaders/FBSDKErrorRecoveryAttempter.h | lechium/FControl | 1203a3d6345b5ce9c738d238e0e7075e27c4d21c | [
"MIT"
] | 8 | 2019-07-23T20:35:34.000Z | 2022-03-03T05:51:30.000Z | //
// Generated by class-dump 3.5 (64 bit).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by Steve Nygard.
//
#import "NSObject.h"
#import "FBSDKErrorRecoveryAttempting.h"
@class NSString;
// Class-dump-generated declaration (mirrors the compiled binary); the
// IMP=... comments are informational addresses. Argument/return types are
// erased to `id` by class-dump -- confirm real types against the FB SDK.
@interface FBSDKErrorRecoveryAttempter : NSObject <FBSDKErrorRecoveryAttempting>
{
}

// Factory building an attempter from an error-configuration object.
+ (id)recoveryAttempterFromConfiguration:(id)arg1; // IMP=0x00000001006d53c8

// Error-recovery entry point following the Cocoa recovery-attempter
// convention (mirrored by the adopted FBSDKErrorRecoveryAttempting
// protocol): attempts the selected recovery option, then informs
// `delegate` via `didRecoverSelector` with `contextInfo`.
- (void)attemptRecoveryFromError:(id)arg1 optionIndex:(unsigned long long)arg2 delegate:(id)arg3 didRecoverSelector:(SEL)arg4 contextInfo:(void *)arg5; // IMP=0x00000001006d54a4

// Delivers the recovery outcome to `delegate` via `didRecoverSelector`.
- (void)completeRecovery:(_Bool)arg1 delegate:(id)arg2 didRecoverSelector:(SEL)arg3 contextInfo:(void *)arg4; // IMP=0x00000001006d54a8

// Remaining properties
@property(readonly, copy) NSString *debugDescription;
@property(readonly, copy) NSString *description;
@property(readonly) unsigned long long hash;
@property(readonly) Class superclass;

@end
| 31.586207 | 177 | 0.771834 |
1e6046ba7e78e86c11075bac0572456b77b82cdb | 12,412 | h | C | assignment-client/src/avatars/ScriptableAvatar.h | Darlingnotin/Antisocial_VR | f1debafb784ed5a63a40fe9b80790fbaccfedfce | [
"Apache-2.0"
] | 1 | 2020-05-02T19:40:21.000Z | 2020-05-02T19:40:21.000Z | assignment-client/src/avatars/ScriptableAvatar.h | Darlingnotin/Antisocial_VR | f1debafb784ed5a63a40fe9b80790fbaccfedfce | [
"Apache-2.0"
] | null | null | null | assignment-client/src/avatars/ScriptableAvatar.h | Darlingnotin/Antisocial_VR | f1debafb784ed5a63a40fe9b80790fbaccfedfce | [
"Apache-2.0"
] | 1 | 2020-06-12T04:41:35.000Z | 2020-06-12T04:41:35.000Z | //
// ScriptableAvatar.h
// assignment-client/src/avatars
//
// Created by Clement on 7/22/14.
// Copyright 2014 High Fidelity, Inc.
//
// Distributed under the Apache License, Version 2.0.
// See the accompanying file LICENSE or http://www.apache.org/licenses/LICENSE-2.0.html
//
#ifndef hifi_ScriptableAvatar_h
#define hifi_ScriptableAvatar_h
#include <AnimationCache.h>
#include <AnimSkeleton.h>
#include <AvatarData.h>
#include <ScriptEngine.h>
#include <EntityItem.h>
/**jsdoc
* The <code>Avatar</code> API is used to manipulate scriptable avatars on the domain. This API is a subset of the
* {@link MyAvatar} API. To enable this API, set {@link Agent|Agent.isAvatar} to <code>true</code>.
*
* <p>For Interface, client entity, and avatar scripts, see {@link MyAvatar}.</p>
*
* @namespace Avatar
*
* @hifi-assignment-client
*
* @comment IMPORTANT: This group of properties is copied from AvatarData.h; they should NOT be edited here.
* @property {Vec3} position - The position of the avatar.
* @property {number} scale=1.0 - The scale of the avatar. The value can be set to anything between <code>0.005</code> and
* <code>1000.0</code>. When the scale value is fetched, it may temporarily be further limited by the domain's settings.
* @property {number} density - The density of the avatar in kg/m<sup>3</sup>. The density is used to work out its mass in
* the application of physics. <em>Read-only.</em>
* @property {Vec3} handPosition - A user-defined hand position, in world coordinates. The position moves with the avatar
* but is otherwise not used or changed by Interface.
* @property {number} bodyYaw - The left or right rotation about an axis running from the head to the feet of the avatar.
* Yaw is sometimes called "heading".
* @property {number} bodyPitch - The rotation about an axis running from shoulder to shoulder of the avatar. Pitch is
* sometimes called "elevation".
* @property {number} bodyRoll - The rotation about an axis running from the chest to the back of the avatar. Roll is
* sometimes called "bank".
* @property {Quat} orientation - The orientation of the avatar.
* @property {Quat} headOrientation - The orientation of the avatar's head.
* @property {number} headPitch - The rotation about an axis running from ear to ear of the avatar's head. Pitch is
* sometimes called "elevation".
* @property {number} headYaw - The rotation left or right about an axis running from the base to the crown of the avatar's
* head. Yaw is sometimes called "heading".
* @property {number} headRoll - The rotation about an axis running from the nose to the back of the avatar's head. Roll is
* sometimes called "bank".
* @property {Vec3} velocity - The current velocity of the avatar.
* @property {Vec3} angularVelocity - The current angular velocity of the avatar.
* @property {number} audioLoudness - The instantaneous loudness of the audio input that the avatar is injecting into the
* domain.
* @property {number} audioAverageLoudness - The rolling average loudness of the audio input that the avatar is injecting
* into the domain.
* @property {string} displayName - The avatar's display name.
* @property {string} sessionDisplayName - <code>displayName's</code> sanitized and default version defined by the avatar mixer
* rather than Interface clients. The result is unique among all avatars present in the domain at the time.
* @property {boolean} lookAtSnappingEnabled=true - <code>true</code> if the avatar's eyes snap to look at another avatar's
* eyes when the other avatar is in the line of sight and also has <code>lookAtSnappingEnabled == true</code>.
* @property {string} skeletonModelURL - The avatar's FST file.
* @property {AttachmentData[]} attachmentData - Information on the avatar's attachments.
* <p class="important">Deprecated: This property is deprecated and will be removed. Use avatar entities instead.</p>
* @property {string[]} jointNames - The list of joints in the current avatar model. <em>Read-only.</em>
* @property {Uuid} sessionUUID - Unique ID of the avatar in the domain. <em>Read-only.</em>
* @property {Mat4} sensorToWorldMatrix - The scale, rotation, and translation transform from the user's real world to the
* avatar's size, orientation, and position in the virtual world. <em>Read-only.</em>
* @property {Mat4} controllerLeftHandMatrix - The rotation and translation of the left hand controller relative to the
* avatar. <em>Read-only.</em>
* @property {Mat4} controllerRightHandMatrix - The rotation and translation of the right hand controller relative to the
* avatar. <em>Read-only.</em>
* @property {number} sensorToWorldScale - The scale that transforms dimensions in the user's real world to the avatar's
* size in the virtual world. <em>Read-only.</em>
* @property {boolean} hasPriority - <code>true</code> if the avatar is in a "hero" zone, <code>false</code> if it isn't.
* <em>Read-only.</em>
* @property {boolean} hasScriptedBlendshapes=false - <code>true</code> if blend shapes are controlled by scripted actions,
* otherwise <code>false</code>. Set this to <code>true</code> before using the {@link MyAvatar.setBlendshape} method,
* and set back to <code>false</code> after you no longer want scripted control over the blend shapes.
* <p><strong>Note:</strong> This property will automatically be set to true if the Controller system has valid facial
* blend shape actions.</p>
* @property {boolean} hasProceduralBlinkFaceMovement=true - <code>true</code> if avatars blink automatically by animating
* facial blend shapes, <code>false</code> if automatic blinking is disabled. Set to <code>false</code> to fully control
* the blink facial blend shapes via the {@link MyAvatar.setBlendshape} method.
* @property {boolean} hasProceduralEyeFaceMovement=true - <code>true</code> if the facial blend shapes for an avatar's eyes
* adjust automatically as the eyes move, <code>false</code> if this automatic movement is disabled. Set this property
* to <code>true</code> to prevent the iris from being obscured by the upper or lower lids. Set to <code>false</code> to
* fully control the eye blend shapes via the {@link MyAvatar.setBlendshape} method.
* @property {boolean} hasAudioEnabledFaceMovement=true - <code>true</code> if the avatar's mouth blend shapes animate
* automatically based on detected microphone input, <code>false</code> if this automatic movement is disabled. Set
* this property to <code>false</code> to fully control the mouth facial blend shapes via the
* {@link MyAvatar.setBlendshape} method.
*
* @example <caption>Create a scriptable avatar.</caption>
* (function () {
* Agent.setIsAvatar(true);
* print("Position: " + JSON.stringify(Avatar.position)); // 0, 0, 0
* }());
*/
/// Assignment-client avatar driven by script: holds animation playback
/// state, FST joint mappings and avatar entities, and packs/sends avatar
/// data to the mixer (see sendAvatarDataPacket / toByteArrayStateful).
class ScriptableAvatar : public AvatarData, public Dependency {
    Q_OBJECT

    // Wall-clock time source used for the traits send window below.
    using Clock = std::chrono::system_clock;
    using TimePoint = Clock::time_point;
public:
    ScriptableAvatar();

    /**jsdoc
     * Starts playing an animation on the avatar.
     * @function Avatar.startAnimation
     * @param {string} url - The animation file's URL. Animation files need to be in glTF or FBX format but only need to
     *     contain the avatar skeleton and animation data. glTF models may be in JSON or binary format (".gltf" or ".glb" URLs
     *     respectively).
     *     <p><strong>Warning:</strong> glTF animations currently do not always animate correctly.</p>
     * @param {number} [fps=30] - The frames per second (FPS) rate for the animation playback. 30 FPS is normal speed.
     * @param {number} [priority=1] - <em>Not used.</em>
     * @param {boolean} [loop=false] - <code>true</code> if the animation should loop, <code>false</code> if it shouldn't.
     * @param {boolean} [hold=false] - <em>Not used.</em>
     * @param {number} [firstFrame=0] - The frame at which the animation starts.
     * @param {number} [lastFrame=3.403e+38] - The frame at which the animation stops.
     * @param {string[]} [maskedJoints=[]] - The names of joints that should not be animated.
     */
    /// Allows scripts to run animations.
    Q_INVOKABLE void startAnimation(const QString& url, float fps = 30.0f, float priority = 1.0f, bool loop = false,
                                    bool hold = false, float firstFrame = 0.0f, float lastFrame = FLT_MAX,
                                    const QStringList& maskedJoints = QStringList());

    /**jsdoc
     * Stops playing the current animation.
     * @function Avatar.stopAnimation
     */
    Q_INVOKABLE void stopAnimation();

    /**jsdoc
     * Gets the details of the current avatar animation that is being or was recently played.
     * @function Avatar.getAnimationDetails
     * @returns {Avatar.AnimationDetails} The current or recent avatar animation.
     * @example <caption>Report the current animation details.</caption>
     * var animationDetails = Avatar.getAnimationDetails();
     * print("Animation details: " + JSON.stringify(animationDetails));
     */
    Q_INVOKABLE AnimationDetails getAnimationDetails();

    /**jsdoc
     * @comment Uses the base class's JSDoc.
     */
    Q_INVOKABLE virtual QStringList getJointNames() const override;

    /**jsdoc
     * @comment Uses the base class's JSDoc.
     */
    /// Returns the index of the joint with the specified name, or -1 if not found/unknown.
    Q_INVOKABLE virtual int getJointIndex(const QString& name) const override;

    /**jsdoc
     * @comment Uses the base class's JSDoc.
     */
    Q_INVOKABLE virtual void setSkeletonModelURL(const QUrl& skeletonModelURL) override;

    /**jsdoc
     * @comment Uses the base class's JSDoc.
     */
    int sendAvatarDataPacket(bool sendAll = false) override;

    virtual QByteArray toByteArrayStateful(AvatarDataDetail dataDetail, bool dropFaceTracking = false) override;

    /**jsdoc
     * Gets details of all avatar entities.
     * <p><strong>Warning:</strong> Potentially an expensive call. Do not use if possible.</p>
     * @function Avatar.getAvatarEntityData
     * @returns {AvatarEntityMap} Details of all avatar entities.
     * @example <caption>Report the current avatar entities.</caption>
     * var avatarEntityData = Avatar.getAvatarEntityData();
     * print("Avatar entities: " + JSON.stringify(avatarEntityData));
     */
    Q_INVOKABLE AvatarEntityMap getAvatarEntityData() const override;

    AvatarEntityMap getAvatarEntityDataNonDefault() const override;

    AvatarEntityMap getAvatarEntityDataInternal(bool allProperties) const;

    /**jsdoc
     * Sets all avatar entities from an object.
     * <p><strong>Warning:</strong> Potentially an expensive call. Do not use if possible.</p>
     * @function Avatar.setAvatarEntityData
     * @param {AvatarEntityMap} avatarEntityData - Details of the avatar entities.
     */
    Q_INVOKABLE void setAvatarEntityData(const AvatarEntityMap& avatarEntityData) override;

    /**jsdoc
     * @comment Uses the base class's JSDoc.
     */
    Q_INVOKABLE void updateAvatarEntity(const QUuid& entityID, const QByteArray& entityData) override;

public slots:
    /**jsdoc
     * @function Avatar.update
     * @param {number} deltaTime - Delta time.
     * @deprecated This function is deprecated and will be removed.
     */
    void update(float deltatime);

    /**jsdoc
     * @function Avatar.setJointMappingsFromNetworkReply
     * @deprecated This function is deprecated and will be removed.
     */
    void setJointMappingsFromNetworkReply();

private:
    AnimationPointer _animation;            ///< currently playing animation resource
    AnimationDetails _animationDetails;     ///< playback parameters of the current/last animation
    QStringList _maskedJoints;              ///< joint names excluded from animation
    AnimationPointer _bind; // a sleazy way to get the skeleton, given the various library/cmake dependencies
    std::shared_ptr<AnimSkeleton> _animSkeleton;
    QHash<QString, int> _fstJointIndices; ///< 1-based, since zero is returned for missing keys
    QStringList _fstJointNames; ///< in order of depth-first traversal
    QUrl _skeletonFBXURL;                   ///< URL the joint mappings were loaded from
    mutable QScriptEngine _scriptEngine;    ///< mutable: used from const accessors
    std::map<QUuid, EntityItemPointer> _entities; ///< avatar entities keyed by entity ID

    /// Loads the joint indices, names from the FST file (if any)
    void updateJointMappings();

    quint64 _lastSendAvatarDataTime { 0 };  ///< timestamp of last avatar-data packet send
    TimePoint _nextTraitsSendWindow;        ///< earliest time the next traits packet may be sent
};
#endif // hifi_ScriptableAvatar_h
| 52.817021 | 128 | 0.713745 |
37b542de6f9a805f9b6bef32230e78bd6c396911 | 14,554 | c | C | backend/redis/sender.c | lasch/data-broker | dca8a2f3d0bcf891541f5e5073ab95bab15eea9c | [
"Apache-2.0"
] | 19 | 2018-08-30T01:05:01.000Z | 2021-07-07T15:19:36.000Z | backend/redis/sender.c | lasch/data-broker | dca8a2f3d0bcf891541f5e5073ab95bab15eea9c | [
"Apache-2.0"
] | 40 | 2018-10-01T13:09:09.000Z | 2021-06-14T17:36:04.000Z | backend/redis/sender.c | lasch/data-broker | dca8a2f3d0bcf891541f5e5073ab95bab15eea9c | [
"Apache-2.0"
] | 20 | 2018-08-20T20:42:39.000Z | 2021-12-11T05:52:58.000Z | /*
* Copyright © 2018-2021 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifdef __APPLE__
#include <stdlib.h>
#else
#include <malloc.h> // malloc
#endif
#include <stddef.h>
#include <stdio.h>
#include "logutil.h"
#include "../common/completion_queue.h"
#include "redis.h"
#include "create.h"
#include "complete.h"
#include "iterator.h"
/* Argument bundle handed to the sender entry function. */
typedef struct dbBE_Redis_sender_args
{
  dbBE_Redis_context_t *_backend; /* Redis back-end context the sender operates on */
  int _looping; /* NOTE(review): presumably non-zero keeps the sender processing in a loop -- confirm against the sender main loop (not visible in this chunk) */
} dbBE_Redis_sender_args_t;
/*
 * Create an error completion for a failed request, push it onto the
 * completion queue and destroy the request.
 *
 * cq       completion queue to receive the error completion
 * request  the failed request; consumed (destroyed) by this call
 * error    error code to report to the upper layer
 *
 * Always returns 0. Failures to create or queue the completion are only
 * logged -- the upper layer will not see a completion in that case.
 *
 * Fix vs. previous version: the request was destroyed unconditionally and
 * then destroyed a second time on the queue-push failure path (double
 * free); it is now destroyed exactly once.
 */
int dbBE_Redis_create_send_error( dbBE_Completion_queue_t *cq, dbBE_Redis_request_t *request, int error )
{
  dbBE_Completion_t *completion = dbBE_Redis_complete_error( request,
                                                             error,
                                                             0 );

  /* the completion (if any) carries all data the caller needs;
   * the request itself is no longer required after this point */
  dbBE_Redis_request_destroy( request );

  if( completion != NULL )
  {
    if( dbBE_Completion_queue_push( cq, completion ) != 0 )
    {
      free( completion );
      fprintf( stderr, "RedisBE: Failed to queue send-error completion.\n" );
    }
  }
  else
    fprintf( stderr, "RedisBE: Failed to create send-error completion.\n");
  return 0;
}
/*
 * Decide whether the current processing stage of a request requires the
 * Redis key to be (re)generated. Returns the number of matching criteria
 * (0 means no re-keying needed; callers can treat the value as boolean).
 */
static inline
int dbBE_Redis_cmd_stage_needs_rekeying( dbBE_Redis_request_t *request )
{
  int hits = 0;

  /* any command entering its first stage needs key generation, except iterators */
  if(( request->_step->_stage == 0 ) && ( request->_user->_opcode != DBBE_OPCODE_ITERATOR ))
    ++hits;

  /* MOVE changes keys between stages, so every stage is re-keyed */
  if( request->_user->_opcode == DBBE_OPCODE_MOVE )
    ++hits;

  /* NSDETACH needs a fresh key for its namespace-delete stage */
  if(( request->_user->_opcode == DBBE_OPCODE_NSDETACH ) && ( request->_step->_stage == DBBE_REDIS_NSDETACH_STAGE_DELNS ))
    ++hits;

  return hits;
}
/*
 * Pre-process a request before it enters the send path.
 *
 * Non-iterator requests pass through unchanged. For ITERATOR requests this
 * function:
 *  - resolves an existing iterator (from request status or the user key) or
 *    allocates a fresh one and pins it to the first ready connection,
 *  - decides whether a remote prefetch round-trip is needed (cache below
 *    half of DBBE_REDIS_ITERATOR_CACHE_ENTRIES and remote scan not done),
 *  - if no prefetch is needed, completes the request locally from the
 *    iterator's cached keys (no Redis round-trip).
 *
 * Returns the request when it still must be sent to Redis, or NULL when it
 * was completed or failed locally. On every error path the request is
 * handed to dbBE_Redis_create_send_error(), which destroys it.
 */
static
dbBE_Redis_request_t* dbBE_Redis_request_preprocess( dbBE_Redis_context_t *backend, dbBE_Redis_request_t *request )
{
  if(( request == NULL ) || ( backend == NULL ))
    return request;

  if( request->_user->_opcode == DBBE_OPCODE_ITERATOR )
  {
    dbBE_Redis_iterator_t *it = request->_status.iterator._it;

    // if we don't have a status cursor, we assume this is the first call/cursor creation
    // (the user passes the iterator handle back in via the key field)
    if( it == NULL )
      it = (dbBE_Redis_iterator_t*)request->_user->_key;

    // iterator with no data but end-of cycle is invalid
    if(( it != NULL ) && ( it->_cache_count == 0 ) && ( it->_connection == NULL ))
    {
      dbBE_Redis_create_send_error( backend->_compl_q, request, DBR_ERR_ITERATOR );
      return NULL;
    }

    // new iterator
    if( it == NULL )
    {
      unsigned i;
      it = dbBE_Redis_iterator_new( backend->_iterators );
      if( it == NULL )
      {
        dbBE_Redis_create_send_error( backend->_compl_q, request, DBR_ERR_ITERATOR );
        return NULL;
      }
      request->_status.iterator._it = it;

      // set the first connection index since this is a fresh iterator
      // (scan over the connection array for the first ready-to-receive one)
      dbBE_Redis_connection_t *conn = NULL;
      for( i = 0; (i < DBBE_REDIS_MAX_CONNECTIONS); ++i )
      {
        if(( backend->_conn_mgr->_connections[ i ] != NULL ) &&
           (dbBE_Redis_connection_RTR( backend->_conn_mgr->_connections[ i ] ) ))
        {
          conn = backend->_conn_mgr->_connections[ i ];
          break;
        }
      }
      if( i == DBBE_REDIS_MAX_CONNECTIONS )
      {
        dbBE_Redis_create_send_error( backend->_compl_q, request, DBR_ERR_NOCONNECT );
        return NULL;
      }
      // pin the request (and the iterator) to that connection
      request->_location._type = DBBE_REDIS_REQUEST_LOCATION_TYPE_CONNECTION;
      request->_location._data._connection = conn;
      it->_connection = conn;
    }
    else
    {
      // existing iterator: keep using the connection it was created on
      request->_status.iterator._it = it;
      request->_location._type = DBBE_REDIS_REQUEST_LOCATION_TYPE_CONNECTION;
      request->_location._data._connection = it->_connection;
    }

    // check cache status and maybe create a request
    // needs prefetch or is complete
    int needs_prefetch = (( it->_cache_count < (DBBE_REDIS_ITERATOR_CACHE_ENTRIES >> 1 )) && ( dbBE_Redis_iterator_remote_complete( it ) == 0 ));
    if( needs_prefetch != 0 )
    {
      // nothing to do with the request the above init-code or
      // the response parser prepares connections and therefore everything should be ready now
    }
    else
    {
      // if no prefetch is needed, the request must be completed
      if( it->_cache_count > 0 )
      {
        // serve the next key straight from the local cache
        char *key = dbBE_Redis_iterator_pop_cached_key( it );
        dbBE_Redis_iterator_copy_key( request->_user->_sge, key );
        if( dbBE_Redis_iterator_complete( it ) )
        {
          // iteration finished: recycle the iterator; NULL handle below
          // signals "no more entries" to the user
          dbBE_Redis_iterator_reset( it );
          it = NULL;
        }
        // the iterator handle is returned to the user as an integer result
        dbBE_Redis_result_t result;
        result._type = dbBE_REDIS_TYPE_INT;
        result._data._integer = (int64_t)it;
        dbBE_Completion_t *completion = dbBE_Redis_complete_command(
            request,
            &result, DBR_SUCCESS );
        if( completion == NULL )
        {
          dbBE_Redis_create_send_error( backend->_compl_q, request, DBR_ERR_BE_GENERAL );
          return NULL;
        }
        if( dbBE_Completion_queue_push( backend->_compl_q, completion ) != 0 )
        {
          free( completion );
          dbBE_Redis_request_destroy( request );
          fprintf( stderr, "RedisBE: Failed to queue completion.\n" );
          return NULL;
        }
      }
      else // iterator is complete/empty/invalid
      {
        dbBE_Redis_create_send_error( backend->_compl_q, request, DBR_ERR_ITERATOR );
        return NULL;
      }
      // request completed locally -> nothing left to send to Redis
      dbBE_Redis_request_destroy( request );
      request = NULL;
    }
  }
  return request;
}
/*
 * Pull the next workable request, honoring priority:
 *   1. retry queue (multi-stage / redirected / repeated requests)
 *   2. new user requests from the work queue
 * Cancelled requests are completed-as-cancelled and dropped; iterator requests
 * may be completed immediately by dbBE_Redis_request_preprocess().  The loop
 * repeats until a request survives both checks or the queues are empty.
 * Returns NULL when there is nothing to do.
 *
 * Fix: the cancellation path used to call dbBE_Redis_request_destroy(request)
 * inside the completion-push-failure branch AND unconditionally afterwards,
 * destroying the same request twice (double free).  The request is now
 * destroyed exactly once.
 */
static
dbBE_Redis_request_t* dbBE_Redis_sender_acquire_request( dbBE_Redis_context_t *backend )
{
  // check for any activity according to priority
  // - request shelf (anything that had to wait because of broken connections)
  // - repeat/multistage/redirect (anything that needs an additional iteration)
  // - new user requests
  dbBE_Redis_request_t *request = NULL; // todo: pick from shelf
  dbBE_Request_t *user_req = NULL;
  do
  {
    if( request == NULL )
      request = dbBE_Redis_s2r_queue_pop( backend->_retry_q );
    if( request == NULL )
    {
      user_req = dbBE_Request_queue_pop( backend->_work_q );
      if( user_req != NULL )
        request = dbBE_Redis_request_allocate( user_req );
    }
    // if there's really nothing to do: skip
    if( request == NULL )
      return NULL;
    // Check if this request has been cancelled before continuing to process it
    if( dbBE_Request_set_delete( backend->_cancellations, request->_user) != 0 )
    {
      dbBE_Completion_t *completion = dbBE_Redis_complete_cancel( request );
      if( completion != NULL )
        if( dbBE_Completion_queue_push( backend->_compl_q, completion ) != 0 )
        {
          free( completion );
          fprintf( stderr, "RedisBE: Failed to queue completion.\n" );
          // todo: save the status to mark the request for cleanup during the next stages
        }
      // clean the RedisBE request struct (exactly once — see note above)
      dbBE_Redis_request_destroy( request );
      request = NULL;
    }
    // preprocess (mainly for iterators where immediate completion is possible);
    // handles a NULL request gracefully and may consume the request entirely
    request = dbBE_Redis_request_preprocess( backend, request );
  } while( request == NULL ); // repeat in case there was a cancellation
  return request;
}
/*
 * Resolve the Redis connection a request must be sent on.
 * For stages that need (re-)keying, the key is built and hashed to a cluster
 * slot, and the locator maps the slot to a connection index — unless the
 * request already carries a direct connection pointer (e.g. ASK redirect or
 * iterator pinning).  On failure an error completion is queued and NULL is
 * returned.
 */
static
dbBE_Redis_connection_t* dbBE_Redis_sender_find_connection( dbBE_Redis_context_t *backend,
                                                            dbBE_Redis_request_t *request )
{
  dbBE_Redis_connection_t *conn = NULL;
  /*
   * Do the location check/retrieval each time and also for multi-stage requests
   * because the key might have changed and then the conn-index would be off.
   */
  if( dbBE_Redis_cmd_stage_needs_rekeying( request ) != 0 )
  {
    char keybuffer[ DBBE_REDIS_MAX_KEY_LEN ];
    if( dbBE_Redis_create_key( request, keybuffer, DBBE_REDIS_MAX_KEY_LEN ) < 0 )
    {
      dbBE_Redis_create_send_error( backend->_compl_q, request, DBR_ERR_INVALID );
      return NULL;
    }
    /*
     * use locator to retrieve address
     * unless it's a redirect (ASK) which directly contains
     * a direct connection pointer for temporary requesting a different server
     */
    uint16_t slot = dbBE_Redis_locator_hash( keybuffer, strnlen( keybuffer, DBBE_REDIS_MAX_KEY_LEN ) );
    if( request->_location._type != DBBE_REDIS_REQUEST_LOCATION_TYPE_CONNECTION )
    {
      request->_location._data._conn_idx = dbBE_Redis_locator_get_conn_index( backend->_locator, slot );
      if( request->_location._data._conn_idx == DBBE_REDIS_LOCATOR_INDEX_INVAL )
        request->_location._type = DBBE_REDIS_REQUEST_LOCATION_TYPE_UNKNOWN;
      else
        request->_location._type = DBBE_REDIS_REQUEST_LOCATION_TYPE_SLOT;
    }
    // slot has no known owner -> cannot send anywhere
    if( request->_location._type == DBBE_REDIS_REQUEST_LOCATION_TYPE_UNKNOWN )
    {
      dbBE_Redis_create_send_error( backend->_compl_q, request, DBR_ERR_NOCONNECT );
      return NULL;
    }
  }
  // connection mgr to retrieve the sr_buffer + socket
  if( request->_location._type == DBBE_REDIS_REQUEST_LOCATION_TYPE_SLOT )
    conn = dbBE_Redis_connection_mgr_get_connection_at( backend->_conn_mgr, request->_location._data._conn_idx );
  else
    conn = request->_location._data._connection;
  return conn;
}
/*
* sender function, creates requests to redis
*/
/*
 * Sender thread/function body: drains the retry and work queues, assembles
 * Redis commands into per-connection SGE buffers (coalescing up to
 * DBBE_REDIS_COALESCED_MAX requests per connection), then flushes all
 * connections that accumulated data.  `args` is a dbBE_Redis_sender_args_t*;
 * always returns NULL (thread-style signature).
 */
void* dbBE_Redis_sender( void *args )
{
  int rc = 0;
  dbBE_Redis_sender_args_t *input = (dbBE_Redis_sender_args_t*)args;
  if( args == NULL )
  {
    errno = EINVAL;
    return NULL;
  }
  if( input->_backend == NULL )
  {
    fprintf( stderr, "FATAL: No backend defined.");
    return NULL;
  }
  int pending_last = -1;  // index of the last entry in the pending-connection list (-1 == empty)
  int request_limit = DBBE_REDIS_COALESCED_MAX * dbBE_Redis_connection_mgr_get_connections( input->_backend->_conn_mgr );
  /*
   * check server connections,
   * fail requests only if situation is not recoverable
   */
  if( dbBE_Redis_locator_hash_covered( input->_backend->_locator ) == 0 )
  {
    dbBE_Redis_connection_recoverable_t recoverable = dbBE_Redis_connection_mgr_conn_recover(
        input->_backend->_conn_mgr,
        input->_backend->_locator,
        &( input->_backend->_cluster_info ) );
    switch( recoverable )
    {
      case DBBE_REDIS_CONNECTION_RECOVERABLE: // recoverable but not yet recovered
        goto skip_sending;
        break;
      case DBBE_REDIS_CONNECTION_RECOVERED: // recovered
        // nothing to do, we're good to continue
        break;
      case DBBE_REDIS_CONNECTION_UNRECOVERABLE: // not recoverable at the moment
        LOG(DBG_ERR, stderr, "Unrecoverable cluster connection. Completing all requests as failed.\n")
        // intentionally no break
      default: // unrecognized
      {
        // flush queues: fail every parked request with NOCONNECT
        dbBE_Redis_request_t *request;
        while( ( request = dbBE_Redis_s2r_queue_pop( input->_backend->_retry_q )) != NULL )
          dbBE_Redis_create_send_error( input->_backend->_compl_q, request, DBR_ERR_NOCONNECT );
        return NULL;
        break;
      }
    }
  }
  dbBE_Redis_request_t *request = NULL;
  int *pending_conn = input->_backend->_sender_connections;  // list of connection indices with queued data
  while(( --request_limit > 0 ) && ( pending_last < DBBE_REDIS_COALESCED_MAX * dbBE_Redis_connection_mgr_get_connections( input->_backend->_conn_mgr ) ))
  {
    request = dbBE_Redis_sender_acquire_request( input->_backend );
    if( request == NULL )
      break;
    // find out which connection to use
    dbBE_Redis_connection_t *conn = dbBE_Redis_sender_find_connection( input->_backend, request );
    if( conn == NULL )
    {
      LOG( DBG_ERR, stderr, "Failed to get back-end connection.\n" );
      // todo: might have to create completion (unless there are more sub-tasks in flight)
      rc = -ENOMSG;
      break;
    }
    if( ! dbBE_Redis_connection_RTS( conn ) )
    {
      LOG( DBG_ERR, stderr, "Associated connection not ready to send\n" );
      rc = -ENOTCONN;
      break;
    }
    // create_command assembles an SGE list
    // entries either come directly from user or from send buffer
    // when complete, connection.send() fires the assembled data
    dbBE_sge_t *cmd = dbBE_Transport_sge_buffer_get_current( conn->_cmd );
    rc = dbBE_Redis_create_command_sge( request, input->_backend->_sender_buffer, cmd );
    if( rc < 0 )
    {
      LOG( DBG_ERR, stderr, "Failed to create command. rc=%d\n", rc );
      rc = -ENOMSG;
      break;
    }
    // update cmd buffer status for this connection
    if( dbBE_Transport_sge_buffer_add( conn->_cmd, rc ) > ( (DBBE_SGE_MAX >> 2) * 3 ))
      request_limit = 1; // if we exceed 75% of the SGE space, we better stop to avoid blowing the limit with the next request
    // instead of sending, add connection to a pending connections list
    // (only advance when the connection differs from the most recent entry)
    if(( pending_last < 0 ) || ( conn->_index != pending_conn[ pending_last ] ))
      ++pending_last;
    pending_conn[ pending_last ] = conn->_index;
    pending_conn[ pending_last+1 ] = -1;  // keep the list terminated
    // store request to posted requests queue
    rc = dbBE_Redis_s2r_queue_push( conn->_posted_q, request );
    if( rc != 0 )
    {
      rc = -ENOMSG;
      break;
    }
  }
skip_sending:
  // before triggering the receiver, do the post on all pending connections
  while( pending_last >= 0 )
  {
    rc = dbBE_Redis_connection_send_cmd( dbBE_Redis_connection_mgr_get_connection_at( input->_backend->_conn_mgr, pending_conn[ pending_last ] ) );
    if( rc < 0 )
    {
      LOG( DBG_ERR, stderr, "Failed to send command. rc=%d\n", rc );
      break;
    }
    --pending_last;
  }
  dbBE_Transport_sr_buffer_reset( input->_backend->_sender_buffer );
  // complete the request with an error
  //dbBE_Redis_create_error( request, input->_backend->_compl_q );
  // clean up
  return NULL;
}
/*
 * Run one synchronous sender+receiver cycle for the given backend.
 * Allocates the argument struct on the heap because both worker functions
 * take a void* thread-style argument.
 *
 * Fix: the malloc() result was dereferenced without a NULL check; on
 * allocation failure this now logs and returns instead of crashing.
 */
void dbBE_Redis_sender_trigger( dbBE_Redis_context_t *backend )
{
  dbBE_Redis_sender_args_t *args = (dbBE_Redis_sender_args_t*)malloc( sizeof( dbBE_Redis_sender_args_t ) );
  if( args == NULL )
  {
    fprintf( stderr, "RedisBE: Failed to allocate sender args.\n" );
    return;
  }
  args->_backend = backend;
  args->_looping = 1;
  dbBE_Redis_sender( (void*) args );
  dbBE_Redis_receiver( (void*) args );
  free( args );
}
| 33.767981 | 167 | 0.668751 |
1b36a15b9c13873d9f2031cadf1203ebce2866f8 | 979 | h | C | PrivateFrameworks/FrontBoardServices.framework/FBSProcessWatchdogPolicy.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | 36 | 2016-04-20T04:19:04.000Z | 2018-10-08T04:12:25.000Z | PrivateFrameworks/FrontBoardServices.framework/FBSProcessWatchdogPolicy.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | null | null | null | PrivateFrameworks/FrontBoardServices.framework/FBSProcessWatchdogPolicy.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | 10 | 2016-06-16T02:40:44.000Z | 2019-01-15T03:31:45.000Z | /* Generated by RuntimeBrowser
Image: /System/Library/PrivateFrameworks/FrontBoardServices.framework/FrontBoardServices
*/
/// RuntimeBrowser-dumped private interface: a named, copyable collection of
/// watchdog "provisions" used by FrontBoardServices to police processes.
/// NOTE(review): header is machine-generated from the runtime; property
/// attributes/ownership are as reported by the dump, not audited.
@interface FBSProcessWatchdogPolicy : NSObject <BSDescriptionProviding, NSCopying> {
    NSString * _name;        // backing ivar for `name`
    NSArray * _provisions;   // backing ivar for `provisions`
}

// BSDescriptionProviding / NSObject description plumbing
@property (readonly, copy) NSString *debugDescription;
@property (readonly, copy) NSString *description;
@property (readonly) unsigned long long hash;
@property (nonatomic, copy) NSString *name;          // policy display name
@property (nonatomic, copy) NSArray *provisions;     // watchdog provision objects
@property (readonly) Class superclass;

/// Convenience factories wrapping name/provisions assignment.
+ (id)policyWithName:(id)arg1 forProvisions:(id)arg2;
+ (id)policyWithProvisions:(id)arg1;

- (id)copyWithZone:(struct _NSZone { }*)arg1;
- (void)dealloc;
- (id)description;
- (id)descriptionBuilderWithMultilinePrefix:(id)arg1;
- (id)descriptionWithMultilinePrefix:(id)arg1;
- (id)name;
- (id)provisions;
- (void)setName:(id)arg1;
- (void)setProvisions:(id)arg1;
- (id)succinctDescription;
- (id)succinctDescriptionBuilder;

@end
| 29.666667 | 91 | 0.762002 |
f058f73b24c004d17d08bf68152efa3b2335f929 | 46,162 | c | C | wireshark-2.0.13/epan/dissectors/packet-q932.c | mahrukhfida/mi | 7187765aa225e71983969ef5285771ac77c8309a | [
"Apache-2.0"
] | null | null | null | wireshark-2.0.13/epan/dissectors/packet-q932.c | mahrukhfida/mi | 7187765aa225e71983969ef5285771ac77c8309a | [
"Apache-2.0"
] | null | null | null | wireshark-2.0.13/epan/dissectors/packet-q932.c | mahrukhfida/mi | 7187765aa225e71983969ef5285771ac77c8309a | [
"Apache-2.0"
] | null | null | null | /* Do not modify this file. Changes will be overwritten. */
/* Generated automatically by the ASN.1 to Wireshark dissector compiler */
/* packet-q932.c */
/* ../../tools/asn2wrs.py -b -p q932 -c ./q932.cnf -s ./packet-q932-template -D . -O ../../epan/dissectors Addressing-Data-Elements.asn Network-Facility-Extension.asn Network-Protocol-Profile-component.asn Interpretation-component.asn */
/* Input file: packet-q932-template.c */
#line 1 "../../asn1/q932/packet-q932-template.c"
/* packet-q932.c
* Routines for Q.932 packet dissection
* 2007 Tomas Kukosa
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <epan/packet.h>
#include <epan/expert.h>
#include <epan/strutil.h>
#include <epan/asn1.h>
#include <epan/prefs.h>
#include "packet-ber.h"
#include "packet-q932.h"
#define PNAME  "Q.932"
#define PSNAME "Q932"
#define PFNAME "q932"

void proto_register_q932(void);

/* Initialize the protocol and registered fields.
 * NOTE: the block below (between the asn2wrs markers) is machine-generated;
 * several identifiers carry spec-derived spelling quirks that are intentional
 * and must not be "fixed" (e.g. presentationAlIowedAddress with a capital 'I',
 * ...DueTolnterworking with an 'l', screeninglndicator vs screeningIndicator). */
static int proto_q932 = -1;
static int hf_q932_ie_type = -1;
static int hf_q932_ie_len = -1;
static int hf_q932_ie_data = -1;
static int hf_q932_pp = -1;
static int hf_q932_nd = -1;

/*--- Included file: packet-q932-hf.c ---*/
#line 1 "../../asn1/q932/packet-q932-hf.c"
static int hf_q932_NetworkFacilityExtension_PDU = -1;  /* NetworkFacilityExtension */
static int hf_q932_NetworkProtocolProfile_PDU = -1;    /* NetworkProtocolProfile */
static int hf_q932_InterpretationComponent_PDU = -1;   /* InterpretationComponent */
static int hf_q932_presentationAlIowedAddress = -1;    /* AddressScreened */
static int hf_q932_presentationRestricted = -1;        /* NULL */
static int hf_q932_numberNotAvailableDueTolnterworking = -1;  /* NULL */
static int hf_q932_presentationRestrictedAddressScreened = -1;  /* AddressScreened */
static int hf_q932_presentationAllowedAddress = -1;    /* Address */
static int hf_q932_presentationRestrictedAddress = -1;  /* Address */
static int hf_q932_presentationAllowedNumberScreened = -1;  /* NumberScreened */
static int hf_q932_numberNotAvailableDueToInterworking = -1;  /* NULL */
static int hf_q932_presentationRestrictedNumberScreened = -1;  /* NumberScreened */
static int hf_q932_presentationAllowedNumber = -1;     /* PartyNumber */
static int hf_q932_presentationRestrictedNumber = -1;  /* PartyNumber */
static int hf_q932_partyNumber = -1;                   /* PartyNumber */
static int hf_q932_screeninglndicator = -1;            /* ScreeningIndicator */
static int hf_q932_partySubaddress = -1;               /* PartySubaddress */
static int hf_q932_screeningIndicator = -1;            /* ScreeningIndicator */
static int hf_q932_unknownPartyNumber = -1;            /* NumberDigits */
static int hf_q932_publicPartyNumber = -1;             /* PublicPartyNumber */
static int hf_q932_nsapEncodedNumber = -1;             /* NsapEncodedNumber */
static int hf_q932_dataPartyNumber = -1;               /* NumberDigits */
static int hf_q932_telexPartyNumber = -1;              /* NumberDigits */
static int hf_q932_privatePartyNumber = -1;            /* PrivatePartyNumber */
static int hf_q932_nationalStandardPartyNumber = -1;   /* NumberDigits */
static int hf_q932_publicTypeOfNumber = -1;            /* PublicTypeOfNumber */
static int hf_q932_publicNumberDigits = -1;            /* NumberDigits */
static int hf_q932_privateTypeOfNumber = -1;           /* PrivateTypeOfNumber */
static int hf_q932_privateNumberDigits = -1;           /* NumberDigits */
static int hf_q932_userSpecifiedSubaddress = -1;       /* UserSpecifiedSubaddress */
static int hf_q932_nSAPSubaddress = -1;                /* NSAPSubaddress */
static int hf_q932_subaddressInformation = -1;         /* SubaddressInformation */
static int hf_q932_oddCountIndicator = -1;             /* BOOLEAN */
static int hf_q932_sourceEntity = -1;                  /* EntityType */
static int hf_q932_sourceEntityAddress = -1;           /* AddressInformation */
static int hf_q932_destinationEntity = -1;             /* EntityType */
static int hf_q932_destinationEntityAddress = -1;      /* AddressInformation */

/*--- End of included file: packet-q932-hf.c ---*/
#line 49 "../../asn1/q932/packet-q932-template.c"

/* Initialize the subtree pointers (ett_* = expansion-tree handles) */
static gint ett_q932 = -1;
static gint ett_q932_ie = -1;

/*--- Included file: packet-q932-ett.c ---*/
#line 1 "../../asn1/q932/packet-q932-ett.c"
static gint ett_q932_PresentedAddressScreened = -1;
static gint ett_q932_PresentedAddressUnscreened = -1;
static gint ett_q932_PresentedNumberScreened = -1;
static gint ett_q932_PresentedNumberUnscreened = -1;
static gint ett_q932_AddressScreened = -1;
static gint ett_q932_NumberScreened = -1;
static gint ett_q932_Address = -1;
static gint ett_q932_PartyNumber = -1;
static gint ett_q932_PublicPartyNumber = -1;
static gint ett_q932_PrivatePartyNumber = -1;
static gint ett_q932_PartySubaddress = -1;
static gint ett_q932_UserSpecifiedSubaddress = -1;
static gint ett_q932_NetworkFacilityExtension_U = -1;

/*--- End of included file: packet-q932-ett.c ---*/
#line 54 "../../asn1/q932/packet-q932-template.c"

/* expert-info fields for malformed / unsupported components */
static expert_field ei_q932_dse_not_supported = EI_INIT;
static expert_field ei_q932_acse_not_supported = EI_INIT;
static expert_field ei_q932_unknown_component = EI_INIT;
static expert_field ei_q932_asn1_encoded = EI_INIT;

/* Preferences */
/* ROSE context and per-standard operation/error dissector tables */
static rose_ctx_t q932_rose_ctx;
dissector_table_t qsig_arg_local_dissector_table;
dissector_table_t qsig_res_local_dissector_table;
dissector_table_t qsig_err_local_dissector_table;
dissector_table_t etsi_arg_local_dissector_table;
dissector_table_t etsi_res_local_dissector_table;
dissector_table_t etsi_err_local_dissector_table;
static gint g_facility_encoding = 0; /* Default to QSIG */
void proto_reg_handoff_q932(void);

/* Subdissectors */
static dissector_handle_t q932_ros_handle;
/* Q.932 information-element identifiers (codeset 0) */
#define Q932_IE_EXTENDED_FACILITY 0x0D
#define Q932_IE_FACILITY 0x1C
#define Q932_IE_NOTIFICATION_INDICATOR 0x27
#define Q932_IE_INFORMATION_REQUEST 0x32
#define Q932_IE_FEATURE_ACTIVATION 0x38
#define Q932_IE_FEATURE_INDICATION 0x39
#define Q932_IE_SERVICE_PROFILE_ID 0x3A
#define Q932_IE_ENDPOINT_IDENTIFIER 0x3B

/* IE type -> display string */
static const value_string q932_str_ie_type[] = {
  { Q932_IE_EXTENDED_FACILITY , "Extended facility" },
  { Q932_IE_FACILITY , "Facility" },
  { Q932_IE_NOTIFICATION_INDICATOR, "Notification indicator" },
  { Q932_IE_INFORMATION_REQUEST, "Information request" },
  { Q932_IE_FEATURE_ACTIVATION , "Feature activation" },
  { Q932_IE_FEATURE_INDICATION , "Feature indication" },
  { Q932_IE_SERVICE_PROFILE_ID , "Service profile identification" },
  { Q932_IE_ENDPOINT_IDENTIFIER, "Endpoint identifier" },
  { 0, NULL}
};

/* protocol-profile octet values (Facility IE) */
static const value_string str_pp[] = {
  { 0x11 , "Remote Operations Protocol" },
  { 0x12 , "CMIP Protocol" },
  { 0x13 , "ACSE Protocol" },
  { 0x1F , "Networking extensions" },
  { 0, NULL}
};

/* notification-description values (Notification indicator IE) */
static const value_string str_nd[] = {
  { 0x00 , "User suspended" },
  { 0x01 , "User resume" },
  { 0x02 , "Bearer service change" },
  { 0x04 , "Call completion delay" },
  { 0x03 , "Discriminator for extension to ASN.1 encoded component" },
  { 0x40 , "Discriminator for extension to ASN.1 encoded component for ISO" },
  { 0x42 , "Conference established" },
  { 0x43 , "Conference disconnected" },
  { 0x44 , "Other party added" },
  { 0x45 , "Isolated" },
  { 0x46 , "Reattached" },
  { 0x47 , "Other party isolated" },
  { 0x48 , "Other party reattached" },
  { 0x49 , "Other party split" },
  { 0x4A , "Other party disconnected" },
  { 0x4B , "Conference floating" },
  { 0x4C , "Conference disconnected, pre-emption" },
  { 0x4F , "Conference floating, served user pre-empted" },
  { 0x60 , "Call is a waiting call" },
  { 0x68 , "Diversion activated" },
  { 0x69 , "call transferred, alerting" },
  { 0x6A , "call transferred, answered" },
  { 0x6E , "reverse charging (whole call)" },
  { 0x6F , "reverse charging (for the rest of the call)" },
  { 0x74 , "service profile update" },
  { 0x79 , "Remote hold" },
  { 0x7A , "Remote retrieval" },
  { 0x7B , "Call is diverting" },
  { 0, NULL}
};
/*--- Included file: packet-q932-fn.c ---*/
#line 1 "../../asn1/q932/packet-q932-fn.c"
/* asn2wrs-generated BER decoders — regenerate from the .asn/.cnf sources
 * rather than hand-editing (see the "Do not modify" banner at file top). */

/* NumberDigits ::= NumericString */
static int
dissect_q932_NumberDigits(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_restricted_string(implicit_tag, BER_UNI_TAG_NumericString,
                                         actx, tree, tvb, offset, hf_index,
                                         NULL);

  return offset;
}

static const value_string q932_PublicTypeOfNumber_vals[] = {
  { 0, "unknown" },
  { 1, "internationalNumber" },
  { 2, "nationalNumber" },
  { 3, "networkSpecificNumber" },
  { 4, "subscriberNumber" },
  { 6, "abbreviatedNumber" },
  { 0, NULL }
};

/* PublicTypeOfNumber ::= ENUMERATED (decoded as integer) */
static int
dissect_q932_PublicTypeOfNumber(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_integer(implicit_tag, actx, tree, tvb, offset, hf_index,
                               NULL);

  return offset;
}

static const ber_sequence_t PublicPartyNumber_sequence[] = {
  { &hf_q932_publicTypeOfNumber, BER_CLASS_UNI, BER_UNI_TAG_ENUMERATED, BER_FLAGS_NOOWNTAG, dissect_q932_PublicTypeOfNumber },
  { &hf_q932_publicNumberDigits, BER_CLASS_UNI, BER_UNI_TAG_NumericString, BER_FLAGS_NOOWNTAG, dissect_q932_NumberDigits },
  { NULL, 0, 0, 0, NULL }
};

/* PublicPartyNumber ::= SEQUENCE { publicTypeOfNumber, publicNumberDigits } */
static int
dissect_q932_PublicPartyNumber(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_sequence(implicit_tag, actx, tree, tvb, offset,
                                PublicPartyNumber_sequence, hf_index, ett_q932_PublicPartyNumber);

  return offset;
}

/* NsapEncodedNumber ::= OCTET STRING */
static int
dissect_q932_NsapEncodedNumber(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_octet_string(implicit_tag, actx, tree, tvb, offset, hf_index,
                                    NULL);

  return offset;
}
static const value_string q932_PrivateTypeOfNumber_vals[] = {
  { 0, "unknown" },
  { 1, "level2RegionalNumber" },
  { 2, "level1RegionalNumber" },
  { 3, "pTNSpecificNumber" },
  { 4, "localNumber" },
  { 6, "abbreviatedNumber" },
  { 0, NULL }
};

/* PrivateTypeOfNumber ::= ENUMERATED (decoded as integer) */
static int
dissect_q932_PrivateTypeOfNumber(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_integer(implicit_tag, actx, tree, tvb, offset, hf_index,
                               NULL);

  return offset;
}

static const ber_sequence_t PrivatePartyNumber_sequence[] = {
  { &hf_q932_privateTypeOfNumber, BER_CLASS_UNI, BER_UNI_TAG_ENUMERATED, BER_FLAGS_NOOWNTAG, dissect_q932_PrivateTypeOfNumber },
  { &hf_q932_privateNumberDigits, BER_CLASS_UNI, BER_UNI_TAG_NumericString, BER_FLAGS_NOOWNTAG, dissect_q932_NumberDigits },
  { NULL, 0, 0, 0, NULL }
};

/* PrivatePartyNumber ::= SEQUENCE { privateTypeOfNumber, privateNumberDigits } */
static int
dissect_q932_PrivatePartyNumber(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_sequence(implicit_tag, actx, tree, tvb, offset,
                                PrivatePartyNumber_sequence, hf_index, ett_q932_PrivatePartyNumber);

  return offset;
}

/* PartyNumber ::= CHOICE keyed by context-specific tag (0..5, 8) */
static const ber_choice_t PartyNumber_choice[] = {
  { 0, &hf_q932_unknownPartyNumber, BER_CLASS_CON, 0, BER_FLAGS_IMPLTAG, dissect_q932_NumberDigits },
  { 1, &hf_q932_publicPartyNumber, BER_CLASS_CON, 1, BER_FLAGS_IMPLTAG, dissect_q932_PublicPartyNumber },
  { 2, &hf_q932_nsapEncodedNumber, BER_CLASS_CON, 2, BER_FLAGS_IMPLTAG, dissect_q932_NsapEncodedNumber },
  { 3, &hf_q932_dataPartyNumber, BER_CLASS_CON, 3, BER_FLAGS_IMPLTAG, dissect_q932_NumberDigits },
  { 4, &hf_q932_telexPartyNumber, BER_CLASS_CON, 4, BER_FLAGS_IMPLTAG, dissect_q932_NumberDigits },
  { 5, &hf_q932_privatePartyNumber, BER_CLASS_CON, 5, BER_FLAGS_IMPLTAG, dissect_q932_PrivatePartyNumber },
  { 8, &hf_q932_nationalStandardPartyNumber, BER_CLASS_CON, 8, BER_FLAGS_IMPLTAG, dissect_q932_NumberDigits },
  { 0, NULL, 0, 0, 0, NULL }
};

/* exported: shared with other Q.932-family dissectors (see packet-q932.h) */
int
dissect_q932_PartyNumber(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_choice(actx, tree, tvb, offset,
                              PartyNumber_choice, hf_index, ett_q932_PartyNumber,
                              NULL);

  return offset;
}
/* ScreeningIndicator ::= ENUMERATED (exported) */
int
dissect_q932_ScreeningIndicator(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_integer(implicit_tag, actx, tree, tvb, offset, hf_index,
                               NULL);

  return offset;
}

/* SubaddressInformation ::= OCTET STRING */
static int
dissect_q932_SubaddressInformation(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_octet_string(implicit_tag, actx, tree, tvb, offset, hf_index,
                                    NULL);

  return offset;
}

static int
dissect_q932_BOOLEAN(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_boolean(implicit_tag, actx, tree, tvb, offset, hf_index, NULL);

  return offset;
}

/* UserSpecifiedSubaddress ::= SEQUENCE { subaddressInformation, oddCountIndicator OPTIONAL } */
static const ber_sequence_t UserSpecifiedSubaddress_sequence[] = {
  { &hf_q932_subaddressInformation, BER_CLASS_UNI, BER_UNI_TAG_OCTETSTRING, BER_FLAGS_NOOWNTAG, dissect_q932_SubaddressInformation },
  { &hf_q932_oddCountIndicator, BER_CLASS_UNI, BER_UNI_TAG_BOOLEAN, BER_FLAGS_OPTIONAL|BER_FLAGS_NOOWNTAG, dissect_q932_BOOLEAN },
  { NULL, 0, 0, 0, NULL }
};

static int
dissect_q932_UserSpecifiedSubaddress(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_sequence(implicit_tag, actx, tree, tvb, offset,
                                UserSpecifiedSubaddress_sequence, hf_index, ett_q932_UserSpecifiedSubaddress);

  return offset;
}

/* NSAPSubaddress ::= OCTET STRING */
static int
dissect_q932_NSAPSubaddress(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_octet_string(implicit_tag, actx, tree, tvb, offset, hf_index,
                                    NULL);

  return offset;
}

/* PartySubaddress ::= CHOICE distinguished by universal tag (SEQUENCE vs OCTET STRING) */
static const ber_choice_t PartySubaddress_choice[] = {
  { 0, &hf_q932_userSpecifiedSubaddress, BER_CLASS_UNI, BER_UNI_TAG_SEQUENCE, BER_FLAGS_NOOWNTAG, dissect_q932_UserSpecifiedSubaddress },
  { 1, &hf_q932_nSAPSubaddress , BER_CLASS_UNI, BER_UNI_TAG_OCTETSTRING, BER_FLAGS_NOOWNTAG, dissect_q932_NSAPSubaddress },
  { 0, NULL, 0, 0, 0, NULL }
};

/* exported */
int
dissect_q932_PartySubaddress(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_choice(actx, tree, tvb, offset,
                              PartySubaddress_choice, hf_index, ett_q932_PartySubaddress,
                              NULL);

  return offset;
}
/* AddressScreened ::= SEQUENCE { partyNumber, screeningIndicator, partySubaddress OPTIONAL }
 * (hf_q932_screeninglndicator spelling — with 'l' — is generated; keep as-is) */
static const ber_sequence_t AddressScreened_sequence[] = {
  { &hf_q932_partyNumber , BER_CLASS_ANY/*choice*/, -1/*choice*/, BER_FLAGS_NOOWNTAG|BER_FLAGS_NOTCHKTAG, dissect_q932_PartyNumber },
  { &hf_q932_screeninglndicator, BER_CLASS_UNI, BER_UNI_TAG_ENUMERATED, BER_FLAGS_NOOWNTAG, dissect_q932_ScreeningIndicator },
  { &hf_q932_partySubaddress, BER_CLASS_ANY/*choice*/, -1/*choice*/, BER_FLAGS_OPTIONAL|BER_FLAGS_NOOWNTAG|BER_FLAGS_NOTCHKTAG, dissect_q932_PartySubaddress },
  { NULL, 0, 0, 0, NULL }
};

static int
dissect_q932_AddressScreened(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_sequence(implicit_tag, actx, tree, tvb, offset,
                                AddressScreened_sequence, hf_index, ett_q932_AddressScreened);

  return offset;
}

static int
dissect_q932_NULL(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_null(implicit_tag, actx, tree, tvb, offset, hf_index);

  return offset;
}

/* PresentedAddressScreened ::= CHOICE, context tags [0]..[3] */
static const ber_choice_t PresentedAddressScreened_choice[] = {
  { 0, &hf_q932_presentationAlIowedAddress, BER_CLASS_CON, 0, BER_FLAGS_IMPLTAG, dissect_q932_AddressScreened },
  { 1, &hf_q932_presentationRestricted, BER_CLASS_CON, 1, BER_FLAGS_IMPLTAG, dissect_q932_NULL },
  { 2, &hf_q932_numberNotAvailableDueTolnterworking, BER_CLASS_CON, 2, BER_FLAGS_IMPLTAG, dissect_q932_NULL },
  { 3, &hf_q932_presentationRestrictedAddressScreened, BER_CLASS_CON, 3, BER_FLAGS_IMPLTAG, dissect_q932_AddressScreened },
  { 0, NULL, 0, 0, 0, NULL }
};

/* exported */
int
dissect_q932_PresentedAddressScreened(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_choice(actx, tree, tvb, offset,
                              PresentedAddressScreened_choice, hf_index, ett_q932_PresentedAddressScreened,
                              NULL);

  return offset;
}

/* Address ::= SEQUENCE { partyNumber, partySubaddress OPTIONAL } */
static const ber_sequence_t Address_sequence[] = {
  { &hf_q932_partyNumber , BER_CLASS_ANY/*choice*/, -1/*choice*/, BER_FLAGS_NOOWNTAG|BER_FLAGS_NOTCHKTAG, dissect_q932_PartyNumber },
  { &hf_q932_partySubaddress, BER_CLASS_ANY/*choice*/, -1/*choice*/, BER_FLAGS_OPTIONAL|BER_FLAGS_NOOWNTAG|BER_FLAGS_NOTCHKTAG, dissect_q932_PartySubaddress },
  { NULL, 0, 0, 0, NULL }
};

/* exported */
int
dissect_q932_Address(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_sequence(implicit_tag, actx, tree, tvb, offset,
                                Address_sequence, hf_index, ett_q932_Address);

  return offset;
}

/* PresentedAddressUnscreened ::= CHOICE, context tags [0]..[3] */
static const ber_choice_t PresentedAddressUnscreened_choice[] = {
  { 0, &hf_q932_presentationAllowedAddress, BER_CLASS_CON, 0, BER_FLAGS_IMPLTAG, dissect_q932_Address },
  { 1, &hf_q932_presentationRestricted, BER_CLASS_CON, 1, BER_FLAGS_IMPLTAG, dissect_q932_NULL },
  { 2, &hf_q932_numberNotAvailableDueTolnterworking, BER_CLASS_CON, 2, BER_FLAGS_IMPLTAG, dissect_q932_NULL },
  { 3, &hf_q932_presentationRestrictedAddress, BER_CLASS_CON, 3, BER_FLAGS_IMPLTAG, dissect_q932_Address },
  { 0, NULL, 0, 0, 0, NULL }
};

/* exported */
int
dissect_q932_PresentedAddressUnscreened(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  offset = dissect_ber_choice(actx, tree, tvb, offset,
                              PresentedAddressUnscreened_choice, hf_index, ett_q932_PresentedAddressUnscreened,
                              NULL);

  return offset;
}
static const ber_sequence_t NumberScreened_sequence[] = {
{ &hf_q932_partyNumber , BER_CLASS_ANY/*choice*/, -1/*choice*/, BER_FLAGS_NOOWNTAG|BER_FLAGS_NOTCHKTAG, dissect_q932_PartyNumber },
{ &hf_q932_screeningIndicator, BER_CLASS_UNI, BER_UNI_TAG_ENUMERATED, BER_FLAGS_NOOWNTAG, dissect_q932_ScreeningIndicator },
{ NULL, 0, 0, 0, NULL }
};
static int
dissect_q932_NumberScreened(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
offset = dissect_ber_sequence(implicit_tag, actx, tree, tvb, offset,
NumberScreened_sequence, hf_index, ett_q932_NumberScreened);
return offset;
}
static const ber_choice_t PresentedNumberScreened_choice[] = {
{ 0, &hf_q932_presentationAllowedNumberScreened, BER_CLASS_CON, 0, BER_FLAGS_IMPLTAG, dissect_q932_NumberScreened },
{ 1, &hf_q932_presentationRestricted, BER_CLASS_CON, 1, BER_FLAGS_IMPLTAG, dissect_q932_NULL },
{ 2, &hf_q932_numberNotAvailableDueToInterworking, BER_CLASS_CON, 2, BER_FLAGS_IMPLTAG, dissect_q932_NULL },
{ 3, &hf_q932_presentationRestrictedNumberScreened, BER_CLASS_CON, 3, BER_FLAGS_IMPLTAG, dissect_q932_NumberScreened },
{ 0, NULL, 0, 0, 0, NULL }
};
int
dissect_q932_PresentedNumberScreened(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
offset = dissect_ber_choice(actx, tree, tvb, offset,
PresentedNumberScreened_choice, hf_index, ett_q932_PresentedNumberScreened,
NULL);
return offset;
}
static const ber_choice_t PresentedNumberUnscreened_choice[] = {
{ 0, &hf_q932_presentationAllowedNumber, BER_CLASS_CON, 0, 0, dissect_q932_PartyNumber },
{ 1, &hf_q932_presentationRestricted, BER_CLASS_CON, 1, BER_FLAGS_IMPLTAG, dissect_q932_NULL },
{ 2, &hf_q932_numberNotAvailableDueToInterworking, BER_CLASS_CON, 2, BER_FLAGS_IMPLTAG, dissect_q932_NULL },
{ 3, &hf_q932_presentationRestrictedNumber, BER_CLASS_CON, 3, 0, dissect_q932_PartyNumber },
{ 0, NULL, 0, 0, 0, NULL }
};
int
dissect_q932_PresentedNumberUnscreened(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
offset = dissect_ber_choice(actx, tree, tvb, offset,
PresentedNumberUnscreened_choice, hf_index, ett_q932_PresentedNumberUnscreened,
NULL);
return offset;
}
int
dissect_q932_PresentationAllowedIndicator(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
offset = dissect_ber_boolean(implicit_tag, actx, tree, tvb, offset, hf_index, NULL);
return offset;
}
/* Display strings for the EntityType INTEGER: the kind of PINX (Private
   Integrated services Network eXchange) a facility component addresses. */
static const value_string q932_EntityType_vals[] = {
  {   0, "endPINX" },
  {   1, "anyTypeOfPINX" },
  { 0, NULL }
};
/* Dissect an EntityType (an ASN.1 INTEGER); returns the offset past the
   decoded value. */
static int
dissect_q932_EntityType(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  return dissect_ber_integer(implicit_tag, actx, tree, tvb, offset, hf_index, NULL);
}
/* AddressInformation is defined as a plain PartyNumber, so forward the call
   unchanged and return the resulting offset. */
static int
dissect_q932_AddressInformation(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  return dissect_q932_PartyNumber(implicit_tag, tvb, offset, actx, tree, hf_index);
}
/* BER SEQUENCE table for NetworkFacilityExtension: mandatory source and
   destination entity types (tags 0 and 2) with optional addresses (tags 1
   and 3).  The addresses skip the tag check because AddressInformation is
   itself a (tagged) CHOICE. */
static const ber_sequence_t NetworkFacilityExtension_U_sequence[] = {
  { &hf_q932_sourceEntity   , BER_CLASS_CON, 0, BER_FLAGS_IMPLTAG, dissect_q932_EntityType },
  { &hf_q932_sourceEntityAddress, BER_CLASS_CON, 1, BER_FLAGS_OPTIONAL|BER_FLAGS_NOTCHKTAG, dissect_q932_AddressInformation },
  { &hf_q932_destinationEntity, BER_CLASS_CON, 2, BER_FLAGS_IMPLTAG, dissect_q932_EntityType },
  { &hf_q932_destinationEntityAddress, BER_CLASS_CON, 3, BER_FLAGS_OPTIONAL|BER_FLAGS_NOTCHKTAG, dissect_q932_AddressInformation },
  { NULL, 0, 0, 0, NULL }
};
/* Dissect the untagged body of a NetworkFacilityExtension SEQUENCE; returns
   the offset past the decoded value. */
static int
dissect_q932_NetworkFacilityExtension_U(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  return dissect_ber_sequence(implicit_tag, actx, tree, tvb, offset,
                              NetworkFacilityExtension_U_sequence, hf_index,
                              ett_q932_NetworkFacilityExtension_U);
}
/* Dissect a NetworkFacilityExtension: a context-specific tag 10 wrapper
   around the untagged SEQUENCE body. */
static int
dissect_q932_NetworkFacilityExtension(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  return dissect_ber_tagged_type(implicit_tag, actx, tree, tvb, offset,
                                 hf_index, BER_CLASS_CON, 10, TRUE,
                                 dissect_q932_NetworkFacilityExtension_U);
}
/* Display strings for the NetworkProtocolProfile INTEGER: which protocol
   carries the facility components (ACSE or DSE). */
static const value_string q932_NetworkProtocolProfile_U_vals[] = {
  {  19, "acse" },
  {  32, "dse" },
  { 0, NULL }
};
/* Dissect the untagged NetworkProtocolProfile body (an ASN.1 INTEGER);
   returns the offset past the decoded value. */
static int
dissect_q932_NetworkProtocolProfile_U(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  return dissect_ber_integer(implicit_tag, actx, tree, tvb, offset, hf_index, NULL);
}
/* Dissect a NetworkProtocolProfile: a context-specific tag 18 wrapper
   around the untagged INTEGER body. */
static int
dissect_q932_NetworkProtocolProfile(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  return dissect_ber_tagged_type(implicit_tag, actx, tree, tvb, offset,
                                 hf_index, BER_CLASS_CON, 18, TRUE,
                                 dissect_q932_NetworkProtocolProfile_U);
}
/* Display strings for the InterpretationComponent INTEGER: how the peer
   should treat invoke PDUs it does not recognise. */
static const value_string q932_InterpretationComponent_U_vals[] = {
  {   0, "discardAnyUnrecognisedInvokePdu" },
  {   1, "clearCallIfAnyInvokePduNotRecognised" },
  {   2, "rejectAnyUnrecognisedInvokePdu" },
  { 0, NULL }
};
/* Dissect the untagged InterpretationComponent body (an ASN.1 INTEGER);
   returns the offset past the decoded value. */
static int
dissect_q932_InterpretationComponent_U(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  return dissect_ber_integer(implicit_tag, actx, tree, tvb, offset, hf_index, NULL);
}
/* Dissect an InterpretationComponent: a context-specific tag 11 wrapper
   around the untagged INTEGER body. */
static int
dissect_q932_InterpretationComponent(gboolean implicit_tag _U_, tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) {
  return dissect_ber_tagged_type(implicit_tag, actx, tree, tvb, offset,
                                 hf_index, BER_CLASS_CON, 11, TRUE,
                                 dissect_q932_InterpretationComponent_U);
}
/*--- PDUs ---*/
/* Top-level PDU entry point: dissect a NetworkFacilityExtension starting at
   the beginning of tvb.  Returns the number of octets consumed. */
static int dissect_NetworkFacilityExtension_PDU(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_) {
  asn1_ctx_t asn1_ctx;

  asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo);
  return dissect_q932_NetworkFacilityExtension(FALSE, tvb, 0, &asn1_ctx, tree,
                                               hf_q932_NetworkFacilityExtension_PDU);
}
/* Top-level PDU entry point: dissect a NetworkProtocolProfile starting at
   the beginning of tvb.  Returns the number of octets consumed. */
static int dissect_NetworkProtocolProfile_PDU(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_) {
  asn1_ctx_t asn1_ctx;

  asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo);
  return dissect_q932_NetworkProtocolProfile(FALSE, tvb, 0, &asn1_ctx, tree,
                                             hf_q932_NetworkProtocolProfile_PDU);
}
/* Top-level PDU entry point: dissect an InterpretationComponent starting at
   the beginning of tvb.  Returns the number of octets consumed. */
static int dissect_InterpretationComponent_PDU(tvbuff_t *tvb _U_, packet_info *pinfo _U_, proto_tree *tree _U_, void *data _U_) {
  asn1_ctx_t asn1_ctx;

  asn1_ctx_init(&asn1_ctx, ASN1_ENC_BER, TRUE, pinfo);
  return dissect_q932_InterpretationComponent(FALSE, tvb, 0, &asn1_ctx, tree,
                                              hf_q932_InterpretationComponent_PDU);
}
/*--- End of included file: packet-q932-fn.c ---*/
#line 141 "../../asn1/q932/packet-q932-template.c"
/*--- dissect_q932_facility_ie -------------------------------------------------------*/
/* Dissect the contents of a Q.932 Facility information element.
   The IE body is one protocol-profile octet followed by a series of BER
   TLV components; each component is routed by its (class, tag) pair to the
   matching sub-dissector.  ROSE APDUs are handed to the q932.ros dissector;
   DSE and ACSE APDUs are only flagged as not supported. */
static void
dissect_q932_facility_ie(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, int length) {
  gint8 appclass;
  gboolean pc;
  gint32 tag;
  guint32 len;
  int hoffset, eoffset;
  int ie_end;
  tvbuff_t *next_tvb;

  ie_end = offset + length;
  /* First octet: protocol profile (only the low 5 bits are the profile,
     per the hf_q932_pp field mask). */
  proto_tree_add_item(tree, hf_q932_pp, tvb, offset, 1, ENC_BIG_ENDIAN);
  offset++;
  /* Walk the BER-encoded components until the IE is exhausted. */
  while (offset < ie_end) {
    hoffset = offset;  /* start of this component (identifier octet) */
    offset = get_ber_identifier(tvb, offset, &appclass, &pc, &tag);
    offset = get_ber_length(tvb, offset, &len, NULL);
    eoffset = offset + len;  /* end of this component's content */
    /* Sub-tvb spanning the whole component, header included. */
    next_tvb = tvb_new_subset_length(tvb, hoffset, eoffset - hoffset);
    switch (appclass) {
      case BER_CLASS_CON:
        switch (tag) {
          case 10 :  /* Network Facility Extension */
            dissect_NetworkFacilityExtension_PDU(next_tvb, pinfo, tree, NULL);
            break;
          case 18 :  /* Network Protocol Profile */
            dissect_NetworkProtocolProfile_PDU(next_tvb, pinfo, tree, NULL);
            break;
          case 11 :  /* Interpretation Component */
            dissect_InterpretationComponent_PDU(next_tvb, pinfo, tree, NULL);
            break;
          /* ROSE APDU */
          case  1 :  /* invoke */
          case  2 :  /* returnResult */
          case  3 :  /* returnError */
          case  4 :  /* reject */
            q932_rose_ctx.apdu_depth = 1;
            call_dissector_with_data(q932_ros_handle, next_tvb, pinfo, tree, &q932_rose_ctx);
            break;
          /* DSE APDU: only the BER header is decoded; the payload is
             flagged as unsupported. */
          case 12 :  /* begin */
          case 14 :  /* end */
          case 15 :  /* continue */
          case 17 :  /* abort */
            offset = dissect_ber_identifier(pinfo, tree, tvb, hoffset, NULL, NULL, NULL);
            offset = dissect_ber_length(pinfo, tree, tvb, offset, NULL, NULL);
            proto_tree_add_expert(tree, pinfo, &ei_q932_dse_not_supported, tvb, offset, len);
            break;
          default:
            offset = dissect_ber_identifier(pinfo, tree, tvb, hoffset, NULL, NULL, NULL);
            offset = dissect_ber_length(pinfo, tree, tvb, offset, NULL, NULL);
            proto_tree_add_expert(tree, pinfo, &ei_q932_unknown_component, tvb, offset, len);
        }
        break;
      case BER_CLASS_APP:
        switch (tag) {
          /* ACSE APDU: decoded header only, payload flagged as unsupported. */
          case  0 :  /* aarq */
          case  1 :  /* aare */
          case  2 :  /* rlrq */
          case  3 :  /* rlre */
          case  4 :  /* abrt */
            offset = dissect_ber_identifier(pinfo, tree, tvb, hoffset, NULL, NULL, NULL);
            offset = dissect_ber_length(pinfo, tree, tvb, offset, NULL, NULL);
            proto_tree_add_expert(tree, pinfo, &ei_q932_acse_not_supported, tvb, offset, len);
            break;
          default:
            offset = dissect_ber_identifier(pinfo, tree, tvb, hoffset, NULL, NULL, NULL);
            offset = dissect_ber_length(pinfo, tree, tvb, offset, NULL, NULL);
            proto_tree_add_expert(tree, pinfo, &ei_q932_unknown_component, tvb, offset, len);
        }
        break;
      default:
        offset = dissect_ber_identifier(pinfo, tree, tvb, hoffset, NULL, NULL, NULL);
        offset = dissect_ber_length(pinfo, tree, tvb, offset, NULL, NULL);
        proto_tree_add_expert(tree, pinfo, &ei_q932_unknown_component, tvb, offset, len);
    }
    /* Always advance to the end of the component, regardless of how much
       the chosen branch consumed. */
    offset = eoffset;
  }
}
/*--- dissect_q932_ni_ie -------------------------------------------------------*/
/* Dissect the contents of a Q.932 Notification indicator information
   element.  The notification description is a sequence of extension
   octets: 7 value bits per octet, with bit 8 (0x80) set on the final
   octet of the value. */
static void
dissect_q932_ni_ie(tvbuff_t *tvb, int offset, packet_info *pinfo, proto_tree *tree, int length) {
  int remain = length;
  guint8 octet = 0;
  guint32 value = 0;
  proto_item* ti;

  /* Accumulate 7 bits per octet until the extension bit terminates the
     value or the IE runs out of octets. */
  while ((remain > 0) && !(octet & 0x80)) {
    octet = tvb_get_guint8(tvb, offset++);
    remain--;
    value <<= 7;
    value |= octet & 0x7F;
  }
  /* (length - remain) octets were consumed; back up by that amount so the
     field highlights the octets that formed the value. */
  ti = proto_tree_add_uint(tree, hf_q932_nd, tvb, offset - (length - remain), length - remain, value);
  /* Octets remaining after the terminating octet carry ASN.1-encoded data
     that this dissector does not decode; flag them. */
  if (remain > 0) {
    expert_add_info(pinfo, ti, &ei_q932_asn1_encoded);
  }
}
/*--- dissect_q932_ie -------------------------------------------------------*/
/* Dissect one Q.932 information element (registered on the "q931.ie"
   table).  The tvb starts at the IE: one type octet, one length octet,
   then the IE contents, which are routed by type. */
static void
dissect_q932_ie(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree) {
  gint offset;
  proto_item *ti;
  proto_tree *ie_tree;
  guint8 ie_type, ie_len;

  offset = 0;
  /* Hidden protocol item so "q932" matches in display filters. */
  ti = proto_tree_add_item(tree, proto_q932, tvb, offset, -1, ENC_NA);
  PROTO_ITEM_SET_HIDDEN(ti);
  ie_type = tvb_get_guint8(tvb, offset);
  ie_len = tvb_get_guint8(tvb, offset + 1);
  /* Subtree labelled with the IE type name. */
  ie_tree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_q932_ie, NULL,
            val_to_str(ie_type, VALS(q932_str_ie_type), "unknown (0x%02X)"));
  proto_tree_add_item(ie_tree, hf_q932_ie_type, tvb, offset, 1, ENC_BIG_ENDIAN);
  proto_tree_add_item(ie_tree, hf_q932_ie_len, tvb, offset + 1, 1, ENC_BIG_ENDIAN);
  offset += 2;
  /* Nothing after the two header octets: done. */
  if (tvb_reported_length_remaining(tvb, offset) <= 0)
    return;
  switch (ie_type) {
    case Q932_IE_FACILITY :
      dissect_q932_facility_ie(tvb, offset, pinfo, ie_tree, ie_len);
      break;
    case Q932_IE_NOTIFICATION_INDICATOR :
      dissect_q932_ni_ie(tvb, offset, pinfo, ie_tree, ie_len);
      break;
    default:
      /* Unknown IE type: show the raw contents. */
      if (ie_len > 0) {
        proto_tree_add_item(ie_tree, hf_q932_ie_data, tvb, offset, ie_len, ENC_NA);
      }
  }
}
/*--- dissect_q932_apdu -----------------------------------------------------*/
/* Dissect a bare Q.932 facility APDU (registered as "q932.apdu"): the
   payload is a ROSE APDU, so hand it straight to the q932.ros dissector. */
static void
dissect_q932_apdu(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree) {
  call_dissector(q932_ros_handle, tvb, pinfo, tree);
}
/*--- proto_register_q932 ---------------------------------------------------*/
/* Register the Q.932 protocol: header fields, subtrees, expert infos, the
   "q932.apdu" dissector, the ROSE/QSIG/ETSI dissector tables, and the
   facility-encoding preference.  The hf/ett sections between the
   "Included file" markers are generated by asn2wrs. */
void proto_register_q932(void) {

  /* List of fields */
  static hf_register_info hf[] = {
    { &hf_q932_ie_type, { "Type", "q932.ie.type",
                          FT_UINT8, BASE_HEX, VALS(q932_str_ie_type), 0x0,
                          "Information Element Type", HFILL }},
    { &hf_q932_ie_len,  { "Length", "q932.ie.len",
                          FT_UINT8, BASE_DEC, NULL, 0x0,
                          "Information Element Length", HFILL }},
    { &hf_q932_ie_data, { "Data", "q932.ie.data",
                          FT_BYTES, BASE_NONE, NULL, 0x0,
                          NULL, HFILL }},
    { &hf_q932_pp,      { "Protocol profile", "q932.pp",
                          FT_UINT8, BASE_HEX, VALS(str_pp), 0x1F,
                          NULL, HFILL }},
    { &hf_q932_nd,      { "Notification description", "q932.nd",
                          FT_UINT8, BASE_HEX, VALS(str_nd), 0x0,
                          NULL, HFILL }},

/*--- Included file: packet-q932-hfarr.c ---*/
#line 1 "../../asn1/q932/packet-q932-hfarr.c"
    { &hf_q932_NetworkFacilityExtension_PDU,
      { "NetworkFacilityExtension", "q932.NetworkFacilityExtension_element",
        FT_NONE, BASE_NONE, NULL, 0,
        NULL, HFILL }},
    { &hf_q932_NetworkProtocolProfile_PDU,
      { "NetworkProtocolProfile", "q932.NetworkProtocolProfile",
        FT_UINT32, BASE_DEC, VALS(q932_NetworkProtocolProfile_U_vals), 0,
        NULL, HFILL }},
    { &hf_q932_InterpretationComponent_PDU,
      { "InterpretationComponent", "q932.InterpretationComponent",
        FT_UINT32, BASE_DEC, VALS(q932_InterpretationComponent_U_vals), 0,
        NULL, HFILL }},
    { &hf_q932_presentationAlIowedAddress,
      { "presentationAlIowedAddress", "q932.presentationAlIowedAddress_element",
        FT_NONE, BASE_NONE, NULL, 0,
        "AddressScreened", HFILL }},
    { &hf_q932_presentationRestricted,
      { "presentationRestricted", "q932.presentationRestricted_element",
        FT_NONE, BASE_NONE, NULL, 0,
        NULL, HFILL }},
    { &hf_q932_numberNotAvailableDueTolnterworking,
      { "numberNotAvailableDueTolnterworking", "q932.numberNotAvailableDueTolnterworking_element",
        FT_NONE, BASE_NONE, NULL, 0,
        NULL, HFILL }},
    { &hf_q932_presentationRestrictedAddressScreened,
      { "presentationRestrictedAddress", "q932.presentationRestrictedAddress_element",
        FT_NONE, BASE_NONE, NULL, 0,
        "AddressScreened", HFILL }},
    { &hf_q932_presentationAllowedAddress,
      { "presentationAllowedAddress", "q932.presentationAllowedAddress_element",
        FT_NONE, BASE_NONE, NULL, 0,
        "Address", HFILL }},
    { &hf_q932_presentationRestrictedAddress,
      { "presentationRestrictedAddress", "q932.presentationRestrictedAddress_element",
        FT_NONE, BASE_NONE, NULL, 0,
        "Address", HFILL }},
    { &hf_q932_presentationAllowedNumberScreened,
      { "presentationAllowedNumber", "q932.presentationAllowedNumber_element",
        FT_NONE, BASE_NONE, NULL, 0,
        "NumberScreened", HFILL }},
    { &hf_q932_numberNotAvailableDueToInterworking,
      { "numberNotAvailableDueToInterworking", "q932.numberNotAvailableDueToInterworking_element",
        FT_NONE, BASE_NONE, NULL, 0,
        NULL, HFILL }},
    { &hf_q932_presentationRestrictedNumberScreened,
      { "presentationRestrictedNumber", "q932.presentationRestrictedNumber_element",
        FT_NONE, BASE_NONE, NULL, 0,
        "NumberScreened", HFILL }},
    { &hf_q932_presentationAllowedNumber,
      { "presentationAllowedNumber", "q932.presentationAllowedNumber",
        FT_UINT32, BASE_DEC, VALS(q932_PartyNumber_vals), 0,
        "PartyNumber", HFILL }},
    { &hf_q932_presentationRestrictedNumber,
      { "presentationRestrictedNumber", "q932.presentationRestrictedNumber",
        FT_UINT32, BASE_DEC, VALS(q932_PartyNumber_vals), 0,
        "PartyNumber", HFILL }},
    { &hf_q932_partyNumber,
      { "partyNumber", "q932.partyNumber",
        FT_UINT32, BASE_DEC, VALS(q932_PartyNumber_vals), 0,
        NULL, HFILL }},
    { &hf_q932_screeninglndicator,
      { "screeninglndicator", "q932.screeninglndicator",
        FT_UINT32, BASE_DEC, VALS(q932_ScreeningIndicator_vals), 0,
        "ScreeningIndicator", HFILL }},
    { &hf_q932_partySubaddress,
      { "partySubaddress", "q932.partySubaddress",
        FT_UINT32, BASE_DEC, VALS(q932_PartySubaddress_vals), 0,
        NULL, HFILL }},
    { &hf_q932_screeningIndicator,
      { "screeningIndicator", "q932.screeningIndicator",
        FT_UINT32, BASE_DEC, VALS(q932_ScreeningIndicator_vals), 0,
        NULL, HFILL }},
    { &hf_q932_unknownPartyNumber,
      { "unknownPartyNumber", "q932.unknownPartyNumber",
        FT_STRING, BASE_NONE, NULL, 0,
        "NumberDigits", HFILL }},
    { &hf_q932_publicPartyNumber,
      { "publicPartyNumber", "q932.publicPartyNumber_element",
        FT_NONE, BASE_NONE, NULL, 0,
        NULL, HFILL }},
    { &hf_q932_nsapEncodedNumber,
      { "nsapEncodedNumber", "q932.nsapEncodedNumber",
        FT_BYTES, BASE_NONE, NULL, 0,
        NULL, HFILL }},
    { &hf_q932_dataPartyNumber,
      { "dataPartyNumber", "q932.dataPartyNumber",
        FT_STRING, BASE_NONE, NULL, 0,
        "NumberDigits", HFILL }},
    { &hf_q932_telexPartyNumber,
      { "telexPartyNumber", "q932.telexPartyNumber",
        FT_STRING, BASE_NONE, NULL, 0,
        "NumberDigits", HFILL }},
    { &hf_q932_privatePartyNumber,
      { "privatePartyNumber", "q932.privatePartyNumber_element",
        FT_NONE, BASE_NONE, NULL, 0,
        NULL, HFILL }},
    { &hf_q932_nationalStandardPartyNumber,
      { "nationalStandardPartyNumber", "q932.nationalStandardPartyNumber",
        FT_STRING, BASE_NONE, NULL, 0,
        "NumberDigits", HFILL }},
    { &hf_q932_publicTypeOfNumber,
      { "publicTypeOfNumber", "q932.publicTypeOfNumber",
        FT_UINT32, BASE_DEC, VALS(q932_PublicTypeOfNumber_vals), 0,
        NULL, HFILL }},
    { &hf_q932_publicNumberDigits,
      { "publicNumberDigits", "q932.publicNumberDigits",
        FT_STRING, BASE_NONE, NULL, 0,
        "NumberDigits", HFILL }},
    { &hf_q932_privateTypeOfNumber,
      { "privateTypeOfNumber", "q932.privateTypeOfNumber",
        FT_UINT32, BASE_DEC, VALS(q932_PrivateTypeOfNumber_vals), 0,
        NULL, HFILL }},
    { &hf_q932_privateNumberDigits,
      { "privateNumberDigits", "q932.privateNumberDigits",
        FT_STRING, BASE_NONE, NULL, 0,
        "NumberDigits", HFILL }},
    { &hf_q932_userSpecifiedSubaddress,
      { "userSpecifiedSubaddress", "q932.userSpecifiedSubaddress_element",
        FT_NONE, BASE_NONE, NULL, 0,
        NULL, HFILL }},
    { &hf_q932_nSAPSubaddress,
      { "nSAPSubaddress", "q932.nSAPSubaddress",
        FT_BYTES, BASE_NONE, NULL, 0,
        NULL, HFILL }},
    { &hf_q932_subaddressInformation,
      { "subaddressInformation", "q932.subaddressInformation",
        FT_BYTES, BASE_NONE, NULL, 0,
        NULL, HFILL }},
    { &hf_q932_oddCountIndicator,
      { "oddCountIndicator", "q932.oddCountIndicator",
        FT_BOOLEAN, BASE_NONE, NULL, 0,
        "BOOLEAN", HFILL }},
    { &hf_q932_sourceEntity,
      { "sourceEntity", "q932.sourceEntity",
        FT_UINT32, BASE_DEC, VALS(q932_EntityType_vals), 0,
        "EntityType", HFILL }},
    { &hf_q932_sourceEntityAddress,
      { "sourceEntityAddress", "q932.sourceEntityAddress",
        FT_UINT32, BASE_DEC, VALS(q932_PartyNumber_vals), 0,
        "AddressInformation", HFILL }},
    { &hf_q932_destinationEntity,
      { "destinationEntity", "q932.destinationEntity",
        FT_UINT32, BASE_DEC, VALS(q932_EntityType_vals), 0,
        "EntityType", HFILL }},
    { &hf_q932_destinationEntityAddress,
      { "destinationEntityAddress", "q932.destinationEntityAddress",
        FT_UINT32, BASE_DEC, VALS(q932_PartyNumber_vals), 0,
        "AddressInformation", HFILL }},

/*--- End of included file: packet-q932-hfarr.c ---*/
#line 310 "../../asn1/q932/packet-q932-template.c"
  };

  /* List of subtrees */
  static gint *ett[] = {
    &ett_q932,
    &ett_q932_ie,

/*--- Included file: packet-q932-ettarr.c ---*/
#line 1 "../../asn1/q932/packet-q932-ettarr.c"
    &ett_q932_PresentedAddressScreened,
    &ett_q932_PresentedAddressUnscreened,
    &ett_q932_PresentedNumberScreened,
    &ett_q932_PresentedNumberUnscreened,
    &ett_q932_AddressScreened,
    &ett_q932_NumberScreened,
    &ett_q932_Address,
    &ett_q932_PartyNumber,
    &ett_q932_PublicPartyNumber,
    &ett_q932_PrivatePartyNumber,
    &ett_q932_PartySubaddress,
    &ett_q932_UserSpecifiedSubaddress,
    &ett_q932_NetworkFacilityExtension_U,

/*--- End of included file: packet-q932-ettarr.c ---*/
#line 317 "../../asn1/q932/packet-q932-template.c"
  };

  /* Expert infos for the unsupported / unknown component cases. */
  static ei_register_info ei[] = {
    { &ei_q932_dse_not_supported, { "q932.dse_not_supported", PI_UNDECODED, PI_WARN, "DSE APDU (not supported)", EXPFILL }},
    { &ei_q932_acse_not_supported, { "q932.acse_not_supported", PI_UNDECODED, PI_WARN, "ACSE APDU (not supported)", EXPFILL }},
    { &ei_q932_unknown_component, { "q932.unknown_component", PI_UNDECODED, PI_WARN, "Unknown Component", EXPFILL }},
    { &ei_q932_asn1_encoded, { "q932.asn1_encoded", PI_UNDECODED, PI_WARN, "ASN.1 Encoded Data Structure(NOT IMPLEMENTED)", EXPFILL }},
  };

  module_t *q932_module;
  expert_module_t* expert_q932;

  /* Choices for the facility-encoding preference (see
     proto_reg_handoff_q932, which reads g_facility_encoding). */
  static const enum_val_t facility_encoding[] = {
    {"Facility as QSIG", "Dissect facility as QSIG", 0},
    {"Facility as ETSI", "Dissect facility as ETSI", 1},
    {NULL, NULL, -1}
  };

  /* Register protocol and dissector */
  proto_q932 = proto_register_protocol(PNAME, PSNAME, PFNAME);
  register_dissector("q932.apdu", dissect_q932_apdu, proto_q932);

  /* Register fields and subtrees */
  proto_register_field_array(proto_q932, hf, array_length(hf));
  proto_register_subtree_array(ett, array_length(ett));
  expert_q932 = expert_register_protocol(proto_q932);
  expert_register_field_array(expert_q932, ei, array_length(ei));

  rose_ctx_init(&q932_rose_ctx);

  /* Register dissector tables */
  q932_rose_ctx.arg_global_dissector_table = register_dissector_table("q932.ros.global.arg", "Q.932 Operation Argument (global opcode)", FT_STRING, BASE_NONE);
  q932_rose_ctx.res_global_dissector_table = register_dissector_table("q932.ros.global.res", "Q.932 Operation Result (global opcode)", FT_STRING, BASE_NONE);
  q932_rose_ctx.err_global_dissector_table = register_dissector_table("q932.ros.global.err", "Q.932 Error (global opcode)", FT_STRING, BASE_NONE);
  qsig_arg_local_dissector_table = register_dissector_table("q932.ros.local.arg", "Q.932 Operation Argument (local opcode)", FT_UINT32, BASE_HEX);
  qsig_res_local_dissector_table = register_dissector_table("q932.ros.local.res", "Q.932 Operation Result (local opcode)", FT_UINT32, BASE_HEX);
  qsig_err_local_dissector_table = register_dissector_table("q932.ros.local.err", "Q.932 Error (local opcode)", FT_UINT32, BASE_HEX);
  etsi_arg_local_dissector_table = register_dissector_table("q932.ros.etsi.local.arg", "Q.932 ETSI Operation Argument (local opcode)", FT_UINT32, BASE_HEX);
  etsi_res_local_dissector_table = register_dissector_table("q932.ros.etsi.local.res", "Q.932 ETSI Operation Result (local opcode)", FT_UINT32, BASE_HEX);
  etsi_err_local_dissector_table = register_dissector_table("q932.ros.etsi.local.err", "Q.932 ETSI Error (local opcode)", FT_UINT32, BASE_HEX);

  /* Preference module; proto_reg_handoff_q932 is the apply callback, so it
     re-runs whenever the preference changes. */
  q932_module = prefs_register_protocol(proto_q932, proto_reg_handoff_q932);
  prefs_register_enum_preference(q932_module, "facility_encoding",
                                 "Type of Facility encoding",
                                 "Type of Facility encoding",
                                 &g_facility_encoding, facility_encoding, FALSE);
}
/*--- proto_reg_handoff_q932 ------------------------------------------------*/
void proto_reg_handoff_q932(void) {
dissector_handle_t q932_ie_handle;
static gboolean q931_prefs_initialized = FALSE;
if (!q931_prefs_initialized) {
q932_ie_handle = create_dissector_handle(dissect_q932_ie, proto_q932);
/* Facility */
dissector_add_uint("q931.ie", (0x00 << 8) | Q932_IE_FACILITY, q932_ie_handle);
/* Notification indicator */
dissector_add_uint("q931.ie", (0x00 << 8) | Q932_IE_NOTIFICATION_INDICATOR, q932_ie_handle);
q932_ros_handle = find_dissector("q932.ros");
}
if(g_facility_encoding == 0){
q932_rose_ctx.arg_local_dissector_table = qsig_arg_local_dissector_table;
q932_rose_ctx.res_local_dissector_table = qsig_res_local_dissector_table;
q932_rose_ctx.err_local_dissector_table = qsig_err_local_dissector_table;
}else{
q932_rose_ctx.arg_local_dissector_table = etsi_arg_local_dissector_table;
q932_rose_ctx.res_local_dissector_table = etsi_res_local_dissector_table;
q932_rose_ctx.err_local_dissector_table = etsi_err_local_dissector_table;
}
}
/*---------------------------------------------------------------------------*/
| 42.467341 | 237 | 0.699233 |
536eb2f9806abc075482ca9d83a2cab71661a2f0 | 462 | h | C | orocaboy2_app/src/hw/core/mem.h | oroca/oroca_boy2 | 7b3a2f5e0521f1bf8936921a54f9aed74f3ef5a9 | [
"Apache-2.0"
] | null | null | null | orocaboy2_app/src/hw/core/mem.h | oroca/oroca_boy2 | 7b3a2f5e0521f1bf8936921a54f9aed74f3ef5a9 | [
"Apache-2.0"
] | 1 | 2018-02-11T05:06:09.000Z | 2018-02-11T13:19:45.000Z | orocaboy2_app/src/hw/core/mem.h | oroca/oroca_boy2 | 7b3a2f5e0521f1bf8936921a54f9aed74f3ef5a9 | [
"Apache-2.0"
] | 2 | 2018-02-10T15:13:06.000Z | 2019-12-23T13:46:50.000Z | /*
* mem.h
*
* Created on: Feb 08, 2018
* Author: baram
*/
#ifndef MEM_H_
#define MEM_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "hw_def.h"
#include "bsp.h"
void memInit(uint32_t addr, uint32_t length);
void *memMalloc(uint32_t size);
void memFree(void *ptr);
void *memCalloc(size_t nmemb, size_t size);
void *memRealloc(void *ptr, size_t size);
#ifdef __cplusplus
}
#endif
#endif /* MEM_H_ */
| 13.2 | 47 | 0.621212 |
1583791879188566f3134e05f9910eec453fdb64 | 636,822 | h | C | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/x86_64-oesdk-linux/usr/lib/arm-openbmc-linux-gnueabi/gcc/arm-openbmc-linux-gnueabi/9.1.0/include/arm_neon.h | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/x86_64-oesdk-linux/usr/lib/arm-openbmc-linux-gnueabi/gcc/arm-openbmc-linux-gnueabi/9.1.0/include/arm_neon.h | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | openbmc/build/tmp/deploy/sdk/witherspoon-2019-08-08/sysroots/x86_64-oesdk-linux/usr/lib/arm-openbmc-linux-gnueabi/gcc/arm-openbmc-linux-gnueabi/9.1.0/include/arm_neon.h | sotaoverride/backup | ca53a10b72295387ef4948a9289cb78ab70bc449 | [
"Apache-2.0"
] | null | null | null | /* ARM NEON intrinsics include file.
Copyright (C) 2006-2019 Free Software Foundation, Inc.
Contributed by CodeSourcery.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 3, or (at your
option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
#ifndef _GCC_ARM_NEON_H
#define _GCC_ARM_NEON_H 1
#ifndef __ARM_FP
#error \
"NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard"
#else
#pragma GCC push_options
#pragma GCC target("fpu=neon")
#ifdef __cplusplus
extern "C" {
#endif
#include <arm_fp16.h>
#include <stdint.h>
/* 64-bit ("D" register) NEON vector types. */
typedef __simd64_int8_t int8x8_t;
typedef __simd64_int16_t int16x4_t;
typedef __simd64_int32_t int32x2_t;
typedef __builtin_neon_di int64x1_t;
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
typedef __fp16 float16_t;
typedef __simd64_float16_t float16x4_t;
#endif
typedef __simd64_float32_t float32x2_t;
typedef __simd64_poly8_t poly8x8_t;
typedef __simd64_poly16_t poly16x4_t;
/* 64-bit polynomial type requires the crypto extensions. */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
typedef __builtin_neon_poly64 poly64x1_t;
#pragma GCC pop_options
typedef __simd64_uint8_t uint8x8_t;
typedef __simd64_uint16_t uint16x4_t;
typedef __simd64_uint32_t uint32x2_t;
typedef __builtin_neon_udi uint64x1_t;
/* 128-bit ("Q" register) NEON vector types. */
typedef __simd128_int8_t int8x16_t;
typedef __simd128_int16_t int16x8_t;
typedef __simd128_int32_t int32x4_t;
typedef __simd128_int64_t int64x2_t;
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
typedef __simd128_float16_t float16x8_t;
#endif
typedef __simd128_float32_t float32x4_t;
typedef __simd128_poly8_t poly8x16_t;
typedef __simd128_poly16_t poly16x8_t;
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
typedef __builtin_neon_poly64 poly64x2_t __attribute__((__vector_size__(16)));
#pragma GCC pop_options
typedef __simd128_uint8_t uint8x16_t;
typedef __simd128_uint16_t uint16x8_t;
typedef __simd128_uint32_t uint32x4_t;
typedef __simd128_uint64_t uint64x2_t;
/* Scalar element types. */
typedef float float32_t;

/* The Poly types are user visible and live in their own world,
   keep them that way.  */
typedef __builtin_neon_poly8 poly8_t;
typedef __builtin_neon_poly16 poly16_t;
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
typedef __builtin_neon_poly64 poly64_t;
typedef __builtin_neon_poly128 poly128_t;
#pragma GCC pop_options
/* Two-vector aggregate types, used by the interleaving load/store
   (vld2/vst2) and table-lookup intrinsics. */
typedef struct int8x8x2_t
{
  int8x8_t val[2];
} int8x8x2_t;
typedef struct int8x16x2_t
{
  int8x16_t val[2];
} int8x16x2_t;
typedef struct int16x4x2_t
{
  int16x4_t val[2];
} int16x4x2_t;
typedef struct int16x8x2_t
{
  int16x8_t val[2];
} int16x8x2_t;
typedef struct int32x2x2_t
{
  int32x2_t val[2];
} int32x2x2_t;
typedef struct int32x4x2_t
{
  int32x4_t val[2];
} int32x4x2_t;
typedef struct int64x1x2_t
{
  int64x1_t val[2];
} int64x1x2_t;
typedef struct int64x2x2_t
{
  int64x2_t val[2];
} int64x2x2_t;
typedef struct uint8x8x2_t
{
  uint8x8_t val[2];
} uint8x8x2_t;
typedef struct uint8x16x2_t
{
  uint8x16_t val[2];
} uint8x16x2_t;
typedef struct uint16x4x2_t
{
  uint16x4_t val[2];
} uint16x4x2_t;
typedef struct uint16x8x2_t
{
  uint16x8_t val[2];
} uint16x8x2_t;
typedef struct uint32x2x2_t
{
  uint32x2_t val[2];
} uint32x2x2_t;
typedef struct uint32x4x2_t
{
  uint32x4_t val[2];
} uint32x4x2_t;
typedef struct uint64x1x2_t
{
  uint64x1_t val[2];
} uint64x1x2_t;
typedef struct uint64x2x2_t
{
  uint64x2_t val[2];
} uint64x2x2_t;
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
typedef struct float16x4x2_t
{
  float16x4_t val[2];
} float16x4x2_t;
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
typedef struct float16x8x2_t
{
  float16x8_t val[2];
} float16x8x2_t;
#endif
typedef struct float32x2x2_t
{
  float32x2_t val[2];
} float32x2x2_t;
typedef struct float32x4x2_t
{
  float32x4_t val[2];
} float32x4x2_t;
typedef struct poly8x8x2_t
{
  poly8x8_t val[2];
} poly8x8x2_t;
typedef struct poly8x16x2_t
{
  poly8x16_t val[2];
} poly8x16x2_t;
typedef struct poly16x4x2_t
{
  poly16x4_t val[2];
} poly16x4x2_t;
typedef struct poly16x8x2_t
{
  poly16x8_t val[2];
} poly16x8x2_t;
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
typedef struct poly64x1x2_t
{
  poly64x1_t val[2];
} poly64x1x2_t;
typedef struct poly64x2x2_t
{
  poly64x2_t val[2];
} poly64x2x2_t;
#pragma GCC pop_options
/* Three-vector aggregate types, used by the interleaving load/store
   (vld3/vst3) and table-lookup intrinsics. */
typedef struct int8x8x3_t
{
  int8x8_t val[3];
} int8x8x3_t;
typedef struct int8x16x3_t
{
  int8x16_t val[3];
} int8x16x3_t;
typedef struct int16x4x3_t
{
  int16x4_t val[3];
} int16x4x3_t;
typedef struct int16x8x3_t
{
  int16x8_t val[3];
} int16x8x3_t;
typedef struct int32x2x3_t
{
  int32x2_t val[3];
} int32x2x3_t;
typedef struct int32x4x3_t
{
  int32x4_t val[3];
} int32x4x3_t;
typedef struct int64x1x3_t
{
  int64x1_t val[3];
} int64x1x3_t;
typedef struct int64x2x3_t
{
  int64x2_t val[3];
} int64x2x3_t;
typedef struct uint8x8x3_t
{
  uint8x8_t val[3];
} uint8x8x3_t;
typedef struct uint8x16x3_t
{
  uint8x16_t val[3];
} uint8x16x3_t;
typedef struct uint16x4x3_t
{
  uint16x4_t val[3];
} uint16x4x3_t;
typedef struct uint16x8x3_t
{
  uint16x8_t val[3];
} uint16x8x3_t;
typedef struct uint32x2x3_t
{
  uint32x2_t val[3];
} uint32x2x3_t;
typedef struct uint32x4x3_t
{
  uint32x4_t val[3];
} uint32x4x3_t;
typedef struct uint64x1x3_t
{
  uint64x1_t val[3];
} uint64x1x3_t;
typedef struct uint64x2x3_t
{
  uint64x2_t val[3];
} uint64x2x3_t;
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
typedef struct float16x4x3_t
{
  float16x4_t val[3];
} float16x4x3_t;
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
typedef struct float16x8x3_t
{
  float16x8_t val[3];
} float16x8x3_t;
#endif
typedef struct float32x2x3_t
{
  float32x2_t val[3];
} float32x2x3_t;
typedef struct float32x4x3_t
{
  float32x4_t val[3];
} float32x4x3_t;
typedef struct poly8x8x3_t
{
  poly8x8_t val[3];
} poly8x8x3_t;
typedef struct poly8x16x3_t
{
  poly8x16_t val[3];
} poly8x16x3_t;
typedef struct poly16x4x3_t
{
  poly16x4_t val[3];
} poly16x4x3_t;
typedef struct poly16x8x3_t
{
  poly16x8_t val[3];
} poly16x8x3_t;
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
typedef struct poly64x1x3_t
{
  poly64x1_t val[3];
} poly64x1x3_t;
typedef struct poly64x2x3_t
{
  poly64x2_t val[3];
} poly64x2x3_t;
#pragma GCC pop_options
/* Four-vector aggregate types, used by the interleaving load/store
   (vld4/vst4) and table-lookup intrinsics. */
typedef struct int8x8x4_t
{
  int8x8_t val[4];
} int8x8x4_t;
typedef struct int8x16x4_t
{
  int8x16_t val[4];
} int8x16x4_t;
typedef struct int16x4x4_t
{
  int16x4_t val[4];
} int16x4x4_t;
typedef struct int16x8x4_t
{
  int16x8_t val[4];
} int16x8x4_t;
typedef struct int32x2x4_t
{
  int32x2_t val[4];
} int32x2x4_t;
typedef struct int32x4x4_t
{
  int32x4_t val[4];
} int32x4x4_t;
typedef struct int64x1x4_t
{
  int64x1_t val[4];
} int64x1x4_t;
typedef struct int64x2x4_t
{
  int64x2_t val[4];
} int64x2x4_t;
typedef struct uint8x8x4_t
{
  uint8x8_t val[4];
} uint8x8x4_t;
typedef struct uint8x16x4_t
{
  uint8x16_t val[4];
} uint8x16x4_t;
typedef struct uint16x4x4_t
{
  uint16x4_t val[4];
} uint16x4x4_t;
typedef struct uint16x8x4_t
{
  uint16x8_t val[4];
} uint16x8x4_t;
typedef struct uint32x2x4_t
{
  uint32x2_t val[4];
} uint32x2x4_t;
typedef struct uint32x4x4_t
{
  uint32x4_t val[4];
} uint32x4x4_t;
typedef struct uint64x1x4_t
{
  uint64x1_t val[4];
} uint64x1x4_t;
typedef struct uint64x2x4_t
{
  uint64x2_t val[4];
} uint64x2x4_t;
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
typedef struct float16x4x4_t
{
  float16x4_t val[4];
} float16x4x4_t;
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
typedef struct float16x8x4_t
{
  float16x8_t val[4];
} float16x8x4_t;
#endif
typedef struct float32x2x4_t
{
  float32x2_t val[4];
} float32x2x4_t;
typedef struct float32x4x4_t
{
  float32x4_t val[4];
} float32x4x4_t;
typedef struct poly8x8x4_t
{
  poly8x8_t val[4];
} poly8x8x4_t;
typedef struct poly8x16x4_t
{
  poly8x16_t val[4];
} poly8x16x4_t;
typedef struct poly16x4x4_t
{
  poly16x4_t val[4];
} poly16x4x4_t;
typedef struct poly16x8x4_t
{
  poly16x8_t val[4];
} poly16x8x4_t;
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
typedef struct poly64x1x4_t
{
  poly64x1_t val[4];
} poly64x1x4_t;
typedef struct poly64x2x4_t
{
  poly64x2_t val[4];
} poly64x2x4_t;
#pragma GCC pop_options
/* vadd */
/* Lane-wise vector addition, 64-bit (D-register) forms: each lane of the
   result is __a + __b for the corresponding lanes. */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vadd_s8(int8x8_t __a, int8x8_t __b)
{
  return __a + __b;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vadd_s16(int16x4_t __a, int16x4_t __b)
{
  return __a + __b;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vadd_s32(int32x2_t __a, int32x2_t __b)
{
  return __a + __b;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vadd_f32(float32x2_t __a, float32x2_t __b)
{
  /* Under -ffast-math the generic vector '+' is used; otherwise the NEON
     builtin performs the addition. */
#ifdef __FAST_MATH__
  return __a + __b;
#else
  return (float32x2_t)__builtin_neon_vaddv2sf(__a, __b);
#endif
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vadd_u8(uint8x8_t __a, uint8x8_t __b)
{
  return __a + __b;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vadd_u16(uint16x4_t __a, uint16x4_t __b)
{
  return __a + __b;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vadd_u32(uint32x2_t __a, uint32x2_t __b)
{
  return __a + __b;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vadd_s64(int64x1_t __a, int64x1_t __b)
{
  return __a + __b;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vadd_u64(uint64x1_t __a, uint64x1_t __b)
{
  return __a + __b;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s8(int8x16_t __a, int8x16_t __b)
{
return __a + __b;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s16(int16x8_t __a, int16x8_t __b)
{
return __a + __b;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s32(int32x4_t __a, int32x4_t __b)
{
return __a + __b;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddq_s64(int64x2_t __a, int64x2_t __b)
{
return __a + __b;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddq_f32(float32x4_t __a, float32x4_t __b)
{
#ifdef __FAST_MATH__
return __a + __b;
#else
return (float32x4_t)__builtin_neon_vaddv4sf(__a, __b);
#endif
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u8(uint8x16_t __a, uint8x16_t __b)
{
return __a + __b;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u16(uint16x8_t __a, uint16x8_t __b)
{
return __a + __b;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u32(uint32x4_t __a, uint32x4_t __b)
{
return __a + __b;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddq_u64(uint64x2_t __a, uint64x2_t __b)
{
return __a + __b;
}
/* vaddl ("add long"): widening addition -- each result element is twice
   the width of the operand elements, so no overflow is possible.
   Unsigned variants cast through the signed vector types because the
   builtins are declared on signed types; the "u" builtin suffix selects
   the unsigned (zero-extending) operation.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddl_s8(int8x8_t __a, int8x8_t __b)
{
return (int16x8_t)__builtin_neon_vaddlsv8qi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddl_s16(int16x4_t __a, int16x4_t __b)
{
return (int32x4_t)__builtin_neon_vaddlsv4hi(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddl_s32(int32x2_t __a, int32x2_t __b)
{
return (int64x2_t)__builtin_neon_vaddlsv2si(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddl_u8(uint8x8_t __a, uint8x8_t __b)
{
return (uint16x8_t)__builtin_neon_vaddluv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddl_u16(uint16x4_t __a, uint16x4_t __b)
{
return (uint32x4_t)__builtin_neon_vaddluv4hi((int16x4_t)__a,
(int16x4_t)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddl_u32(uint32x2_t __a, uint32x2_t __b)
{
return (uint64x2_t)__builtin_neon_vaddluv2si((int32x2_t)__a,
(int32x2_t)__b);
}
/* vaddw ("add wide"): adds a narrow vector __b to a twice-as-wide
   accumulator __a, widening __b's elements first.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddw_s8(int16x8_t __a, int8x8_t __b)
{
return (int16x8_t)__builtin_neon_vaddwsv8qi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddw_s16(int32x4_t __a, int16x4_t __b)
{
return (int32x4_t)__builtin_neon_vaddwsv4hi(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddw_s32(int64x2_t __a, int32x2_t __b)
{
return (int64x2_t)__builtin_neon_vaddwsv2si(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddw_u8(uint16x8_t __a, uint8x8_t __b)
{
return (uint16x8_t)__builtin_neon_vaddwuv8qi((int16x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddw_u16(uint32x4_t __a, uint16x4_t __b)
{
return (uint32x4_t)__builtin_neon_vaddwuv4hi((int32x4_t)__a,
(int16x4_t)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddw_u32(uint64x2_t __a, uint32x2_t __b)
{
return (uint64x2_t)__builtin_neon_vaddwuv2si((int64x2_t)__a,
(int32x2_t)__b);
}
/* vhadd ("halving add"): per ACLE, computes (a + b) >> 1 per element
   with the intermediate sum kept at full precision, truncating the
   result toward negative infinity.  Unsigned variants cast through the
   signed vector types to match the builtin prototypes; the "u" builtin
   suffix selects the unsigned operation.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhadd_s8(int8x8_t __a, int8x8_t __b)
{
return (int8x8_t)__builtin_neon_vhaddsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhadd_s16(int16x4_t __a, int16x4_t __b)
{
return (int16x4_t)__builtin_neon_vhaddsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhadd_s32(int32x2_t __a, int32x2_t __b)
{
return (int32x2_t)__builtin_neon_vhaddsv2si(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhadd_u8(uint8x8_t __a, uint8x8_t __b)
{
return (uint8x8_t)__builtin_neon_vhadduv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhadd_u16(uint16x4_t __a, uint16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vhadduv4hi((int16x4_t)__a,
(int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhadd_u32(uint32x2_t __a, uint32x2_t __b)
{
return (uint32x2_t)__builtin_neon_vhadduv2si((int32x2_t)__a,
(int32x2_t)__b);
}
/* 128-bit (Q-register) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_s8(int8x16_t __a, int8x16_t __b)
{
return (int8x16_t)__builtin_neon_vhaddsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_s16(int16x8_t __a, int16x8_t __b)
{
return (int16x8_t)__builtin_neon_vhaddsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_s32(int32x4_t __a, int32x4_t __b)
{
return (int32x4_t)__builtin_neon_vhaddsv4si(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_u8(uint8x16_t __a, uint8x16_t __b)
{
return (uint8x16_t)__builtin_neon_vhadduv16qi((int8x16_t)__a,
(int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_u16(uint16x8_t __a, uint16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vhadduv8hi((int16x8_t)__a,
(int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhaddq_u32(uint32x4_t __a, uint32x4_t __b)
{
return (uint32x4_t)__builtin_neon_vhadduv4si((int32x4_t)__a,
(int32x4_t)__b);
}
/* vrhadd ("rounding halving add"): per ACLE, computes (a + b + 1) >> 1
   per element -- like vhadd but rounding the halved sum to nearest
   instead of truncating.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_s8(int8x8_t __a, int8x8_t __b)
{
return (int8x8_t)__builtin_neon_vrhaddsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_s16(int16x4_t __a, int16x4_t __b)
{
return (int16x4_t)__builtin_neon_vrhaddsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_s32(int32x2_t __a, int32x2_t __b)
{
return (int32x2_t)__builtin_neon_vrhaddsv2si(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_u8(uint8x8_t __a, uint8x8_t __b)
{
return (uint8x8_t)__builtin_neon_vrhadduv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_u16(uint16x4_t __a, uint16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vrhadduv4hi((int16x4_t)__a,
(int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhadd_u32(uint32x2_t __a, uint32x2_t __b)
{
return (uint32x2_t)__builtin_neon_vrhadduv2si((int32x2_t)__a,
(int32x2_t)__b);
}
/* 128-bit (Q-register) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_s8(int8x16_t __a, int8x16_t __b)
{
return (int8x16_t)__builtin_neon_vrhaddsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_s16(int16x8_t __a, int16x8_t __b)
{
return (int16x8_t)__builtin_neon_vrhaddsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_s32(int32x4_t __a, int32x4_t __b)
{
return (int32x4_t)__builtin_neon_vrhaddsv4si(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_u8(uint8x16_t __a, uint8x16_t __b)
{
return (uint8x16_t)__builtin_neon_vrhadduv16qi((int8x16_t)__a,
(int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_u16(uint16x8_t __a, uint16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vrhadduv8hi((int16x8_t)__a,
(int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrhaddq_u32(uint32x4_t __a, uint32x4_t __b)
{
return (uint32x4_t)__builtin_neon_vrhadduv4si((int32x4_t)__a,
(int32x4_t)__b);
}
/* vqadd ("saturating add"): per ACLE, element-wise addition that clamps
   to the type's representable range instead of wrapping on overflow.
   Unlike plain vadd, 64-bit element variants (vqadd_s64/u64 and the
   Q-forms) exist here too.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s8(int8x8_t __a, int8x8_t __b)
{
return (int8x8_t)__builtin_neon_vqaddsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s16(int16x4_t __a, int16x4_t __b)
{
return (int16x4_t)__builtin_neon_vqaddsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s32(int32x2_t __a, int32x2_t __b)
{
return (int32x2_t)__builtin_neon_vqaddsv2si(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqadd_s64(int64x1_t __a, int64x1_t __b)
{
return (int64x1_t)__builtin_neon_vqaddsdi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u8(uint8x8_t __a, uint8x8_t __b)
{
return (uint8x8_t)__builtin_neon_vqadduv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u16(uint16x4_t __a, uint16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vqadduv4hi((int16x4_t)__a,
(int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u32(uint32x2_t __a, uint32x2_t __b)
{
return (uint32x2_t)__builtin_neon_vqadduv2si((int32x2_t)__a,
(int32x2_t)__b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqadd_u64(uint64x1_t __a, uint64x1_t __b)
{
return (uint64x1_t)__builtin_neon_vqaddudi((int64x1_t)__a, (int64x1_t)__b);
}
/* 128-bit (Q-register) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s8(int8x16_t __a, int8x16_t __b)
{
return (int8x16_t)__builtin_neon_vqaddsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s16(int16x8_t __a, int16x8_t __b)
{
return (int16x8_t)__builtin_neon_vqaddsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s32(int32x4_t __a, int32x4_t __b)
{
return (int32x4_t)__builtin_neon_vqaddsv4si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_s64(int64x2_t __a, int64x2_t __b)
{
return (int64x2_t)__builtin_neon_vqaddsv2di(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u8(uint8x16_t __a, uint8x16_t __b)
{
return (uint8x16_t)__builtin_neon_vqadduv16qi((int8x16_t)__a,
(int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u16(uint16x8_t __a, uint16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vqadduv8hi((int16x8_t)__a,
(int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u32(uint32x4_t __a, uint32x4_t __b)
{
return (uint32x4_t)__builtin_neon_vqadduv4si((int32x4_t)__a,
(int32x4_t)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqaddq_u64(uint64x2_t __a, uint64x2_t __b)
{
return (uint64x2_t)__builtin_neon_vqadduv2di((int64x2_t)__a,
(int64x2_t)__b);
}
/* vaddhn ("add, halving narrow"): per ACLE, adds two wide vectors and
   returns the most-significant half of each sum element, producing a
   vector with elements half the input width (truncating form).  Note
   both signed and unsigned variants share the same builtin -- the high
   half is bit-pattern identical either way, so only casts differ.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_s16(int16x8_t __a, int16x8_t __b)
{
return (int8x8_t)__builtin_neon_vaddhnv8hi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_s32(int32x4_t __a, int32x4_t __b)
{
return (int16x4_t)__builtin_neon_vaddhnv4si(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_s64(int64x2_t __a, int64x2_t __b)
{
return (int32x2_t)__builtin_neon_vaddhnv2di(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_u16(uint16x8_t __a, uint16x8_t __b)
{
return (uint8x8_t)__builtin_neon_vaddhnv8hi((int16x8_t)__a, (int16x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_u32(uint32x4_t __a, uint32x4_t __b)
{
return (uint16x4_t)__builtin_neon_vaddhnv4si((int32x4_t)__a,
(int32x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddhn_u64(uint64x2_t __a, uint64x2_t __b)
{
return (uint32x2_t)__builtin_neon_vaddhnv2di((int64x2_t)__a,
(int64x2_t)__b);
}
/* vraddhn: rounding variant of vaddhn -- per ACLE, rounds the narrowed
   result to nearest instead of truncating.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_s16(int16x8_t __a, int16x8_t __b)
{
return (int8x8_t)__builtin_neon_vraddhnv8hi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_s32(int32x4_t __a, int32x4_t __b)
{
return (int16x4_t)__builtin_neon_vraddhnv4si(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_s64(int64x2_t __a, int64x2_t __b)
{
return (int32x2_t)__builtin_neon_vraddhnv2di(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_u16(uint16x8_t __a, uint16x8_t __b)
{
return (uint8x8_t)__builtin_neon_vraddhnv8hi((int16x8_t)__a,
(int16x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_u32(uint32x4_t __a, uint32x4_t __b)
{
return (uint16x4_t)__builtin_neon_vraddhnv4si((int32x4_t)__a,
(int32x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vraddhn_u64(uint64x2_t __a, uint64x2_t __b)
{
return (uint32x2_t)__builtin_neon_vraddhnv2di((int64x2_t)__a,
(int64x2_t)__b);
}
/* vmul: element-wise vector multiplication.  Integer forms use the C
   `*` operator on the vector types; the float forms use the builtin
   unless -ffast-math is active (same rationale as vadd_f32: preserve
   exact NEON FP instruction semantics).  Polynomial forms (vmul_p8)
   must always use the builtin -- per ACLE that is a GF(2) carry-less
   multiply, which no C operator expresses.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_s8(int8x8_t __a, int8x8_t __b)
{
return __a * __b;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_s16(int16x4_t __a, int16x4_t __b)
{
return __a * __b;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_s32(int32x2_t __a, int32x2_t __b)
{
return __a * __b;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_f32(float32x2_t __a, float32x2_t __b)
{
#ifdef __FAST_MATH__
return __a * __b;
#else
return (float32x2_t)__builtin_neon_vmulfv2sf(__a, __b);
#endif
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_u8(uint8x8_t __a, uint8x8_t __b)
{
return __a * __b;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_u16(uint16x4_t __a, uint16x4_t __b)
{
return __a * __b;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_u32(uint32x2_t __a, uint32x2_t __b)
{
return __a * __b;
}
/* 128-bit (Q-register) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_s8(int8x16_t __a, int8x16_t __b)
{
return __a * __b;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_s16(int16x8_t __a, int16x8_t __b)
{
return __a * __b;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_s32(int32x4_t __a, int32x4_t __b)
{
return __a * __b;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_f32(float32x4_t __a, float32x4_t __b)
{
#ifdef __FAST_MATH__
return __a * __b;
#else
return (float32x4_t)__builtin_neon_vmulfv4sf(__a, __b);
#endif
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_u8(uint8x16_t __a, uint8x16_t __b)
{
return __a * __b;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_u16(uint16x8_t __a, uint16x8_t __b)
{
return __a * __b;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_u32(uint32x4_t __a, uint32x4_t __b)
{
return __a * __b;
}
/* Polynomial (carry-less) multiply.  */
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_p8(poly8x8_t __a, poly8x8_t __b)
{
return (poly8x8_t)__builtin_neon_vmulpv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_p8(poly8x16_t __a, poly8x16_t __b)
{
return (poly8x16_t)__builtin_neon_vmulpv16qi((int8x16_t)__a,
(int8x16_t)__b);
}
/* vqdmulh ("saturating doubling multiply, high half"): per ACLE,
   computes (2 * a * b) at double width, saturates, and returns the
   high half of each element -- effectively a fixed-point (Q15/Q31)
   multiply.  Signed 16- and 32-bit elements only.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_s16(int16x4_t __a, int16x4_t __b)
{
return (int16x4_t)__builtin_neon_vqdmulhv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_s32(int32x2_t __a, int32x2_t __b)
{
return (int32x2_t)__builtin_neon_vqdmulhv2si(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_s16(int16x8_t __a, int16x8_t __b)
{
return (int16x8_t)__builtin_neon_vqdmulhv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_s32(int32x4_t __a, int32x4_t __b)
{
return (int32x4_t)__builtin_neon_vqdmulhv4si(__a, __b);
}
/* vqrdmulh: rounding variant of vqdmulh (rounds the high half to
   nearest instead of truncating).  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_s16(int16x4_t __a, int16x4_t __b)
{
return (int16x4_t)__builtin_neon_vqrdmulhv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_s32(int32x2_t __a, int32x2_t __b)
{
return (int32x2_t)__builtin_neon_vqrdmulhv2si(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_s16(int16x8_t __a, int16x8_t __b)
{
return (int16x8_t)__builtin_neon_vqrdmulhv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_s32(int32x4_t __a, int32x4_t __b)
{
return (int32x4_t)__builtin_neon_vqrdmulhv4si(__a, __b);
}
/* vqrdmlah / vqrdmlsh: saturating rounding doubling multiply
   accumulate / subtract (a +/- vqrdmulh(b, c), fused).  These come
   from the ARMv8.1-A QRDMX extension, hence the feature guard.  */
#ifdef __ARM_FEATURE_QRDMX
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
return (int16x4_t)__builtin_neon_vqrdmlahv4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
return (int32x2_t)__builtin_neon_vqrdmlahv2si(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
return (int16x8_t)__builtin_neon_vqrdmlahv8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
return (int32x4_t)__builtin_neon_vqrdmlahv4si(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
return (int16x4_t)__builtin_neon_vqrdmlshv4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
return (int32x2_t)__builtin_neon_vqrdmlshv2si(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
return (int16x8_t)__builtin_neon_vqrdmlshv8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
return (int32x4_t)__builtin_neon_vqrdmlshv4si(__a, __b, __c);
}
#endif
/* vmull ("multiply long"): widening multiply -- each product element is
   twice the width of the operands, so no overflow.  vmull_p8 is the
   polynomial (carry-less) widening multiply.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_s8(int8x8_t __a, int8x8_t __b)
{
return (int16x8_t)__builtin_neon_vmullsv8qi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_s16(int16x4_t __a, int16x4_t __b)
{
return (int32x4_t)__builtin_neon_vmullsv4hi(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_s32(int32x2_t __a, int32x2_t __b)
{
return (int64x2_t)__builtin_neon_vmullsv2si(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_u8(uint8x8_t __a, uint8x8_t __b)
{
return (uint16x8_t)__builtin_neon_vmulluv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_u16(uint16x4_t __a, uint16x4_t __b)
{
return (uint32x4_t)__builtin_neon_vmulluv4hi((int16x4_t)__a,
(int16x4_t)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_u32(uint32x2_t __a, uint32x2_t __b)
{
return (uint64x2_t)__builtin_neon_vmulluv2si((int32x2_t)__a,
(int32x2_t)__b);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_p8(poly8x8_t __a, poly8x8_t __b)
{
return (poly16x8_t)__builtin_neon_vmullpv8qi((int8x8_t)__a, (int8x8_t)__b);
}
/* vqdmull: saturating doubling widening multiply -- per ACLE, computes
   saturate(2 * a * b) at double width.  Signed 16/32-bit only.  */
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_s16(int16x4_t __a, int16x4_t __b)
{
return (int32x4_t)__builtin_neon_vqdmullv4hi(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_s32(int32x2_t __a, int32x2_t __b)
{
return (int64x2_t)__builtin_neon_vqdmullv2si(__a, __b);
}
/* vmla ("multiply accumulate"): returns __a + __b * __c per element
   (the accumulator is the first argument).  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
return (int8x8_t)__builtin_neon_vmlav8qi(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
return (int16x4_t)__builtin_neon_vmlav4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
return (int32x2_t)__builtin_neon_vmlav2si(__a, __b, __c);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
return (float32x2_t)__builtin_neon_vmlav2sf(__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
return (uint8x8_t)__builtin_neon_vmlav8qi((int8x8_t)__a, (int8x8_t)__b,
(int8x8_t)__c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
return (uint16x4_t)__builtin_neon_vmlav4hi((int16x4_t)__a, (int16x4_t)__b,
(int16x4_t)__c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
return (uint32x2_t)__builtin_neon_vmlav2si((int32x2_t)__a, (int32x2_t)__b,
(int32x2_t)__c);
}
/* 128-bit (Q-register) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
return (int8x16_t)__builtin_neon_vmlav16qi(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
return (int16x8_t)__builtin_neon_vmlav8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
return (int32x4_t)__builtin_neon_vmlav4si(__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
return (float32x4_t)__builtin_neon_vmlav4sf(__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
return (uint8x16_t)__builtin_neon_vmlav16qi((int8x16_t)__a, (int8x16_t)__b,
(int8x16_t)__c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
return (uint16x8_t)__builtin_neon_vmlav8hi((int16x8_t)__a, (int16x8_t)__b,
(int16x8_t)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
return (uint32x4_t)__builtin_neon_vmlav4si((int32x4_t)__a, (int32x4_t)__b,
(int32x4_t)__c);
}
/* vmlal ("multiply accumulate long"): widening multiply of the two
   narrow operands, accumulated into the twice-as-wide __a.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_s8(int16x8_t __a, int8x8_t __b, int8x8_t __c)
{
return (int16x8_t)__builtin_neon_vmlalsv8qi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
return (int32x4_t)__builtin_neon_vmlalsv4hi(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
return (int64x2_t)__builtin_neon_vmlalsv2si(__a, __b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_u8(uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
return (uint16x8_t)__builtin_neon_vmlaluv8qi((int16x8_t)__a, (int8x8_t)__b,
(int8x8_t)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
return (uint32x4_t)__builtin_neon_vmlaluv4hi((int32x4_t)__a, (int16x4_t)__b,
(int16x4_t)__c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
return (uint64x2_t)__builtin_neon_vmlaluv2si((int64x2_t)__a, (int32x2_t)__b,
(int32x2_t)__c);
}
/* vqdmlal: saturating doubling widening multiply-accumulate
   (a + saturate(2 * b * c) at double width).  */
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
return (int32x4_t)__builtin_neon_vqdmlalv4hi(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
return (int64x2_t)__builtin_neon_vqdmlalv2si(__a, __b, __c);
}
/* vmls ("multiply subtract"): returns __a - __b * __c per element --
   the subtracting counterpart of vmla above.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
return (int8x8_t)__builtin_neon_vmlsv8qi(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
return (int16x4_t)__builtin_neon_vmlsv4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
return (int32x2_t)__builtin_neon_vmlsv2si(__a, __b, __c);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
return (float32x2_t)__builtin_neon_vmlsv2sf(__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
return (uint8x8_t)__builtin_neon_vmlsv8qi((int8x8_t)__a, (int8x8_t)__b,
(int8x8_t)__c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
return (uint16x4_t)__builtin_neon_vmlsv4hi((int16x4_t)__a, (int16x4_t)__b,
(int16x4_t)__c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
return (uint32x2_t)__builtin_neon_vmlsv2si((int32x2_t)__a, (int32x2_t)__b,
(int32x2_t)__c);
}
/* 128-bit (Q-register) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
return (int8x16_t)__builtin_neon_vmlsv16qi(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
return (int16x8_t)__builtin_neon_vmlsv8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
return (int32x4_t)__builtin_neon_vmlsv4si(__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
return (float32x4_t)__builtin_neon_vmlsv4sf(__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
return (uint8x16_t)__builtin_neon_vmlsv16qi((int8x16_t)__a, (int8x16_t)__b,
(int8x16_t)__c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
return (uint16x8_t)__builtin_neon_vmlsv8hi((int16x8_t)__a, (int16x8_t)__b,
(int16x8_t)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
return (uint32x4_t)__builtin_neon_vmlsv4si((int32x4_t)__a, (int32x4_t)__b,
(int32x4_t)__c);
}
/* vmlsl ("multiply subtract long"): widening multiply of the narrow
   operands, subtracted from the twice-as-wide accumulator __a.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_s8(int16x8_t __a, int8x8_t __b, int8x8_t __c)
{
return (int16x8_t)__builtin_neon_vmlslsv8qi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
return (int32x4_t)__builtin_neon_vmlslsv4hi(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
return (int64x2_t)__builtin_neon_vmlslsv2si(__a, __b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_u8(uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
return (uint16x8_t)__builtin_neon_vmlsluv8qi((int16x8_t)__a, (int8x8_t)__b,
(int8x8_t)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
return (uint32x4_t)__builtin_neon_vmlsluv4hi((int32x4_t)__a, (int16x4_t)__b,
(int16x4_t)__c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
return (uint64x2_t)__builtin_neon_vmlsluv2si((int64x2_t)__a, (int32x2_t)__b,
(int32x2_t)__c);
}
/* vqdmlsl: saturating doubling widening multiply-subtract
   (a - saturate(2 * b * c) at double width).  */
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
return (int32x4_t)__builtin_neon_vqdmlslv4hi(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
return (int64x2_t)__builtin_neon_vqdmlslv2si(__a, __b, __c);
}
/* Fused multiply-add/subtract (VFMA/VFMS).  These instructions require the
   VFPv4 FP extension, so the definitions are compiled under an explicit
   target pragma and the previous options are restored afterwards.  */
#pragma GCC push_options
#pragma GCC target("fpu=neon-vfpv4")
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfma_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
  return (float32x2_t)__builtin_neon_vfmav2sf(__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmaq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return (float32x4_t)__builtin_neon_vfmav4sf(__a, __b, __c);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfms_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
  return (float32x2_t)__builtin_neon_vfmsv2sf(__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmsq_f32(float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return (float32x4_t)__builtin_neon_vfmsv4sf(__a, __b, __c);
}
#pragma GCC pop_options
/* Floating-point round-to-integral family (VRINT*), available only from
   ARMv8 on, hence the per-intrinsic __ARM_ARCH guards.  Suffix mapping to
   the underlying builtin: n = to-nearest-even, a = to-nearest-away,
   p = toward +inf, m = toward -inf, x = current mode (inexact signalled),
   no suffix = toward zero (vrintz).
   NOTE(review): rounding-mode meanings are taken from the VRINT builtin
   names -- confirm against the ARM ARM if precision of wording matters.  */
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndn_f32(float32x2_t __a)
{
  return (float32x2_t)__builtin_neon_vrintnv2sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndnq_f32(float32x4_t __a)
{
  return (float32x4_t)__builtin_neon_vrintnv4sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrnda_f32(float32x2_t __a)
{
  return (float32x2_t)__builtin_neon_vrintav2sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndaq_f32(float32x4_t __a)
{
  return (float32x4_t)__builtin_neon_vrintav4sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndp_f32(float32x2_t __a)
{
  return (float32x2_t)__builtin_neon_vrintpv2sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndpq_f32(float32x4_t __a)
{
  return (float32x4_t)__builtin_neon_vrintpv4sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndm_f32(float32x2_t __a)
{
  return (float32x2_t)__builtin_neon_vrintmv2sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndmq_f32(float32x4_t __a)
{
  return (float32x4_t)__builtin_neon_vrintmv4sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndx_f32(float32x2_t __a)
{
  return (float32x2_t)__builtin_neon_vrintxv2sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndxq_f32(float32x4_t __a)
{
  return (float32x4_t)__builtin_neon_vrintxv4sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrnd_f32(float32x2_t __a)
{
  return (float32x2_t)__builtin_neon_vrintzv2sf(__a);
}
#endif
#if __ARM_ARCH >= 8
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndq_f32(float32x4_t __a)
{
  return (float32x4_t)__builtin_neon_vrintzv4sf(__a);
}
#endif
/* Lane-wise subtract (VSUB), 64-bit (vsub_*) and 128-bit (vsubq_*) forms.
   Integer variants use GCC's generic vector arithmetic (__a - __b), which
   the backend maps to VSUB.  Float variants call the builtin except under
   __FAST_MATH__, where the generic form is used instead -- presumably so
   the builtin preserves strict FP semantics when fast-math is off; TODO
   confirm against GCC's arm_neon.h history.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsub_s8(int8x8_t __a, int8x8_t __b)
{
  return __a - __b;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsub_s16(int16x4_t __a, int16x4_t __b)
{
  return __a - __b;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsub_s32(int32x2_t __a, int32x2_t __b)
{
  return __a - __b;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsub_f32(float32x2_t __a, float32x2_t __b)
{
#ifdef __FAST_MATH__
  return __a - __b;
#else
  return (float32x2_t)__builtin_neon_vsubv2sf(__a, __b);
#endif
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsub_u8(uint8x8_t __a, uint8x8_t __b)
{
  return __a - __b;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsub_u16(uint16x4_t __a, uint16x4_t __b)
{
  return __a - __b;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsub_u32(uint32x2_t __a, uint32x2_t __b)
{
  return __a - __b;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsub_s64(int64x1_t __a, int64x1_t __b)
{
  return __a - __b;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsub_u64(uint64x1_t __a, uint64x1_t __b)
{
  return __a - __b;
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s8(int8x16_t __a, int8x16_t __b)
{
  return __a - __b;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s16(int16x8_t __a, int16x8_t __b)
{
  return __a - __b;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s32(int32x4_t __a, int32x4_t __b)
{
  return __a - __b;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubq_s64(int64x2_t __a, int64x2_t __b)
{
  return __a - __b;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubq_f32(float32x4_t __a, float32x4_t __b)
{
#ifdef __FAST_MATH__
  return __a - __b;
#else
  return (float32x4_t)__builtin_neon_vsubv4sf(__a, __b);
#endif
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return __a - __b;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return __a - __b;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return __a - __b;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubq_u64(uint64x2_t __a, uint64x2_t __b)
{
  return __a - __b;
}
/* Widening subtracts.  vsubl_* (VSUBL) widens both operands; vsubw_* (VSUBW)
   takes an already-wide first operand and widens only the second.  Unsigned
   variants cast through the signed vector types the builtins are declared
   with (bit-pattern reinterpretation only).  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubl_s8(int8x8_t __a, int8x8_t __b)
{
  return (int16x8_t)__builtin_neon_vsublsv8qi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubl_s16(int16x4_t __a, int16x4_t __b)
{
  return (int32x4_t)__builtin_neon_vsublsv4hi(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubl_s32(int32x2_t __a, int32x2_t __b)
{
  return (int64x2_t)__builtin_neon_vsublsv2si(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubl_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vsubluv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubl_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vsubluv4hi((int16x4_t)__a,
                                               (int16x4_t)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubl_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vsubluv2si((int32x2_t)__a,
                                               (int32x2_t)__b);
}
/* Wide subtracts (VSUBW): __a is already the wide type.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubw_s8(int16x8_t __a, int8x8_t __b)
{
  return (int16x8_t)__builtin_neon_vsubwsv8qi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubw_s16(int32x4_t __a, int16x4_t __b)
{
  return (int32x4_t)__builtin_neon_vsubwsv4hi(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubw_s32(int64x2_t __a, int32x2_t __b)
{
  return (int64x2_t)__builtin_neon_vsubwsv2si(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubw_u8(uint16x8_t __a, uint8x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vsubwuv8qi((int16x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubw_u16(uint32x4_t __a, uint16x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vsubwuv4hi((int32x4_t)__a,
                                               (int16x4_t)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubw_u32(uint64x2_t __a, uint32x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vsubwuv2si((int64x2_t)__a,
                                               (int32x2_t)__b);
}
/* Halving subtract (VHSUB): forwards to __builtin_neon_vhsub[su]*.
   NOTE(review): "each lane is (__a - __b) >> 1 without intermediate
   overflow" is the VHSUB definition inferred from the builtin name --
   confirm against the ARM NEON intrinsics reference.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsub_s8(int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vhsubsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsub_s16(int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vhsubsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsub_s32(int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vhsubsv2si(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsub_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vhsubuv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsub_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vhsubuv4hi((int16x4_t)__a,
                                               (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsub_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vhsubuv2si((int32x2_t)__a,
                                               (int32x2_t)__b);
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_s8(int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vhsubsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_s16(int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vhsubsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_s32(int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vhsubsv4si(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vhsubuv16qi((int8x16_t)__a,
                                                (int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vhsubuv8hi((int16x8_t)__a,
                                               (int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vhsubq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vhsubuv4si((int32x4_t)__a,
                                               (int32x4_t)__b);
}
/* Saturating subtract (VQSUB): forwards to __builtin_neon_vqsub[su]*.
   Covers all element widths including 64-bit scalars-in-vector (di/v2di).
   Unsigned variants cast through the signed vector types the builtins are
   declared with (bit-pattern reinterpretation only).  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s8(int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vqsubsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s16(int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vqsubsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s32(int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vqsubsv2si(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsub_s64(int64x1_t __a, int64x1_t __b)
{
  return (int64x1_t)__builtin_neon_vqsubsdi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vqsubuv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vqsubuv4hi((int16x4_t)__a,
                                               (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vqsubuv2si((int32x2_t)__a,
                                               (int32x2_t)__b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsub_u64(uint64x1_t __a, uint64x1_t __b)
{
  return (uint64x1_t)__builtin_neon_vqsubudi((int64x1_t)__a, (int64x1_t)__b);
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s8(int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vqsubsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s16(int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vqsubsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s32(int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vqsubsv4si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_s64(int64x2_t __a, int64x2_t __b)
{
  return (int64x2_t)__builtin_neon_vqsubsv2di(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vqsubuv16qi((int8x16_t)__a,
                                                (int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vqsubuv8hi((int16x8_t)__a,
                                               (int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vqsubuv4si((int32x4_t)__a,
                                               (int32x4_t)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqsubq_u64(uint64x2_t __a, uint64x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vqsubuv2di((int64x2_t)__a,
                                               (int64x2_t)__b);
}
/* Narrowing subtracts: vsubhn_* (VSUBHN) returns the high half of each wide
   difference; vrsubhn_* (VRSUBHN) is the rounding variant.  Note the builtin
   is selected by element width only, so unsigned variants reuse the signed
   builtin with casts (the high-half extraction is sign-agnostic).  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_s16(int16x8_t __a, int16x8_t __b)
{
  return (int8x8_t)__builtin_neon_vsubhnv8hi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_s32(int32x4_t __a, int32x4_t __b)
{
  return (int16x4_t)__builtin_neon_vsubhnv4si(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_s64(int64x2_t __a, int64x2_t __b)
{
  return (int32x2_t)__builtin_neon_vsubhnv2di(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vsubhnv8hi((int16x8_t)__a, (int16x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vsubhnv4si((int32x4_t)__a,
                                               (int32x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubhn_u64(uint64x2_t __a, uint64x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vsubhnv2di((int64x2_t)__a,
                                               (int64x2_t)__b);
}
/* Rounding variants (VRSUBHN).  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_s16(int16x8_t __a, int16x8_t __b)
{
  return (int8x8_t)__builtin_neon_vrsubhnv8hi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_s32(int32x4_t __a, int32x4_t __b)
{
  return (int16x4_t)__builtin_neon_vrsubhnv4si(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_s64(int64x2_t __a, int64x2_t __b)
{
  return (int32x2_t)__builtin_neon_vrsubhnv2di(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vrsubhnv8hi((int16x8_t)__a,
                                               (int16x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vrsubhnv4si((int32x4_t)__a,
                                                (int32x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsubhn_u64(uint64x2_t __a, uint64x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vrsubhnv2di((int64x2_t)__a,
                                                (int64x2_t)__b);
}
/* Lane-wise compare-equal (VCEQ).  Results are unsigned masks of the same
   lane width as the inputs.  Equality is sign-agnostic, so unsigned and
   polynomial variants reuse the signed-typed builtin via casts.  */
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceq_s8(int8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vceqv8qi(__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceq_s16(int16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vceqv4hi(__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceq_s32(int32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vceqv2si(__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceq_f32(float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vceqv2sf(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceq_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vceqv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceq_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vceqv4hi((int16x4_t)__a, (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceq_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vceqv2si((int32x2_t)__a, (int32x2_t)__b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceq_p8(poly8x8_t __a, poly8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vceqv8qi((int8x8_t)__a, (int8x8_t)__b);
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s8(int8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vceqv16qi(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s16(int16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vceqv8hi(__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqq_s32(int32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vceqv4si(__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqq_f32(float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vceqv4sf(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vceqv16qi((int8x16_t)__a, (int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vceqv8hi((int16x8_t)__a, (int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vceqv4si((int32x4_t)__a, (int32x4_t)__b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqq_p8(poly8x16_t __a, poly8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vceqv16qi((int8x16_t)__a, (int8x16_t)__b);
}
/* Lane-wise compare greater-or-equal (VCGE) and less-or-equal.  There is no
   separate VCLE builtin: vcle* is implemented by calling the VCGE builtin
   with the operands swapped (a <= b  <=>  b >= a).  Unsigned comparisons use
   the distinct vcgeu* builtins (ordering is sign-dependent, unlike VCEQ).  */
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcge_s8(int8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vcgev8qi(__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcge_s16(int16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vcgev4hi(__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcge_s32(int32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgev2si(__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcge_f32(float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgev2sf(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcge_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vcgeuv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcge_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vcgeuv4hi((int16x4_t)__a, (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcge_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgeuv2si((int32x2_t)__a, (int32x2_t)__b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_s8(int8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vcgev16qi(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_s16(int16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vcgev8hi(__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_s32(int32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgev4si(__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_f32(float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgev4sf(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vcgeuv16qi((int8x16_t)__a,
                                               (int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vcgeuv8hi((int16x8_t)__a, (int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgeuv4si((int32x4_t)__a, (int32x4_t)__b);
}
/* Less-or-equal: VCGE with operands swapped.  */
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcle_s8(int8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vcgev8qi(__b, __a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcle_s16(int16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vcgev4hi(__b, __a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcle_s32(int32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgev2si(__b, __a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcle_f32(float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgev2sf(__b, __a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcle_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vcgeuv8qi((int8x8_t)__b, (int8x8_t)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcle_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vcgeuv4hi((int16x4_t)__b, (int16x4_t)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcle_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgeuv2si((int32x2_t)__b, (int32x2_t)__a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcleq_s8(int8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vcgev16qi(__b, __a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcleq_s16(int16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vcgev8hi(__b, __a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcleq_s32(int32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgev4si(__b, __a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcleq_f32(float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgev4sf(__b, __a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcleq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vcgeuv16qi((int8x16_t)__b,
                                               (int8x16_t)__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcleq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vcgeuv8hi((int16x8_t)__b, (int16x8_t)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcleq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgeuv4si((int32x4_t)__b, (int32x4_t)__a);
}
/* Lane-wise compare greater-than (VCGT) and less-than.  As with vcle, there
   is no separate VCLT builtin: vclt* calls the VCGT builtin with operands
   swapped (a < b  <=>  b > a).  Unsigned comparisons use the distinct
   vcgtu* builtins.  */
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgt_s8(int8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vcgtv8qi(__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgt_s16(int16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vcgtv4hi(__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgt_s32(int32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgtv2si(__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgt_f32(float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgtv2sf(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgt_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vcgtuv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgt_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vcgtuv4hi((int16x4_t)__a, (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgt_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgtuv2si((int32x2_t)__a, (int32x2_t)__b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_s8(int8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vcgtv16qi(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_s16(int16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vcgtv8hi(__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_s32(int32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgtv4si(__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_f32(float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgtv4sf(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vcgtuv16qi((int8x16_t)__a,
                                               (int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vcgtuv8hi((int16x8_t)__a, (int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgtuv4si((int32x4_t)__a, (int32x4_t)__b);
}
/* Less-than: VCGT with operands swapped.  */
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclt_s8(int8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vcgtv8qi(__b, __a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclt_s16(int16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vcgtv4hi(__b, __a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclt_s32(int32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgtv2si(__b, __a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclt_f32(float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgtv2sf(__b, __a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclt_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vcgtuv8qi((int8x8_t)__b, (int8x8_t)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclt_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vcgtuv4hi((int16x4_t)__b, (int16x4_t)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclt_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcgtuv2si((int32x2_t)__b, (int32x2_t)__a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s8(int8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vcgtv16qi(__b, __a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s16(int16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vcgtv8hi(__b, __a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcltq_s32(int32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgtv4si(__b, __a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcltq_f32(float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgtv4sf(__b, __a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vcgtuv16qi((int8x16_t)__b,
                                               (int8x16_t)__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vcgtuv8hi((int16x8_t)__b, (int16x8_t)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcltq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcgtuv4si((int32x4_t)__b, (int32x4_t)__a);
}
/* Floating-point absolute compares (VACGE/VACGT): compare the magnitudes
   |__a| and |__b| lane-wise, producing all-ones/all-zeros masks.  Only two
   builtins exist (absolute >= and absolute >); the "less" forms vcale/vcalt
   swap the operands instead of having builtins of their own.  */
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcage_f32(float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcagev2sf(__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcageq_f32(float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcagev4sf(__a, __b);
}
/* |__a| <= |__b|  ==  |__b| >= |__a| (operands swapped).  */
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcale_f32(float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcagev2sf(__b, __a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcaleq_f32(float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcagev4sf(__b, __a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcagt_f32(float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcagtv2sf(__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcagtq_f32(float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcagtv4sf(__a, __b);
}
/* |__a| < |__b|  ==  |__b| > |__a| (operands swapped).  */
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcalt_f32(float32x2_t __a, float32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vcagtv2sf(__b, __a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcaltq_f32(float32x4_t __a, float32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vcagtv4sf(__b, __a);
}
/* Vector test bits (VTST): each result lane is all-ones when
   (__a & __b) != 0 in that lane, else all-zeros.  The operation is purely
   bitwise, so the signed, unsigned and polynomial variants of a given
   width all funnel into the same signed builtin via casts.  */
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtst_s8(int8x8_t __a, int8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vtstv8qi(__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtst_s16(int16x4_t __a, int16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vtstv4hi(__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtst_s32(int32x2_t __a, int32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vtstv2si(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtst_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vtstv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtst_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vtstv4hi((int16x4_t)__a, (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtst_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vtstv2si((int32x2_t)__a, (int32x2_t)__b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtst_p8(poly8x8_t __a, poly8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vtstv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtst_p16(poly16x4_t __a, poly16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vtstv4hi((int16x4_t)__a, (int16x4_t)__b);
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtstq_s8(int8x16_t __a, int8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vtstv16qi(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtstq_s16(int16x8_t __a, int16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vtstv8hi(__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtstq_s32(int32x4_t __a, int32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vtstv4si(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtstq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vtstv16qi((int8x16_t)__a, (int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtstq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vtstv8hi((int16x8_t)__a, (int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtstq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vtstv4si((int32x4_t)__a, (int32x4_t)__b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtstq_p8(poly8x16_t __a, poly8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vtstv16qi((int8x16_t)__a, (int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtstq_p16(poly16x8_t __a, poly16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vtstv8hi((int16x8_t)__a, (int16x8_t)__b);
}
/* Vector absolute difference (VABD): lane-wise |__a - __b|.  Separate
   signed (vabds*) and unsigned (vabdu*) builtins exist; unsigned wrappers
   cast through the signed vector types the builtins are declared on.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabd_s8(int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vabdsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabd_s16(int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vabdsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabd_s32(int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vabdsv2si(__a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabd_f32(float32x2_t __a, float32x2_t __b)
{
  return (float32x2_t)__builtin_neon_vabdfv2sf(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabd_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vabduv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabd_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vabduv4hi((int16x4_t)__a, (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabd_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vabduv2si((int32x2_t)__a, (int32x2_t)__b);
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdq_s8(int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vabdsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdq_s16(int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vabdsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdq_s32(int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vabdsv4si(__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdq_f32(float32x4_t __a, float32x4_t __b)
{
  return (float32x4_t)__builtin_neon_vabdfv4sf(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vabduv16qi((int8x16_t)__a,
					       (int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vabduv8hi((int16x8_t)__a, (int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vabduv4si((int32x4_t)__a, (int32x4_t)__b);
}
/* Vector absolute difference, long (VABDL): |__a - __b| with each result
   lane twice the width of the operand lanes (e.g. 8-bit inputs produce a
   16x8 result), so the difference cannot overflow.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s8(int8x8_t __a, int8x8_t __b)
{
  return (int16x8_t)__builtin_neon_vabdlsv8qi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s16(int16x4_t __a, int16x4_t __b)
{
  return (int32x4_t)__builtin_neon_vabdlsv4hi(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdl_s32(int32x2_t __a, int32x2_t __b)
{
  return (int64x2_t)__builtin_neon_vabdlsv2si(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vabdluv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vabdluv4hi((int16x4_t)__a,
					       (int16x4_t)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdl_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint64x2_t)__builtin_neon_vabdluv2si((int32x2_t)__a,
					       (int32x2_t)__b);
}
/* Vector absolute difference and accumulate (VABA): lane-wise
   __a + |__b - __c|.  The first operand is the accumulator; the absolute
   difference is taken between the second and third operands.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaba_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return (int8x8_t)__builtin_neon_vabasv8qi(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaba_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return (int16x4_t)__builtin_neon_vabasv4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaba_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return (int32x2_t)__builtin_neon_vabasv2si(__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaba_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return (uint8x8_t)__builtin_neon_vabauv8qi((int8x8_t)__a, (int8x8_t)__b,
					     (int8x8_t)__c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaba_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return (uint16x4_t)__builtin_neon_vabauv4hi((int16x4_t)__a, (int16x4_t)__b,
					      (int16x4_t)__c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaba_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return (uint32x2_t)__builtin_neon_vabauv2si((int32x2_t)__a, (int32x2_t)__b,
					      (int32x2_t)__c);
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabaq_s8(int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
  return (int8x16_t)__builtin_neon_vabasv16qi(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabaq_s16(int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return (int16x8_t)__builtin_neon_vabasv8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabaq_s32(int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return (int32x4_t)__builtin_neon_vabasv4si(__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabaq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return (uint8x16_t)__builtin_neon_vabauv16qi((int8x16_t)__a, (int8x16_t)__b,
					       (int8x16_t)__c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabaq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return (uint16x8_t)__builtin_neon_vabauv8hi((int16x8_t)__a, (int16x8_t)__b,
					      (int16x8_t)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabaq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return (uint32x4_t)__builtin_neon_vabauv4si((int32x4_t)__a, (int32x4_t)__b,
					      (int32x4_t)__c);
}
/* Vector absolute difference and accumulate, long (VABAL): __a + |__b - __c|
   where the accumulator __a has lanes twice the width of __b and __c, so
   the widened absolute difference is added without overflow.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabal_s8(int16x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return (int16x8_t)__builtin_neon_vabalsv8qi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabal_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return (int32x4_t)__builtin_neon_vabalsv4hi(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabal_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return (int64x2_t)__builtin_neon_vabalsv2si(__a, __b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabal_u8(uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return (uint16x8_t)__builtin_neon_vabaluv8qi((int16x8_t)__a, (int8x8_t)__b,
					       (int8x8_t)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabal_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return (uint32x4_t)__builtin_neon_vabaluv4hi((int32x4_t)__a, (int16x4_t)__b,
					       (int16x4_t)__c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabal_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return (uint64x2_t)__builtin_neon_vabaluv2si((int64x2_t)__a, (int32x2_t)__b,
					       (int32x2_t)__c);
}
/* Vector maximum (VMAX): lane-wise max(__a, __b).  Signed, unsigned and
   float variants; unsigned forms cast through the signed vector types the
   builtins are declared on.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmax_s8(int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vmaxsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmax_s16(int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vmaxsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmax_s32(int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vmaxsv2si(__a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmax_f32(float32x2_t __a, float32x2_t __b)
{
  return (float32x2_t)__builtin_neon_vmaxfv2sf(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmax_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vmaxuv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmax_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vmaxuv4hi((int16x4_t)__a, (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmax_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vmaxuv2si((int32x2_t)__a, (int32x2_t)__b);
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_s8(int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vmaxsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_s16(int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vmaxsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_s32(int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vmaxsv4si(__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_f32(float32x4_t __a, float32x4_t __b)
{
  return (float32x4_t)__builtin_neon_vmaxfv4sf(__a, __b);
}
/* VMAXNM/VMINNM (IEEE 754-2008 maxNum/minNum semantics) require the
   ARMv8 FP extension, hence the temporary target override: the pragma
   pair enables fpu=neon-fp-armv8 for just these four intrinsics and then
   restores the previous target options.  Note these use the plain a/b
   parameter names rather than the __-prefixed names used elsewhere.  */
#pragma GCC push_options
#pragma GCC target("fpu=neon-fp-armv8")
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxnm_f32(float32x2_t a, float32x2_t b)
{
  return (float32x2_t)__builtin_neon_vmaxnmv2sf(a, b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxnmq_f32(float32x4_t a, float32x4_t b)
{
  return (float32x4_t)__builtin_neon_vmaxnmv4sf(a, b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminnm_f32(float32x2_t a, float32x2_t b)
{
  return (float32x2_t)__builtin_neon_vminnmv2sf(a, b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminnmq_f32(float32x4_t a, float32x4_t b)
{
  return (float32x4_t)__builtin_neon_vminnmv4sf(a, b);
}
#pragma GCC pop_options
/* Unsigned quadword vector maximum (VMAX.Uxx); placed after the pragma
   region above in the original layout.  Casts reinterpret the unsigned
   vectors as the signed types the builtins are declared on.  */
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vmaxuv16qi((int8x16_t)__a,
					       (int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vmaxuv8hi((int16x8_t)__a, (int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vmaxuv4si((int32x4_t)__a, (int32x4_t)__b);
}
/* Vector minimum (VMIN): lane-wise min(__a, __b); mirrors the vmax family
   above with the vmins*/vminu*/vminf* builtins.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmin_s8(int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vminsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmin_s16(int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vminsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmin_s32(int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vminsv2si(__a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmin_f32(float32x2_t __a, float32x2_t __b)
{
  return (float32x2_t)__builtin_neon_vminfv2sf(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmin_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vminuv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmin_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vminuv4hi((int16x4_t)__a, (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmin_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vminuv2si((int32x2_t)__a, (int32x2_t)__b);
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminq_s8(int8x16_t __a, int8x16_t __b)
{
  return (int8x16_t)__builtin_neon_vminsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminq_s16(int16x8_t __a, int16x8_t __b)
{
  return (int16x8_t)__builtin_neon_vminsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminq_s32(int32x4_t __a, int32x4_t __b)
{
  return (int32x4_t)__builtin_neon_vminsv4si(__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminq_f32(float32x4_t __a, float32x4_t __b)
{
  return (float32x4_t)__builtin_neon_vminfv4sf(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminq_u8(uint8x16_t __a, uint8x16_t __b)
{
  return (uint8x16_t)__builtin_neon_vminuv16qi((int8x16_t)__a,
					       (int8x16_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminq_u16(uint16x8_t __a, uint16x8_t __b)
{
  return (uint16x8_t)__builtin_neon_vminuv8hi((int16x8_t)__a, (int16x8_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminq_u32(uint32x4_t __a, uint32x4_t __b)
{
  return (uint32x4_t)__builtin_neon_vminuv4si((int32x4_t)__a, (int32x4_t)__b);
}
/* Vector pairwise add (VPADD, 64-bit forms only): adjacent lane pairs of
   __a then of __b are summed; the results are concatenated into one
   doubleword vector.  The operation is sign-agnostic, so the unsigned
   wrappers reuse the same builtins via casts.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadd_s8(int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vpaddv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadd_s16(int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vpaddv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadd_s32(int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vpaddv2si(__a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadd_f32(float32x2_t __a, float32x2_t __b)
{
  return (float32x2_t)__builtin_neon_vpaddv2sf(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadd_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vpaddv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadd_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vpaddv4hi((int16x4_t)__a, (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadd_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vpaddv2si((int32x2_t)__a, (int32x2_t)__b);
}
/* Vector pairwise add, long (VPADDL): adjacent lane pairs of the single
   operand are summed into lanes of twice the width (halving the lane
   count), so no overflow can occur.  Signed and unsigned builtins differ
   because the widening is sign- or zero-extending.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s8(int8x8_t __a)
{
  return (int16x4_t)__builtin_neon_vpaddlsv8qi(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s16(int16x4_t __a)
{
  return (int32x2_t)__builtin_neon_vpaddlsv4hi(__a);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s32(int32x2_t __a)
{
  return (int64x1_t)__builtin_neon_vpaddlsv2si(__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u8(uint8x8_t __a)
{
  return (uint16x4_t)__builtin_neon_vpaddluv8qi((int8x8_t)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u16(uint16x4_t __a)
{
  return (uint32x2_t)__builtin_neon_vpaddluv4hi((int16x4_t)__a);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u32(uint32x2_t __a)
{
  return (uint64x1_t)__builtin_neon_vpaddluv2si((int32x2_t)__a);
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s8(int8x16_t __a)
{
  return (int16x8_t)__builtin_neon_vpaddlsv16qi(__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s16(int16x8_t __a)
{
  return (int32x4_t)__builtin_neon_vpaddlsv8hi(__a);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s32(int32x4_t __a)
{
  return (int64x2_t)__builtin_neon_vpaddlsv4si(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u8(uint8x16_t __a)
{
  return (uint16x8_t)__builtin_neon_vpaddluv16qi((int8x16_t)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u16(uint16x8_t __a)
{
  return (uint32x4_t)__builtin_neon_vpaddluv8hi((int16x8_t)__a);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u32(uint32x4_t __a)
{
  return (uint64x2_t)__builtin_neon_vpaddluv4si((int32x4_t)__a);
}
/* Vector pairwise add and accumulate, long (VPADAL): adjacent lane pairs
   of __b are summed into double-width lanes and added to the accumulator
   __a, whose lanes are twice the width of __b's.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadal_s8(int16x4_t __a, int8x8_t __b)
{
  return (int16x4_t)__builtin_neon_vpadalsv8qi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadal_s16(int32x2_t __a, int16x4_t __b)
{
  return (int32x2_t)__builtin_neon_vpadalsv4hi(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadal_s32(int64x1_t __a, int32x2_t __b)
{
  return (int64x1_t)__builtin_neon_vpadalsv2si(__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadal_u8(uint16x4_t __a, uint8x8_t __b)
{
  return (uint16x4_t)__builtin_neon_vpadaluv8qi((int16x4_t)__a,
						(int8x8_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadal_u16(uint32x2_t __a, uint16x4_t __b)
{
  return (uint32x2_t)__builtin_neon_vpadaluv4hi((int32x2_t)__a,
						(int16x4_t)__b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadal_u32(uint64x1_t __a, uint32x2_t __b)
{
  return (uint64x1_t)__builtin_neon_vpadaluv2si((int64x1_t)__a,
						(int32x2_t)__b);
}
/* 128-bit (quadword) forms.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_s8(int16x8_t __a, int8x16_t __b)
{
  return (int16x8_t)__builtin_neon_vpadalsv16qi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_s16(int32x4_t __a, int16x8_t __b)
{
  return (int32x4_t)__builtin_neon_vpadalsv8hi(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_s32(int64x2_t __a, int32x4_t __b)
{
  return (int64x2_t)__builtin_neon_vpadalsv4si(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_u8(uint16x8_t __a, uint8x16_t __b)
{
  return (uint16x8_t)__builtin_neon_vpadaluv16qi((int16x8_t)__a,
						 (int8x16_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_u16(uint32x4_t __a, uint16x8_t __b)
{
  return (uint32x4_t)__builtin_neon_vpadaluv8hi((int32x4_t)__a,
						(int16x8_t)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadalq_u32(uint64x2_t __a, uint32x4_t __b)
{
  return (uint64x2_t)__builtin_neon_vpadaluv4si((int64x2_t)__a,
						(int32x4_t)__b);
}
/* Vector pairwise maximum (VPMAX, 64-bit forms only): the maximum of each
   adjacent lane pair of __a, then of __b, concatenated into one
   doubleword result.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmax_s8(int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vpmaxsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmax_s16(int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vpmaxsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmax_s32(int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vpmaxsv2si(__a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmax_f32(float32x2_t __a, float32x2_t __b)
{
  return (float32x2_t)__builtin_neon_vpmaxfv2sf(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmax_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vpmaxuv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmax_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vpmaxuv4hi((int16x4_t)__a,
					       (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmax_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vpmaxuv2si((int32x2_t)__a,
					       (int32x2_t)__b);
}
/* Vector pairwise minimum (VPMIN, 64-bit forms only): the minimum of each
   adjacent lane pair of __a, then of __b, concatenated into one
   doubleword result.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmin_s8(int8x8_t __a, int8x8_t __b)
{
  return (int8x8_t)__builtin_neon_vpminsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmin_s16(int16x4_t __a, int16x4_t __b)
{
  return (int16x4_t)__builtin_neon_vpminsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmin_s32(int32x2_t __a, int32x2_t __b)
{
  return (int32x2_t)__builtin_neon_vpminsv2si(__a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmin_f32(float32x2_t __a, float32x2_t __b)
{
  return (float32x2_t)__builtin_neon_vpminfv2sf(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmin_u8(uint8x8_t __a, uint8x8_t __b)
{
  return (uint8x8_t)__builtin_neon_vpminuv8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmin_u16(uint16x4_t __a, uint16x4_t __b)
{
  return (uint16x4_t)__builtin_neon_vpminuv4hi((int16x4_t)__a,
					       (int16x4_t)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmin_u32(uint32x2_t __a, uint32x2_t __b)
{
  return (uint32x2_t)__builtin_neon_vpminuv2si((int32x2_t)__a,
					       (int32x2_t)__b);
}
/* VRECPS: Newton-Raphson reciprocal refinement step.  Per the ARM NEON
   intrinsic VRECPS, computes (2 - __a * __b) per lane, used to refine an
   initial VRECPE reciprocal estimate.  D-register and Q-register forms.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrecps_f32(float32x2_t __a, float32x2_t __b)
{
return (float32x2_t)__builtin_neon_vrecpsv2sf(__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrecpsq_f32(float32x4_t __a, float32x4_t __b)
{
return (float32x4_t)__builtin_neon_vrecpsv4sf(__a, __b);
}
/* VRSQRTS: Newton-Raphson reciprocal-square-root refinement step.  Per
   the ARM NEON intrinsic VRSQRTS, computes (3 - __a * __b) / 2 per lane,
   used to refine an initial VRSQRTE estimate.  D- and Q-register forms.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsqrts_f32(float32x2_t __a, float32x2_t __b)
{
return (float32x2_t)__builtin_neon_vrsqrtsv2sf(__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsqrtsq_f32(float32x4_t __a, float32x4_t __b)
{
return (float32x4_t)__builtin_neon_vrsqrtsv4sf(__a, __b);
}
/* VSHL (register shift count) family: shift each lane of __a left by the
   count held in the corresponding lane of __b.  Per the VSHL intrinsic,
   __b is always a SIGNED vector — a negative count shifts right — which
   is why even the unsigned variants take a signed __b and only cast __a.
   "q"-suffixed forms operate on 128-bit Q registers.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_s8(int8x8_t __a, int8x8_t __b)
{
return (int8x8_t)__builtin_neon_vshlsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_s16(int16x4_t __a, int16x4_t __b)
{
return (int16x4_t)__builtin_neon_vshlsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_s32(int32x2_t __a, int32x2_t __b)
{
return (int32x2_t)__builtin_neon_vshlsv2si(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_s64(int64x1_t __a, int64x1_t __b)
{
return (int64x1_t)__builtin_neon_vshlsdi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_u8(uint8x8_t __a, int8x8_t __b)
{
return (uint8x8_t)__builtin_neon_vshluv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_u16(uint16x4_t __a, int16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vshluv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_u32(uint32x2_t __a, int32x2_t __b)
{
return (uint32x2_t)__builtin_neon_vshluv2si((int32x2_t)__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_u64(uint64x1_t __a, int64x1_t __b)
{
return (uint64x1_t)__builtin_neon_vshludi((int64x1_t)__a, __b);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_s8(int8x16_t __a, int8x16_t __b)
{
return (int8x16_t)__builtin_neon_vshlsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_s16(int16x8_t __a, int16x8_t __b)
{
return (int16x8_t)__builtin_neon_vshlsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_s32(int32x4_t __a, int32x4_t __b)
{
return (int32x4_t)__builtin_neon_vshlsv4si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_s64(int64x2_t __a, int64x2_t __b)
{
return (int64x2_t)__builtin_neon_vshlsv2di(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_u8(uint8x16_t __a, int8x16_t __b)
{
return (uint8x16_t)__builtin_neon_vshluv16qi((int8x16_t)__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_u16(uint16x8_t __a, int16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vshluv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_u32(uint32x4_t __a, int32x4_t __b)
{
return (uint32x4_t)__builtin_neon_vshluv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_u64(uint64x2_t __a, int64x2_t __b)
{
return (uint64x2_t)__builtin_neon_vshluv2di((int64x2_t)__a, __b);
}
/* VRSHL family: rounding variant of VSHL.  Same per-lane signed shift
   count in __b (negative count = right shift); results of right shifts
   are rounded rather than truncated.  Unsigned variants cast __a through
   the signed vector types expected by the builtins.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshl_s8(int8x8_t __a, int8x8_t __b)
{
return (int8x8_t)__builtin_neon_vrshlsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshl_s16(int16x4_t __a, int16x4_t __b)
{
return (int16x4_t)__builtin_neon_vrshlsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshl_s32(int32x2_t __a, int32x2_t __b)
{
return (int32x2_t)__builtin_neon_vrshlsv2si(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshl_s64(int64x1_t __a, int64x1_t __b)
{
return (int64x1_t)__builtin_neon_vrshlsdi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshl_u8(uint8x8_t __a, int8x8_t __b)
{
return (uint8x8_t)__builtin_neon_vrshluv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshl_u16(uint16x4_t __a, int16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vrshluv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshl_u32(uint32x2_t __a, int32x2_t __b)
{
return (uint32x2_t)__builtin_neon_vrshluv2si((int32x2_t)__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshl_u64(uint64x1_t __a, int64x1_t __b)
{
return (uint64x1_t)__builtin_neon_vrshludi((int64x1_t)__a, __b);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_s8(int8x16_t __a, int8x16_t __b)
{
return (int8x16_t)__builtin_neon_vrshlsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_s16(int16x8_t __a, int16x8_t __b)
{
return (int16x8_t)__builtin_neon_vrshlsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_s32(int32x4_t __a, int32x4_t __b)
{
return (int32x4_t)__builtin_neon_vrshlsv4si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_s64(int64x2_t __a, int64x2_t __b)
{
return (int64x2_t)__builtin_neon_vrshlsv2di(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_u8(uint8x16_t __a, int8x16_t __b)
{
return (uint8x16_t)__builtin_neon_vrshluv16qi((int8x16_t)__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_u16(uint16x8_t __a, int16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vrshluv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_u32(uint32x4_t __a, int32x4_t __b)
{
return (uint32x4_t)__builtin_neon_vrshluv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshlq_u64(uint64x2_t __a, int64x2_t __b)
{
return (uint64x2_t)__builtin_neon_vrshluv2di((int64x2_t)__a, __b);
}
/* VQSHL family: saturating variant of VSHL.  Per-lane signed shift count
   in __b (negative count = right shift); left shifts that overflow the
   lane saturate to the lane's min/max instead of wrapping.  Unsigned
   variants cast __a through the signed vector types of the builtins.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_s8(int8x8_t __a, int8x8_t __b)
{
return (int8x8_t)__builtin_neon_vqshlsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_s16(int16x4_t __a, int16x4_t __b)
{
return (int16x4_t)__builtin_neon_vqshlsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_s32(int32x2_t __a, int32x2_t __b)
{
return (int32x2_t)__builtin_neon_vqshlsv2si(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_s64(int64x1_t __a, int64x1_t __b)
{
return (int64x1_t)__builtin_neon_vqshlsdi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_u8(uint8x8_t __a, int8x8_t __b)
{
return (uint8x8_t)__builtin_neon_vqshluv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_u16(uint16x4_t __a, int16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vqshluv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_u32(uint32x2_t __a, int32x2_t __b)
{
return (uint32x2_t)__builtin_neon_vqshluv2si((int32x2_t)__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_u64(uint64x1_t __a, int64x1_t __b)
{
return (uint64x1_t)__builtin_neon_vqshludi((int64x1_t)__a, __b);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_s8(int8x16_t __a, int8x16_t __b)
{
return (int8x16_t)__builtin_neon_vqshlsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_s16(int16x8_t __a, int16x8_t __b)
{
return (int16x8_t)__builtin_neon_vqshlsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_s32(int32x4_t __a, int32x4_t __b)
{
return (int32x4_t)__builtin_neon_vqshlsv4si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_s64(int64x2_t __a, int64x2_t __b)
{
return (int64x2_t)__builtin_neon_vqshlsv2di(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_u8(uint8x16_t __a, int8x16_t __b)
{
return (uint8x16_t)__builtin_neon_vqshluv16qi((int8x16_t)__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_u16(uint16x8_t __a, int16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vqshluv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_u32(uint32x4_t __a, int32x4_t __b)
{
return (uint32x4_t)__builtin_neon_vqshluv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_u64(uint64x2_t __a, int64x2_t __b)
{
return (uint64x2_t)__builtin_neon_vqshluv2di((int64x2_t)__a, __b);
}
/* VQRSHL family: saturating AND rounding variant of VSHL — combines the
   VQSHL saturation on left-shift overflow with VRSHL rounding on right
   shifts.  Per-lane signed shift count in __b; unsigned variants cast
   __a through the signed vector types of the builtins.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_s8(int8x8_t __a, int8x8_t __b)
{
return (int8x8_t)__builtin_neon_vqrshlsv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_s16(int16x4_t __a, int16x4_t __b)
{
return (int16x4_t)__builtin_neon_vqrshlsv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_s32(int32x2_t __a, int32x2_t __b)
{
return (int32x2_t)__builtin_neon_vqrshlsv2si(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_s64(int64x1_t __a, int64x1_t __b)
{
return (int64x1_t)__builtin_neon_vqrshlsdi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_u8(uint8x8_t __a, int8x8_t __b)
{
return (uint8x8_t)__builtin_neon_vqrshluv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_u16(uint16x4_t __a, int16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vqrshluv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_u32(uint32x2_t __a, int32x2_t __b)
{
return (uint32x2_t)__builtin_neon_vqrshluv2si((int32x2_t)__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshl_u64(uint64x1_t __a, int64x1_t __b)
{
return (uint64x1_t)__builtin_neon_vqrshludi((int64x1_t)__a, __b);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_s8(int8x16_t __a, int8x16_t __b)
{
return (int8x16_t)__builtin_neon_vqrshlsv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_s16(int16x8_t __a, int16x8_t __b)
{
return (int16x8_t)__builtin_neon_vqrshlsv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_s32(int32x4_t __a, int32x4_t __b)
{
return (int32x4_t)__builtin_neon_vqrshlsv4si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_s64(int64x2_t __a, int64x2_t __b)
{
return (int64x2_t)__builtin_neon_vqrshlsv2di(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_u8(uint8x16_t __a, int8x16_t __b)
{
return (uint8x16_t)__builtin_neon_vqrshluv16qi((int8x16_t)__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_u16(uint16x8_t __a, int16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vqrshluv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_u32(uint32x4_t __a, int32x4_t __b)
{
return (uint32x4_t)__builtin_neon_vqrshluv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshlq_u64(uint64x2_t __a, int64x2_t __b)
{
return (uint64x2_t)__builtin_neon_vqrshluv2di((int64x2_t)__a, __b);
}
/* VSHR (immediate) family: shift each lane right by the compile-time
   constant __b (arithmetic shift for signed "s" builtins, logical for
   unsigned "u" builtins).  __b must be a constant in 1..lane-width; the
   builtin enforces the range at compile time.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_s8(int8x8_t __a, const int __b)
{
return (int8x8_t)__builtin_neon_vshrs_nv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_s16(int16x4_t __a, const int __b)
{
return (int16x4_t)__builtin_neon_vshrs_nv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_s32(int32x2_t __a, const int __b)
{
return (int32x2_t)__builtin_neon_vshrs_nv2si(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_s64(int64x1_t __a, const int __b)
{
return (int64x1_t)__builtin_neon_vshrs_ndi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_u8(uint8x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vshru_nv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_u16(uint16x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vshru_nv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_u32(uint32x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vshru_nv2si((int32x2_t)__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshr_n_u64(uint64x1_t __a, const int __b)
{
return (uint64x1_t)__builtin_neon_vshru_ndi((int64x1_t)__a, __b);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_s8(int8x16_t __a, const int __b)
{
return (int8x16_t)__builtin_neon_vshrs_nv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_s16(int16x8_t __a, const int __b)
{
return (int16x8_t)__builtin_neon_vshrs_nv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_s32(int32x4_t __a, const int __b)
{
return (int32x4_t)__builtin_neon_vshrs_nv4si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_s64(int64x2_t __a, const int __b)
{
return (int64x2_t)__builtin_neon_vshrs_nv2di(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_u8(uint8x16_t __a, const int __b)
{
return (uint8x16_t)__builtin_neon_vshru_nv16qi((int8x16_t)__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_u16(uint16x8_t __a, const int __b)
{
return (uint16x8_t)__builtin_neon_vshru_nv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_u32(uint32x4_t __a, const int __b)
{
return (uint32x4_t)__builtin_neon_vshru_nv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrq_n_u64(uint64x2_t __a, const int __b)
{
return (uint64x2_t)__builtin_neon_vshru_nv2di((int64x2_t)__a, __b);
}
/* VRSHR (immediate) family: rounding right shift by the compile-time
   constant __b.  Like VSHR but the shifted-out bits round the result
   instead of being truncated.  __b must be a constant in 1..lane-width.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_s8(int8x8_t __a, const int __b)
{
return (int8x8_t)__builtin_neon_vrshrs_nv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_s16(int16x4_t __a, const int __b)
{
return (int16x4_t)__builtin_neon_vrshrs_nv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_s32(int32x2_t __a, const int __b)
{
return (int32x2_t)__builtin_neon_vrshrs_nv2si(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_s64(int64x1_t __a, const int __b)
{
return (int64x1_t)__builtin_neon_vrshrs_ndi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_u8(uint8x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vrshru_nv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_u16(uint16x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vrshru_nv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_u32(uint32x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vrshru_nv2si((int32x2_t)__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshr_n_u64(uint64x1_t __a, const int __b)
{
return (uint64x1_t)__builtin_neon_vrshru_ndi((int64x1_t)__a, __b);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_s8(int8x16_t __a, const int __b)
{
return (int8x16_t)__builtin_neon_vrshrs_nv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_s16(int16x8_t __a, const int __b)
{
return (int16x8_t)__builtin_neon_vrshrs_nv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_s32(int32x4_t __a, const int __b)
{
return (int32x4_t)__builtin_neon_vrshrs_nv4si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_s64(int64x2_t __a, const int __b)
{
return (int64x2_t)__builtin_neon_vrshrs_nv2di(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_u8(uint8x16_t __a, const int __b)
{
return (uint8x16_t)__builtin_neon_vrshru_nv16qi((int8x16_t)__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_u16(uint16x8_t __a, const int __b)
{
return (uint16x8_t)__builtin_neon_vrshru_nv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_u32(uint32x4_t __a, const int __b)
{
return (uint32x4_t)__builtin_neon_vrshru_nv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrq_n_u64(uint64x2_t __a, const int __b)
{
return (uint64x2_t)__builtin_neon_vrshru_nv2di((int64x2_t)__a, __b);
}
/* VSHRN family: shift right by constant __b and NARROW — each quadword
   input lane is shifted then truncated to half width, yielding a
   doubleword result.  The unsigned variants reuse the same (signed)
   builtins since a truncating right shift by 1..half-width bits is
   bit-identical for both signednesses at these widths.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrn_n_s16(int16x8_t __a, const int __b)
{
return (int8x8_t)__builtin_neon_vshrn_nv8hi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrn_n_s32(int32x4_t __a, const int __b)
{
return (int16x4_t)__builtin_neon_vshrn_nv4si(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrn_n_s64(int64x2_t __a, const int __b)
{
return (int32x2_t)__builtin_neon_vshrn_nv2di(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrn_n_u16(uint16x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vshrn_nv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrn_n_u32(uint32x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vshrn_nv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshrn_n_u64(uint64x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vshrn_nv2di((int64x2_t)__a, __b);
}
/* VRSHRN family: rounding shift right by constant __b with narrowing to
   half-width lanes.  Same shape as VSHRN, but the shifted-out bits round
   the result before truncation.  Unsigned variants share the signed
   builtins (the operation is sign-agnostic bit-wise).  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_s16(int16x8_t __a, const int __b)
{
return (int8x8_t)__builtin_neon_vrshrn_nv8hi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_s32(int32x4_t __a, const int __b)
{
return (int16x4_t)__builtin_neon_vrshrn_nv4si(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_s64(int64x2_t __a, const int __b)
{
return (int32x2_t)__builtin_neon_vrshrn_nv2di(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_u16(uint16x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vrshrn_nv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_u32(uint32x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vrshrn_nv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrshrn_n_u64(uint64x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vrshrn_nv2di((int64x2_t)__a, __b);
}
/* VQSHRN family: saturating shift right by constant __b with narrowing.
   Values that do not fit the half-width result lane saturate to its
   min/max.  Here signedness DOES matter, so distinct "s"/"u" builtins
   are used (unlike the truncating VSHRN wrappers above).  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_s16(int16x8_t __a, const int __b)
{
return (int8x8_t)__builtin_neon_vqshrns_nv8hi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_s32(int32x4_t __a, const int __b)
{
return (int16x4_t)__builtin_neon_vqshrns_nv4si(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_s64(int64x2_t __a, const int __b)
{
return (int32x2_t)__builtin_neon_vqshrns_nv2di(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_u16(uint16x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vqshrnu_nv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_u32(uint32x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vqshrnu_nv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshrn_n_u64(uint64x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vqshrnu_nv2di((int64x2_t)__a, __b);
}
/* VQRSHRN family: saturating, ROUNDING shift right by constant __b with
   narrowing to half-width lanes — VQSHRN plus rounding of the shifted-out
   bits.  Distinct "s"/"u" builtins because saturation bounds depend on
   signedness.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_s16(int16x8_t __a, const int __b)
{
return (int8x8_t)__builtin_neon_vqrshrns_nv8hi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_s32(int32x4_t __a, const int __b)
{
return (int16x4_t)__builtin_neon_vqrshrns_nv4si(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_s64(int64x2_t __a, const int __b)
{
return (int32x2_t)__builtin_neon_vqrshrns_nv2di(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_u16(uint16x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vqrshrnu_nv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_u32(uint32x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vqrshrnu_nv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshrn_n_u64(uint64x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vqrshrnu_nv2di((int64x2_t)__a, __b);
}
/* VQSHRUN family: signed input, UNSIGNED saturating narrow.  Shift each
   signed lane right by constant __b, then saturate into the unsigned
   half-width range (negative inputs clamp to 0).  Note the signed
   parameter / unsigned result — this is the "un" (unsigned narrow) form.  */
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshrun_n_s16(int16x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vqshrun_nv8hi(__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshrun_n_s32(int32x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vqshrun_nv4si(__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshrun_n_s64(int64x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vqshrun_nv2di(__a, __b);
}
/* VQRSHRUN family: rounding variant of VQSHRUN — signed input shifted
   right by constant __b with rounding, then saturated into the unsigned
   half-width result range (negative inputs clamp to 0).  */
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshrun_n_s16(int16x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vqrshrun_nv8hi(__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshrun_n_s32(int32x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vqrshrun_nv4si(__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrshrun_n_s64(int64x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vqrshrun_nv2di(__a, __b);
}
/* VSHL (immediate) family: shift each lane left by the compile-time
   constant __b (0..lane-width-1).  Left shift is bit-identical for
   signed and unsigned data, so the unsigned wrappers reuse the same
   builtins with casts through the signed vector types.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_s8(int8x8_t __a, const int __b)
{
return (int8x8_t)__builtin_neon_vshl_nv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_s16(int16x4_t __a, const int __b)
{
return (int16x4_t)__builtin_neon_vshl_nv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_s32(int32x2_t __a, const int __b)
{
return (int32x2_t)__builtin_neon_vshl_nv2si(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_s64(int64x1_t __a, const int __b)
{
return (int64x1_t)__builtin_neon_vshl_ndi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_u8(uint8x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vshl_nv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_u16(uint16x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vshl_nv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_u32(uint32x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vshl_nv2si((int32x2_t)__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshl_n_u64(uint64x1_t __a, const int __b)
{
return (uint64x1_t)__builtin_neon_vshl_ndi((int64x1_t)__a, __b);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_s8(int8x16_t __a, const int __b)
{
return (int8x16_t)__builtin_neon_vshl_nv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_s16(int16x8_t __a, const int __b)
{
return (int16x8_t)__builtin_neon_vshl_nv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_s32(int32x4_t __a, const int __b)
{
return (int32x4_t)__builtin_neon_vshl_nv4si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_s64(int64x2_t __a, const int __b)
{
return (int64x2_t)__builtin_neon_vshl_nv2di(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_u8(uint8x16_t __a, const int __b)
{
return (uint8x16_t)__builtin_neon_vshl_nv16qi((int8x16_t)__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_u16(uint16x8_t __a, const int __b)
{
return (uint16x8_t)__builtin_neon_vshl_nv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_u32(uint32x4_t __a, const int __b)
{
return (uint32x4_t)__builtin_neon_vshl_nv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshlq_n_u64(uint64x2_t __a, const int __b)
{
return (uint64x2_t)__builtin_neon_vshl_nv2di((int64x2_t)__a, __b);
}
/* vqshl_n_*: 64-bit (doubleword) saturating vector shift-left-by-immediate
   (ARM `vqshl_n` family).  Signed lanes route to the `vqshl_s_n*` builtins,
   unsigned lanes to `vqshl_u_n*` with bit-pattern-preserving casts to/from
   the signed lane types the builtins are declared with. */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_s8(int8x8_t __a, const int __b)
{
return (int8x8_t)__builtin_neon_vqshl_s_nv8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_s16(int16x4_t __a, const int __b)
{
return (int16x4_t)__builtin_neon_vqshl_s_nv4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_s32(int32x2_t __a, const int __b)
{
return (int32x2_t)__builtin_neon_vqshl_s_nv2si(__a, __b);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_s64(int64x1_t __a, const int __b)
{
return (int64x1_t)__builtin_neon_vqshl_s_ndi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_u8(uint8x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vqshl_u_nv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_u16(uint16x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vqshl_u_nv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_u32(uint32x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vqshl_u_nv2si((int32x2_t)__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshl_n_u64(uint64x1_t __a, const int __b)
{
return (uint64x1_t)__builtin_neon_vqshl_u_ndi((int64x1_t)__a, __b);
}
/* vqshlq_n_*: quadword (128-bit) counterpart of vqshl_n_* above — saturating
   vector shift left by a compile-time immediate, one wrapper per lane type,
   with the same signed/unsigned builtin split and reinterpreting casts. */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_s8(int8x16_t __a, const int __b)
{
return (int8x16_t)__builtin_neon_vqshl_s_nv16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_s16(int16x8_t __a, const int __b)
{
return (int16x8_t)__builtin_neon_vqshl_s_nv8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_s32(int32x4_t __a, const int __b)
{
return (int32x4_t)__builtin_neon_vqshl_s_nv4si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_s64(int64x2_t __a, const int __b)
{
return (int64x2_t)__builtin_neon_vqshl_s_nv2di(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_u8(uint8x16_t __a, const int __b)
{
return (uint8x16_t)__builtin_neon_vqshl_u_nv16qi((int8x16_t)__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_u16(uint16x8_t __a, const int __b)
{
return (uint16x8_t)__builtin_neon_vqshl_u_nv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_u32(uint32x4_t __a, const int __b)
{
return (uint32x4_t)__builtin_neon_vqshl_u_nv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlq_n_u64(uint64x2_t __a, const int __b)
{
return (uint64x2_t)__builtin_neon_vqshl_u_nv2di((int64x2_t)__a, __b);
}
/* vqshlu_n_* / vqshluq_n_*: saturating shift left by immediate with a SIGNED
   input vector and an UNSIGNED result vector (note the signatures: intN in,
   uintN out) — the ARM `vqshlu` signed-to-unsigned variant.  Doubleword
   wrappers first, then the quadword (`q`) wrappers. */
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlu_n_s8(int8x8_t __a, const int __b)
{
return (uint8x8_t)__builtin_neon_vqshlu_nv8qi(__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlu_n_s16(int16x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vqshlu_nv4hi(__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlu_n_s32(int32x2_t __a, const int __b)
{
return (uint32x2_t)__builtin_neon_vqshlu_nv2si(__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshlu_n_s64(int64x1_t __a, const int __b)
{
return (uint64x1_t)__builtin_neon_vqshlu_ndi(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshluq_n_s8(int8x16_t __a, const int __b)
{
return (uint8x16_t)__builtin_neon_vqshlu_nv16qi(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshluq_n_s16(int16x8_t __a, const int __b)
{
return (uint16x8_t)__builtin_neon_vqshlu_nv8hi(__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshluq_n_s32(int32x4_t __a, const int __b)
{
return (uint32x4_t)__builtin_neon_vqshlu_nv4si(__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqshluq_n_s64(int64x2_t __a, const int __b)
{
return (uint64x2_t)__builtin_neon_vqshlu_nv2di(__a, __b);
}
/* vshll_n_*: long (widening) shift left by immediate.  Per the signatures,
   each takes a 64-bit vector and returns a 128-bit vector whose lanes are
   twice as wide (e.g. int8x8_t -> int16x8_t).  Signed lanes use the
   `vshlls_n*` builtins, unsigned lanes `vshllu_n*` with reinterpreting
   casts of the input. */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_s8(int8x8_t __a, const int __b)
{
return (int16x8_t)__builtin_neon_vshlls_nv8qi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_s16(int16x4_t __a, const int __b)
{
return (int32x4_t)__builtin_neon_vshlls_nv4hi(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_s32(int32x2_t __a, const int __b)
{
return (int64x2_t)__builtin_neon_vshlls_nv2si(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_u8(uint8x8_t __a, const int __b)
{
return (uint16x8_t)__builtin_neon_vshllu_nv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_u16(uint16x4_t __a, const int __b)
{
return (uint32x4_t)__builtin_neon_vshllu_nv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vshll_n_u32(uint32x2_t __a, const int __b)
{
return (uint64x2_t)__builtin_neon_vshllu_nv2si((int32x2_t)__a, __b);
}
/* vsra_n_*: doubleword shift-right-by-immediate and accumulate (ARM `vsra`:
   __b is shifted right by __c and added into __a; see the ARM NEON intrinsic
   reference).  Signed lanes use `vsras_n*`, unsigned lanes `vsrau_n*` with
   bit-reinterpreting casts of both vector operands. */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_s8(int8x8_t __a, int8x8_t __b, const int __c)
{
return (int8x8_t)__builtin_neon_vsras_nv8qi(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_s16(int16x4_t __a, int16x4_t __b, const int __c)
{
return (int16x4_t)__builtin_neon_vsras_nv4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_s32(int32x2_t __a, int32x2_t __b, const int __c)
{
return (int32x2_t)__builtin_neon_vsras_nv2si(__a, __b, __c);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_s64(int64x1_t __a, int64x1_t __b, const int __c)
{
return (int64x1_t)__builtin_neon_vsras_ndi(__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_u8(uint8x8_t __a, uint8x8_t __b, const int __c)
{
return (uint8x8_t)__builtin_neon_vsrau_nv8qi((int8x8_t)__a, (int8x8_t)__b,
__c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_u16(uint16x4_t __a, uint16x4_t __b, const int __c)
{
return (uint16x4_t)__builtin_neon_vsrau_nv4hi((int16x4_t)__a,
(int16x4_t)__b, __c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_u32(uint32x2_t __a, uint32x2_t __b, const int __c)
{
return (uint32x2_t)__builtin_neon_vsrau_nv2si((int32x2_t)__a,
(int32x2_t)__b, __c);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsra_n_u64(uint64x1_t __a, uint64x1_t __b, const int __c)
{
return (uint64x1_t)__builtin_neon_vsrau_ndi((int64x1_t)__a, (int64x1_t)__b,
__c);
}
/* vsraq_n_*: quadword (128-bit) counterpart of vsra_n_* — shift __b right by
   the immediate __c and accumulate into __a, one wrapper per lane type. */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_s8(int8x16_t __a, int8x16_t __b, const int __c)
{
return (int8x16_t)__builtin_neon_vsras_nv16qi(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_s16(int16x8_t __a, int16x8_t __b, const int __c)
{
return (int16x8_t)__builtin_neon_vsras_nv8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_s32(int32x4_t __a, int32x4_t __b, const int __c)
{
return (int32x4_t)__builtin_neon_vsras_nv4si(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_s64(int64x2_t __a, int64x2_t __b, const int __c)
{
return (int64x2_t)__builtin_neon_vsras_nv2di(__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_u8(uint8x16_t __a, uint8x16_t __b, const int __c)
{
return (uint8x16_t)__builtin_neon_vsrau_nv16qi((int8x16_t)__a,
(int8x16_t)__b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_u16(uint16x8_t __a, uint16x8_t __b, const int __c)
{
return (uint16x8_t)__builtin_neon_vsrau_nv8hi((int16x8_t)__a,
(int16x8_t)__b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_u32(uint32x4_t __a, uint32x4_t __b, const int __c)
{
return (uint32x4_t)__builtin_neon_vsrau_nv4si((int32x4_t)__a,
(int32x4_t)__b, __c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsraq_n_u64(uint64x2_t __a, uint64x2_t __b, const int __c)
{
return (uint64x2_t)__builtin_neon_vsrau_nv2di((int64x2_t)__a,
(int64x2_t)__b, __c);
}
/* vrsra_n_*: doubleword ROUNDING shift-right-and-accumulate by immediate
   (the extra `r` vs. vsra_n_* above selects the rounding builtins
   `vrsras_n*` / `vrsrau_n*`); otherwise the same wrapper pattern. */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_s8(int8x8_t __a, int8x8_t __b, const int __c)
{
return (int8x8_t)__builtin_neon_vrsras_nv8qi(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_s16(int16x4_t __a, int16x4_t __b, const int __c)
{
return (int16x4_t)__builtin_neon_vrsras_nv4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_s32(int32x2_t __a, int32x2_t __b, const int __c)
{
return (int32x2_t)__builtin_neon_vrsras_nv2si(__a, __b, __c);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_s64(int64x1_t __a, int64x1_t __b, const int __c)
{
return (int64x1_t)__builtin_neon_vrsras_ndi(__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_u8(uint8x8_t __a, uint8x8_t __b, const int __c)
{
return (uint8x8_t)__builtin_neon_vrsrau_nv8qi((int8x8_t)__a, (int8x8_t)__b,
__c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_u16(uint16x4_t __a, uint16x4_t __b, const int __c)
{
return (uint16x4_t)__builtin_neon_vrsrau_nv4hi((int16x4_t)__a,
(int16x4_t)__b, __c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_u32(uint32x2_t __a, uint32x2_t __b, const int __c)
{
return (uint32x2_t)__builtin_neon_vrsrau_nv2si((int32x2_t)__a,
(int32x2_t)__b, __c);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsra_n_u64(uint64x1_t __a, uint64x1_t __b, const int __c)
{
return (uint64x1_t)__builtin_neon_vrsrau_ndi((int64x1_t)__a, (int64x1_t)__b,
__c);
}
/* vrsraq_n_*: quadword (128-bit) counterpart of vrsra_n_* — rounding shift
   right by the immediate __c with accumulation into __a. */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_s8(int8x16_t __a, int8x16_t __b, const int __c)
{
return (int8x16_t)__builtin_neon_vrsras_nv16qi(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_s16(int16x8_t __a, int16x8_t __b, const int __c)
{
return (int16x8_t)__builtin_neon_vrsras_nv8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_s32(int32x4_t __a, int32x4_t __b, const int __c)
{
return (int32x4_t)__builtin_neon_vrsras_nv4si(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_s64(int64x2_t __a, int64x2_t __b, const int __c)
{
return (int64x2_t)__builtin_neon_vrsras_nv2di(__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_u8(uint8x16_t __a, uint8x16_t __b, const int __c)
{
return (uint8x16_t)__builtin_neon_vrsrau_nv16qi((int8x16_t)__a,
(int8x16_t)__b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_u16(uint16x8_t __a, uint16x8_t __b, const int __c)
{
return (uint16x8_t)__builtin_neon_vrsrau_nv8hi((int16x8_t)__a,
(int16x8_t)__b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_u32(uint32x4_t __a, uint32x4_t __b, const int __c)
{
return (uint32x4_t)__builtin_neon_vrsrau_nv4si((int32x4_t)__a,
(int32x4_t)__b, __c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsraq_n_u64(uint64x2_t __a, uint64x2_t __b, const int __c)
{
return (uint64x2_t)__builtin_neon_vrsrau_nv2di((int64x2_t)__a,
(int64x2_t)__b, __c);
}
/* vsri_n_*: doubleword shift-right-and-insert by immediate (ARM `vsri`;
   one `vsri_n*` builtin per lane width serves signed, unsigned, and
   polynomial types via reinterpreting casts).  The poly64 variant needs the
   crypto FPU, so it is wrapped in push_options/target("fpu=crypto-neon-fp-
   armv8")/pop_options to enable that target locally without leaking it. */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_p64(poly64x1_t __a, poly64x1_t __b, const int __c)
{
return (poly64x1_t)__builtin_neon_vsri_ndi(__a, __b, __c);
}
#pragma GCC pop_options
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_s8(int8x8_t __a, int8x8_t __b, const int __c)
{
return (int8x8_t)__builtin_neon_vsri_nv8qi(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_s16(int16x4_t __a, int16x4_t __b, const int __c)
{
return (int16x4_t)__builtin_neon_vsri_nv4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_s32(int32x2_t __a, int32x2_t __b, const int __c)
{
return (int32x2_t)__builtin_neon_vsri_nv2si(__a, __b, __c);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_s64(int64x1_t __a, int64x1_t __b, const int __c)
{
return (int64x1_t)__builtin_neon_vsri_ndi(__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_u8(uint8x8_t __a, uint8x8_t __b, const int __c)
{
return (uint8x8_t)__builtin_neon_vsri_nv8qi((int8x8_t)__a, (int8x8_t)__b,
__c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_u16(uint16x4_t __a, uint16x4_t __b, const int __c)
{
return (uint16x4_t)__builtin_neon_vsri_nv4hi((int16x4_t)__a, (int16x4_t)__b,
__c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_u32(uint32x2_t __a, uint32x2_t __b, const int __c)
{
return (uint32x2_t)__builtin_neon_vsri_nv2si((int32x2_t)__a, (int32x2_t)__b,
__c);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_u64(uint64x1_t __a, uint64x1_t __b, const int __c)
{
return (uint64x1_t)__builtin_neon_vsri_ndi((int64x1_t)__a, (int64x1_t)__b,
__c);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_p8(poly8x8_t __a, poly8x8_t __b, const int __c)
{
return (poly8x8_t)__builtin_neon_vsri_nv8qi((int8x8_t)__a, (int8x8_t)__b,
__c);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsri_n_p16(poly16x4_t __a, poly16x4_t __b, const int __c)
{
return (poly16x4_t)__builtin_neon_vsri_nv4hi((int16x4_t)__a, (int16x4_t)__b,
__c);
}
/* vsriq_n_*: quadword (128-bit) shift-right-and-insert by immediate.  Same
   structure as the doubleword family: the poly64x2 variant is guarded by a
   local crypto-FPU target via push_options/pop_options; all other lane types
   share the per-width `vsri_n*` builtins through reinterpreting casts. */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_p64(poly64x2_t __a, poly64x2_t __b, const int __c)
{
return (poly64x2_t)__builtin_neon_vsri_nv2di((int64x2_t)__a, (int64x2_t)__b,
__c);
}
#pragma GCC pop_options
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_s8(int8x16_t __a, int8x16_t __b, const int __c)
{
return (int8x16_t)__builtin_neon_vsri_nv16qi(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_s16(int16x8_t __a, int16x8_t __b, const int __c)
{
return (int16x8_t)__builtin_neon_vsri_nv8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_s32(int32x4_t __a, int32x4_t __b, const int __c)
{
return (int32x4_t)__builtin_neon_vsri_nv4si(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_s64(int64x2_t __a, int64x2_t __b, const int __c)
{
return (int64x2_t)__builtin_neon_vsri_nv2di(__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_u8(uint8x16_t __a, uint8x16_t __b, const int __c)
{
return (uint8x16_t)__builtin_neon_vsri_nv16qi((int8x16_t)__a,
(int8x16_t)__b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_u16(uint16x8_t __a, uint16x8_t __b, const int __c)
{
return (uint16x8_t)__builtin_neon_vsri_nv8hi((int16x8_t)__a, (int16x8_t)__b,
__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_u32(uint32x4_t __a, uint32x4_t __b, const int __c)
{
return (uint32x4_t)__builtin_neon_vsri_nv4si((int32x4_t)__a, (int32x4_t)__b,
__c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_u64(uint64x2_t __a, uint64x2_t __b, const int __c)
{
return (uint64x2_t)__builtin_neon_vsri_nv2di((int64x2_t)__a, (int64x2_t)__b,
__c);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_p8(poly8x16_t __a, poly8x16_t __b, const int __c)
{
return (poly8x16_t)__builtin_neon_vsri_nv16qi((int8x16_t)__a,
(int8x16_t)__b, __c);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsriq_n_p16(poly16x8_t __a, poly16x8_t __b, const int __c)
{
return (poly16x8_t)__builtin_neon_vsri_nv8hi((int16x8_t)__a, (int16x8_t)__b,
__c);
}
/* vsli_n_*: doubleword shift-LEFT-and-insert by immediate (ARM `vsli`) —
   the mirror of the vsri_n_* family, using the `vsli_n*` builtins.  The
   poly64 variant again needs the crypto FPU and is scoped with
   push_options/target/pop_options. */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_p64(poly64x1_t __a, poly64x1_t __b, const int __c)
{
return (poly64x1_t)__builtin_neon_vsli_ndi(__a, __b, __c);
}
#pragma GCC pop_options
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_s8(int8x8_t __a, int8x8_t __b, const int __c)
{
return (int8x8_t)__builtin_neon_vsli_nv8qi(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_s16(int16x4_t __a, int16x4_t __b, const int __c)
{
return (int16x4_t)__builtin_neon_vsli_nv4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_s32(int32x2_t __a, int32x2_t __b, const int __c)
{
return (int32x2_t)__builtin_neon_vsli_nv2si(__a, __b, __c);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_s64(int64x1_t __a, int64x1_t __b, const int __c)
{
return (int64x1_t)__builtin_neon_vsli_ndi(__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_u8(uint8x8_t __a, uint8x8_t __b, const int __c)
{
return (uint8x8_t)__builtin_neon_vsli_nv8qi((int8x8_t)__a, (int8x8_t)__b,
__c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_u16(uint16x4_t __a, uint16x4_t __b, const int __c)
{
return (uint16x4_t)__builtin_neon_vsli_nv4hi((int16x4_t)__a, (int16x4_t)__b,
__c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_u32(uint32x2_t __a, uint32x2_t __b, const int __c)
{
return (uint32x2_t)__builtin_neon_vsli_nv2si((int32x2_t)__a, (int32x2_t)__b,
__c);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_u64(uint64x1_t __a, uint64x1_t __b, const int __c)
{
return (uint64x1_t)__builtin_neon_vsli_ndi((int64x1_t)__a, (int64x1_t)__b,
__c);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_p8(poly8x8_t __a, poly8x8_t __b, const int __c)
{
return (poly8x8_t)__builtin_neon_vsli_nv8qi((int8x8_t)__a, (int8x8_t)__b,
__c);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsli_n_p16(poly16x4_t __a, poly16x4_t __b, const int __c)
{
return (poly16x4_t)__builtin_neon_vsli_nv4hi((int16x4_t)__a, (int16x4_t)__b,
__c);
}
/* vsliq_n_*: quadword (128-bit) shift-left-and-insert by immediate; mirrors
   vsriq_n_* but with the `vsli_n*` builtins.  poly64x2 variant is scoped to
   the crypto FPU target via push_options/pop_options. */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_p64(poly64x2_t __a, poly64x2_t __b, const int __c)
{
return (poly64x2_t)__builtin_neon_vsli_nv2di((int64x2_t)__a, (int64x2_t)__b,
__c);
}
#pragma GCC pop_options
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_s8(int8x16_t __a, int8x16_t __b, const int __c)
{
return (int8x16_t)__builtin_neon_vsli_nv16qi(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_s16(int16x8_t __a, int16x8_t __b, const int __c)
{
return (int16x8_t)__builtin_neon_vsli_nv8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_s32(int32x4_t __a, int32x4_t __b, const int __c)
{
return (int32x4_t)__builtin_neon_vsli_nv4si(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_s64(int64x2_t __a, int64x2_t __b, const int __c)
{
return (int64x2_t)__builtin_neon_vsli_nv2di(__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_u8(uint8x16_t __a, uint8x16_t __b, const int __c)
{
return (uint8x16_t)__builtin_neon_vsli_nv16qi((int8x16_t)__a,
(int8x16_t)__b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_u16(uint16x8_t __a, uint16x8_t __b, const int __c)
{
return (uint16x8_t)__builtin_neon_vsli_nv8hi((int16x8_t)__a, (int16x8_t)__b,
__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_u32(uint32x4_t __a, uint32x4_t __b, const int __c)
{
return (uint32x4_t)__builtin_neon_vsli_nv4si((int32x4_t)__a, (int32x4_t)__b,
__c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_u64(uint64x2_t __a, uint64x2_t __b, const int __c)
{
return (uint64x2_t)__builtin_neon_vsli_nv2di((int64x2_t)__a, (int64x2_t)__b,
__c);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_p8(poly8x16_t __a, poly8x16_t __b, const int __c)
{
return (poly8x16_t)__builtin_neon_vsli_nv16qi((int8x16_t)__a,
(int8x16_t)__b, __c);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsliq_n_p16(poly16x8_t __a, poly16x8_t __b, const int __c)
{
return (poly16x8_t)__builtin_neon_vsli_nv8hi((int16x8_t)__a, (int16x8_t)__b,
__c);
}
/* vabs_* / vabsq_*: per-lane absolute value (ARM `vabs`) for signed integer
   and float32 lanes; doubleword wrappers first, then quadword (`q`). */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabs_s8(int8x8_t __a)
{
return (int8x8_t)__builtin_neon_vabsv8qi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabs_s16(int16x4_t __a)
{
return (int16x4_t)__builtin_neon_vabsv4hi(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabs_s32(int32x2_t __a)
{
return (int32x2_t)__builtin_neon_vabsv2si(__a);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabs_f32(float32x2_t __a)
{
return (float32x2_t)__builtin_neon_vabsv2sf(__a);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s8(int8x16_t __a)
{
return (int8x16_t)__builtin_neon_vabsv16qi(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s16(int16x8_t __a)
{
return (int16x8_t)__builtin_neon_vabsv8hi(__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabsq_s32(int32x4_t __a)
{
return (int32x4_t)__builtin_neon_vabsv4si(__a);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabsq_f32(float32x4_t __a)
{
return (float32x4_t)__builtin_neon_vabsv4sf(__a);
}
/* vqabs_* / vqabsq_*: saturating per-lane absolute value (ARM `vqabs`) for
   signed integer lanes; signed-only because saturation is needed for the
   INT_MIN lane case.  Doubleword, then quadword. */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s8(int8x8_t __a)
{
return (int8x8_t)__builtin_neon_vqabsv8qi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s16(int16x4_t __a)
{
return (int16x4_t)__builtin_neon_vqabsv4hi(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqabs_s32(int32x2_t __a)
{
return (int32x2_t)__builtin_neon_vqabsv2si(__a);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqabsq_s8(int8x16_t __a)
{
return (int8x16_t)__builtin_neon_vqabsv16qi(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqabsq_s16(int16x8_t __a)
{
return (int16x8_t)__builtin_neon_vqabsv8hi(__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqabsq_s32(int32x4_t __a)
{
return (int32x4_t)__builtin_neon_vqabsv4si(__a);
}
/* vneg_* / vnegq_*: per-lane negation (ARM `vneg`) for signed integer and
   float32 lanes; doubleword wrappers first, then quadword. */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vneg_s8(int8x8_t __a)
{
return (int8x8_t)__builtin_neon_vnegv8qi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vneg_s16(int16x4_t __a)
{
return (int16x4_t)__builtin_neon_vnegv4hi(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vneg_s32(int32x2_t __a)
{
return (int32x2_t)__builtin_neon_vnegv2si(__a);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vneg_f32(float32x2_t __a)
{
return (float32x2_t)__builtin_neon_vnegv2sf(__a);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vnegq_s8(int8x16_t __a)
{
return (int8x16_t)__builtin_neon_vnegv16qi(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vnegq_s16(int16x8_t __a)
{
return (int16x8_t)__builtin_neon_vnegv8hi(__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vnegq_s32(int32x4_t __a)
{
return (int32x4_t)__builtin_neon_vnegv4si(__a);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vnegq_f32(float32x4_t __a)
{
return (float32x4_t)__builtin_neon_vnegv4sf(__a);
}
/* vqneg_* / vqnegq_*: saturating per-lane negation (ARM `vqneg`) for signed
   integer lanes (saturation covers the INT_MIN lane case).  Doubleword,
   then quadword. */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s8(int8x8_t __a)
{
return (int8x8_t)__builtin_neon_vqnegv8qi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s16(int16x4_t __a)
{
return (int16x4_t)__builtin_neon_vqnegv4hi(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqneg_s32(int32x2_t __a)
{
return (int32x2_t)__builtin_neon_vqnegv2si(__a);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqnegq_s8(int8x16_t __a)
{
return (int8x16_t)__builtin_neon_vqnegv16qi(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqnegq_s16(int16x8_t __a)
{
return (int16x8_t)__builtin_neon_vqnegv8hi(__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqnegq_s32(int32x4_t __a)
{
return (int32x4_t)__builtin_neon_vqnegv4si(__a);
}
/* vmvn_* / vmvnq_*: per-lane bitwise NOT (ARM `vmvn`, "move not").  One
   `vmvn*` builtin per vector width serves signed, unsigned, and poly8 lane
   types through bit-reinterpreting casts.  Doubleword, then quadword. */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvn_s8(int8x8_t __a)
{
return (int8x8_t)__builtin_neon_vmvnv8qi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvn_s16(int16x4_t __a)
{
return (int16x4_t)__builtin_neon_vmvnv4hi(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvn_s32(int32x2_t __a)
{
return (int32x2_t)__builtin_neon_vmvnv2si(__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvn_u8(uint8x8_t __a)
{
return (uint8x8_t)__builtin_neon_vmvnv8qi((int8x8_t)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvn_u16(uint16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vmvnv4hi((int16x4_t)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvn_u32(uint32x2_t __a)
{
return (uint32x2_t)__builtin_neon_vmvnv2si((int32x2_t)__a);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvn_p8(poly8x8_t __a)
{
return (poly8x8_t)__builtin_neon_vmvnv8qi((int8x8_t)__a);
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvnq_s8(int8x16_t __a)
{
return (int8x16_t)__builtin_neon_vmvnv16qi(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvnq_s16(int16x8_t __a)
{
return (int16x8_t)__builtin_neon_vmvnv8hi(__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvnq_s32(int32x4_t __a)
{
return (int32x4_t)__builtin_neon_vmvnv4si(__a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvnq_u8(uint8x16_t __a)
{
return (uint8x16_t)__builtin_neon_vmvnv16qi((int8x16_t)__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvnq_u16(uint16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vmvnv8hi((int16x8_t)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvnq_u32(uint32x4_t __a)
{
return (uint32x4_t)__builtin_neon_vmvnv4si((int32x4_t)__a);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmvnq_p8(poly8x16_t __a)
{
return (poly8x16_t)__builtin_neon_vmvnv16qi((int8x16_t)__a);
}
/* vcls_* / vclsq_*: NEON VCLS intrinsics -- per-lane count of
   consecutive leading bits equal to the sign bit (not counting the
   sign bit itself).  Defined for signed element types only.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcls_s8(int8x8_t __a)
{
  return (int8x8_t)__builtin_neon_vclsv8qi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcls_s16(int16x4_t __a)
{
  return (int16x4_t)__builtin_neon_vclsv4hi(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcls_s32(int32x2_t __a)
{
  return (int32x2_t)__builtin_neon_vclsv2si(__a);
}
/* 128-bit (quadword) variants.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclsq_s8(int8x16_t __a)
{
  return (int8x16_t)__builtin_neon_vclsv16qi(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclsq_s16(int16x8_t __a)
{
  return (int16x8_t)__builtin_neon_vclsv8hi(__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclsq_s32(int32x4_t __a)
{
  return (int32x4_t)__builtin_neon_vclsv4si(__a);
}
/* vclz_* / vclzq_*: NEON VCLZ intrinsics -- per-lane count of leading
   zero bits.  Unsigned variants reuse the signed builtin of the same
   lane shape via vector casts; the bit pattern (and hence the count)
   is unaffected by the cast.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclz_s8(int8x8_t __a)
{
  return (int8x8_t)__builtin_neon_vclzv8qi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclz_s16(int16x4_t __a)
{
  return (int16x4_t)__builtin_neon_vclzv4hi(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclz_s32(int32x2_t __a)
{
  return (int32x2_t)__builtin_neon_vclzv2si(__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclz_u8(uint8x8_t __a)
{
  return (uint8x8_t)__builtin_neon_vclzv8qi((int8x8_t)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclz_u16(uint16x4_t __a)
{
  return (uint16x4_t)__builtin_neon_vclzv4hi((int16x4_t)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclz_u32(uint32x2_t __a)
{
  return (uint32x2_t)__builtin_neon_vclzv2si((int32x2_t)__a);
}
/* 128-bit (quadword) variants.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclzq_s8(int8x16_t __a)
{
  return (int8x16_t)__builtin_neon_vclzv16qi(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclzq_s16(int16x8_t __a)
{
  return (int16x8_t)__builtin_neon_vclzv8hi(__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclzq_s32(int32x4_t __a)
{
  return (int32x4_t)__builtin_neon_vclzv4si(__a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclzq_u8(uint8x16_t __a)
{
  return (uint8x16_t)__builtin_neon_vclzv16qi((int8x16_t)__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclzq_u16(uint16x8_t __a)
{
  return (uint16x8_t)__builtin_neon_vclzv8hi((int16x8_t)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclzq_u32(uint32x4_t __a)
{
  return (uint32x4_t)__builtin_neon_vclzv4si((int32x4_t)__a);
}
/* vcnt_* / vcntq_*: NEON VCNT intrinsics -- per-byte population count
   (number of set bits in each 8-bit lane).  Only 8-bit element types
   exist; unsigned and polynomial variants share the signed builtin
   via vector casts.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcnt_s8(int8x8_t __a)
{
  return (int8x8_t)__builtin_neon_vcntv8qi(__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcnt_u8(uint8x8_t __a)
{
  return (uint8x8_t)__builtin_neon_vcntv8qi((int8x8_t)__a);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcnt_p8(poly8x8_t __a)
{
  return (poly8x8_t)__builtin_neon_vcntv8qi((int8x8_t)__a);
}
/* 128-bit (quadword) variants.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcntq_s8(int8x16_t __a)
{
  return (int8x16_t)__builtin_neon_vcntv16qi(__a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcntq_u8(uint8x16_t __a)
{
  return (uint8x16_t)__builtin_neon_vcntv16qi((int8x16_t)__a);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcntq_p8(poly8x16_t __a)
{
  return (poly8x16_t)__builtin_neon_vcntv16qi((int8x16_t)__a);
}
/* vrecpe_* / vrecpeq_*: NEON VRECPE intrinsics -- per-lane reciprocal
   estimate.  The float variant approximates 1/x; the u32 variant is
   the architecture's unsigned reciprocal estimate operating on the
   same builtin routed through vector casts.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrecpe_f32(float32x2_t __a)
{
  return (float32x2_t)__builtin_neon_vrecpev2sf(__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrecpe_u32(uint32x2_t __a)
{
  return (uint32x2_t)__builtin_neon_vrecpev2si((int32x2_t)__a);
}
/* 128-bit (quadword) variants.  */
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrecpeq_f32(float32x4_t __a)
{
  return (float32x4_t)__builtin_neon_vrecpev4sf(__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrecpeq_u32(uint32x4_t __a)
{
  return (uint32x4_t)__builtin_neon_vrecpev4si((int32x4_t)__a);
}
/* vrsqrte_* / vrsqrteq_*: NEON VRSQRTE intrinsics -- per-lane
   reciprocal square-root estimate (approximates 1/sqrt(x) for the
   float variant; the u32 variant is the architecture's unsigned
   estimate form).  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsqrte_f32(float32x2_t __a)
{
  return (float32x2_t)__builtin_neon_vrsqrtev2sf(__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsqrte_u32(uint32x2_t __a)
{
  return (uint32x2_t)__builtin_neon_vrsqrtev2si((int32x2_t)__a);
}
/* 128-bit (quadword) variants.  */
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsqrteq_f32(float32x4_t __a)
{
  return (float32x4_t)__builtin_neon_vrsqrtev4sf(__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsqrteq_u32(uint32x4_t __a)
{
  return (uint32x4_t)__builtin_neon_vrsqrtev4si((int32x4_t)__a);
}
__extension__ extern __inline int8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s8(int8x8_t __a, const int __b)
{
return (int8_t)__builtin_neon_vget_lanev8qi(__a, __b);
}
__extension__ extern __inline int16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s16(int16x4_t __a, const int __b)
{
return (int16_t)__builtin_neon_vget_lanev4hi(__a, __b);
}
__extension__ extern __inline int32_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s32(int32x2_t __a, const int __b)
{
return (int32_t)__builtin_neon_vget_lanev2si(__a, __b);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
/* Functions cannot accept or return __FP16 types. Even if the function
were marked always-inline so there were no call sites, the declaration
would nonetheless raise an error. Hence, we must use a macro instead. */
/* For big-endian, GCC's vector indices are reversed within each 64
bits compared to the architectural lane indices used by Neon
intrinsics. */
#ifdef __ARM_BIG_ENDIAN
#define __ARM_NUM_LANES(__v) (sizeof(__v) / sizeof(__v[0]))
#define __arm_lane(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec) - 1))
#define __arm_laneq(__vec, __idx) (__idx ^ (__ARM_NUM_LANES(__vec) / 2 - 1))
#else
#define __arm_lane(__vec, __idx) __idx
#define __arm_laneq(__vec, __idx) __idx
#endif
#define vget_lane_f16(__v, __idx) \
__extension__({ \
float16x4_t __vec = (__v); \
__builtin_arm_lane_check(4, __idx); \
float16_t __res = __vec[__arm_lane(__vec, __idx)]; \
__res; \
})
#endif
__extension__ extern __inline float32_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_f32(float32x2_t __a, const int __b)
{
return (float32_t)__builtin_neon_vget_lanev2sf(__a, __b);
}
__extension__ extern __inline uint8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_u8(uint8x8_t __a, const int __b)
{
return (uint8_t)__builtin_neon_vget_laneuv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_u16(uint16x4_t __a, const int __b)
{
return (uint16_t)__builtin_neon_vget_laneuv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_u32(uint32x2_t __a, const int __b)
{
return (uint32_t)__builtin_neon_vget_laneuv2si((int32x2_t)__a, __b);
}
__extension__ extern __inline poly8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_p8(poly8x8_t __a, const int __b)
{
return (poly8_t)__builtin_neon_vget_laneuv8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline poly16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_p16(poly16x4_t __a, const int __b)
{
return (poly16_t)__builtin_neon_vget_laneuv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline int64_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_s64(int64x1_t __a, const int __b)
{
return (int64_t)__builtin_neon_vget_lanedi(__a, __b);
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_p64(poly64x1_t __a, const int __b)
{
return (poly64_t)__builtin_neon_vget_lanedi((int64x1_t)__a, __b);
}
#pragma GCC pop_options
__extension__ extern __inline uint64_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_lane_u64(uint64x1_t __a, const int __b)
{
return (uint64_t)__builtin_neon_vget_lanedi((int64x1_t)__a, __b);
}
/* vgetq_lane_*: extract scalar lane __b from a 128-bit (q-register)
   vector __a.  Mirrors the vget_lane_* d-register family above;
   __b must be a compile-time constant lane index.  */
__extension__ extern __inline int8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s8(int8x16_t __a, const int __b)
{
  return (int8_t)__builtin_neon_vget_lanev16qi(__a, __b);
}
__extension__ extern __inline int16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s16(int16x8_t __a, const int __b)
{
  return (int16_t)__builtin_neon_vget_lanev8hi(__a, __b);
}
__extension__ extern __inline int32_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s32(int32x4_t __a, const int __b)
{
  return (int32_t)__builtin_neon_vget_lanev4si(__a, __b);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
/* FP16 q-register lane extraction must be a macro (functions cannot
   take or return __FP16); uses __arm_laneq for big-endian remapping.  */
#define vgetq_lane_f16(__v, __idx) \
  __extension__({ \
    float16x8_t __vec = (__v); \
    __builtin_arm_lane_check(8, __idx); \
    float16_t __res = __vec[__arm_laneq(__vec, __idx)]; \
    __res; \
  })
#endif
__extension__ extern __inline float32_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_f32(float32x4_t __a, const int __b)
{
  return (float32_t)__builtin_neon_vget_lanev4sf(__a, __b);
}
__extension__ extern __inline uint8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u8(uint8x16_t __a, const int __b)
{
  return (uint8_t)__builtin_neon_vget_laneuv16qi((int8x16_t)__a, __b);
}
__extension__ extern __inline uint16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u16(uint16x8_t __a, const int __b)
{
  return (uint16_t)__builtin_neon_vget_laneuv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline uint32_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u32(uint32x4_t __a, const int __b)
{
  return (uint32_t)__builtin_neon_vget_laneuv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline poly8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_p8(poly8x16_t __a, const int __b)
{
  return (poly8_t)__builtin_neon_vget_laneuv16qi((int8x16_t)__a, __b);
}
__extension__ extern __inline poly16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_p16(poly16x8_t __a, const int __b)
{
  return (poly16_t)__builtin_neon_vget_laneuv8hi((int16x8_t)__a, __b);
}
__extension__ extern __inline int64_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_s64(int64x2_t __a, const int __b)
{
  return (int64_t)__builtin_neon_vget_lanev2di(__a, __b);
}
/* poly64 variant requires the crypto extension (local target pragma).  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_p64(poly64x2_t __a, const int __b)
{
  return (poly64_t)__builtin_neon_vget_lanev2di((int64x2_t)__a, __b);
}
#pragma GCC pop_options
__extension__ extern __inline uint64_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vgetq_lane_u64(uint64x2_t __a, const int __b)
{
  return (uint64_t)__builtin_neon_vget_lanev2di((int64x2_t)__a, __b);
}
/* vset_lane_*: return a copy of 64-bit vector __b with lane __c
   replaced by scalar __a.  __c must be a compile-time constant lane
   index.  Scalars are first converted to GCC's internal element
   types (__builtin_neon_qi/hi/si/sf/di) expected by the builtins.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s8(int8_t __a, int8x8_t __b, const int __c)
{
  return (int8x8_t)__builtin_neon_vset_lanev8qi((__builtin_neon_qi)__a, __b,
                                                __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s16(int16_t __a, int16x4_t __b, const int __c)
{
  return (int16x4_t)__builtin_neon_vset_lanev4hi((__builtin_neon_hi)__a, __b,
                                                 __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s32(int32_t __a, int32x2_t __b, const int __c)
{
  return (int32x2_t)__builtin_neon_vset_lanev2si((__builtin_neon_si)__a, __b,
                                                 __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
/* FP16 lane insertion must be a macro (functions cannot take or
   return __FP16); mutates a local copy and yields it.  */
#define vset_lane_f16(__e, __v, __idx) \
  __extension__({ \
    float16_t __elem = (__e); \
    float16x4_t __vec = (__v); \
    __builtin_arm_lane_check(4, __idx); \
    __vec[__arm_lane(__vec, __idx)] = __elem; \
    __vec; \
  })
#endif
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_f32(float32_t __a, float32x2_t __b, const int __c)
{
  return (float32x2_t)__builtin_neon_vset_lanev2sf((__builtin_neon_sf)__a,
                                                   __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u8(uint8_t __a, uint8x8_t __b, const int __c)
{
  return (uint8x8_t)__builtin_neon_vset_lanev8qi((__builtin_neon_qi)__a,
                                                 (int8x8_t)__b, __c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u16(uint16_t __a, uint16x4_t __b, const int __c)
{
  return (uint16x4_t)__builtin_neon_vset_lanev4hi((__builtin_neon_hi)__a,
                                                  (int16x4_t)__b, __c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u32(uint32_t __a, uint32x2_t __b, const int __c)
{
  return (uint32x2_t)__builtin_neon_vset_lanev2si((__builtin_neon_si)__a,
                                                  (int32x2_t)__b, __c);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_p8(poly8_t __a, poly8x8_t __b, const int __c)
{
  return (poly8x8_t)__builtin_neon_vset_lanev8qi((__builtin_neon_qi)__a,
                                                 (int8x8_t)__b, __c);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_p16(poly16_t __a, poly16x4_t __b, const int __c)
{
  return (poly16x4_t)__builtin_neon_vset_lanev4hi((__builtin_neon_hi)__a,
                                                  (int16x4_t)__b, __c);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_s64(int64_t __a, int64x1_t __b, const int __c)
{
  return (int64x1_t)__builtin_neon_vset_lanedi((__builtin_neon_di)__a, __b,
                                               __c);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_u64(uint64_t __a, uint64x1_t __b, const int __c)
{
  return (uint64x1_t)__builtin_neon_vset_lanedi((__builtin_neon_di)__a,
                                                (int64x1_t)__b, __c);
}
/* poly64 variant requires the crypto extension (local target pragma).  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vset_lane_p64(poly64_t __a, poly64x1_t __b, const int __c)
{
  return (poly64x1_t)__builtin_neon_vset_lanedi((__builtin_neon_di)__a,
                                                (int64x1_t)__b, __c);
}
#pragma GCC pop_options
/* vsetq_lane_*: return a copy of 128-bit vector __b with lane __c
   replaced by scalar __a.  Mirrors the vset_lane_* d-register family;
   __c must be a compile-time constant lane index.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s8(int8_t __a, int8x16_t __b, const int __c)
{
  return (int8x16_t)__builtin_neon_vset_lanev16qi((__builtin_neon_qi)__a, __b,
                                                  __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s16(int16_t __a, int16x8_t __b, const int __c)
{
  return (int16x8_t)__builtin_neon_vset_lanev8hi((__builtin_neon_hi)__a, __b,
                                                 __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s32(int32_t __a, int32x4_t __b, const int __c)
{
  return (int32x4_t)__builtin_neon_vset_lanev4si((__builtin_neon_si)__a, __b,
                                                 __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
/* FP16 q-register lane insertion must be a macro (functions cannot
   take or return __FP16).  */
#define vsetq_lane_f16(__e, __v, __idx) \
  __extension__({ \
    float16_t __elem = (__e); \
    float16x8_t __vec = (__v); \
    __builtin_arm_lane_check(8, __idx); \
    __vec[__arm_laneq(__vec, __idx)] = __elem; \
    __vec; \
  })
#endif
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_f32(float32_t __a, float32x4_t __b, const int __c)
{
  return (float32x4_t)__builtin_neon_vset_lanev4sf((__builtin_neon_sf)__a,
                                                   __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u8(uint8_t __a, uint8x16_t __b, const int __c)
{
  return (uint8x16_t)__builtin_neon_vset_lanev16qi((__builtin_neon_qi)__a,
                                                   (int8x16_t)__b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u16(uint16_t __a, uint16x8_t __b, const int __c)
{
  return (uint16x8_t)__builtin_neon_vset_lanev8hi((__builtin_neon_hi)__a,
                                                  (int16x8_t)__b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u32(uint32_t __a, uint32x4_t __b, const int __c)
{
  return (uint32x4_t)__builtin_neon_vset_lanev4si((__builtin_neon_si)__a,
                                                  (int32x4_t)__b, __c);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_p8(poly8_t __a, poly8x16_t __b, const int __c)
{
  return (poly8x16_t)__builtin_neon_vset_lanev16qi((__builtin_neon_qi)__a,
                                                   (int8x16_t)__b, __c);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_p16(poly16_t __a, poly16x8_t __b, const int __c)
{
  return (poly16x8_t)__builtin_neon_vset_lanev8hi((__builtin_neon_hi)__a,
                                                  (int16x8_t)__b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_s64(int64_t __a, int64x2_t __b, const int __c)
{
  return (int64x2_t)__builtin_neon_vset_lanev2di((__builtin_neon_di)__a, __b,
                                                 __c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_u64(uint64_t __a, uint64x2_t __b, const int __c)
{
  return (uint64x2_t)__builtin_neon_vset_lanev2di((__builtin_neon_di)__a,
                                                  (int64x2_t)__b, __c);
}
/* poly64 variants require the crypto extension; this push_options
   region is closed by the pop_options that follows vcreate_p64 in
   this header.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsetq_lane_p64(poly64_t __a, poly64x2_t __b, const int __c)
{
  return (poly64x2_t)__builtin_neon_vset_lanev2di((__builtin_neon_di)__a,
                                                  (int64x2_t)__b, __c);
}
/* vcreate_*: reinterpret a 64-bit integer bit pattern __a as a 64-bit
   NEON vector of the requested element type (no value conversion --
   pure bit reinterpretation).  vcreate_p64 sits inside the crypto
   push_options region opened before vsetq_lane_p64 above; the
   pop_options below closes it.  */
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_p64(uint64_t __a)
{
  return (poly64x1_t)__builtin_neon_vcreatedi((__builtin_neon_di)__a);
}
#pragma GCC pop_options
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s8(uint64_t __a)
{
  return (int8x8_t)__builtin_neon_vcreatev8qi((__builtin_neon_di)__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s16(uint64_t __a)
{
  return (int16x4_t)__builtin_neon_vcreatev4hi((__builtin_neon_di)__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s32(uint64_t __a)
{
  return (int32x2_t)__builtin_neon_vcreatev2si((__builtin_neon_di)__a);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_s64(uint64_t __a)
{
  return (int64x1_t)__builtin_neon_vcreatedi((__builtin_neon_di)__a);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
/* FP16 variant is a plain vector cast; no builtin exists for it.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_f16(uint64_t __a)
{
  return (float16x4_t)__a;
}
#endif
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_f32(uint64_t __a)
{
  return (float32x2_t)__builtin_neon_vcreatev2sf((__builtin_neon_di)__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u8(uint64_t __a)
{
  return (uint8x8_t)__builtin_neon_vcreatev8qi((__builtin_neon_di)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u16(uint64_t __a)
{
  return (uint16x4_t)__builtin_neon_vcreatev4hi((__builtin_neon_di)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u32(uint64_t __a)
{
  return (uint32x2_t)__builtin_neon_vcreatev2si((__builtin_neon_di)__a);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_u64(uint64_t __a)
{
  return (uint64x1_t)__builtin_neon_vcreatedi((__builtin_neon_di)__a);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_p8(uint64_t __a)
{
  return (poly8x8_t)__builtin_neon_vcreatev8qi((__builtin_neon_di)__a);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcreate_p16(uint64_t __a)
{
  return (poly16x4_t)__builtin_neon_vcreatev4hi((__builtin_neon_di)__a);
}
/* vdup_n_* / vdupq_n_*: NEON VDUP intrinsics -- broadcast scalar __a
   into every lane of a 64-bit (vdup_n) or 128-bit (vdupq_n) vector.
   Scalars are converted to GCC's internal element types
   (__builtin_neon_qi/hi/si/sf/di) expected by the builtins.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s8(int8_t __a)
{
  return (int8x8_t)__builtin_neon_vdup_nv8qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s16(int16_t __a)
{
  return (int16x4_t)__builtin_neon_vdup_nv4hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s32(int32_t __a)
{
  return (int32x2_t)__builtin_neon_vdup_nv2si((__builtin_neon_si)__a);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_f32(float32_t __a)
{
  return (float32x2_t)__builtin_neon_vdup_nv2sf((__builtin_neon_sf)__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u8(uint8_t __a)
{
  return (uint8x8_t)__builtin_neon_vdup_nv8qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u16(uint16_t __a)
{
  return (uint16x4_t)__builtin_neon_vdup_nv4hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u32(uint32_t __a)
{
  return (uint32x2_t)__builtin_neon_vdup_nv2si((__builtin_neon_si)__a);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_p8(poly8_t __a)
{
  return (poly8x8_t)__builtin_neon_vdup_nv8qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_p16(poly16_t __a)
{
  return (poly16x4_t)__builtin_neon_vdup_nv4hi((__builtin_neon_hi)__a);
}
/* poly64 variant requires the crypto extension (local target pragma).  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_p64(poly64_t __a)
{
  return (poly64x1_t)__builtin_neon_vdup_ndi((__builtin_neon_di)__a);
}
#pragma GCC pop_options
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_s64(int64_t __a)
{
  return (int64x1_t)__builtin_neon_vdup_ndi((__builtin_neon_di)__a);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_u64(uint64_t __a)
{
  return (uint64x1_t)__builtin_neon_vdup_ndi((__builtin_neon_di)__a);
}
/* 128-bit (quadword) broadcast variants.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_p64(poly64_t __a)
{
  return (poly64x2_t)__builtin_neon_vdup_nv2di((__builtin_neon_di)__a);
}
#pragma GCC pop_options
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s8(int8_t __a)
{
  return (int8x16_t)__builtin_neon_vdup_nv16qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s16(int16_t __a)
{
  return (int16x8_t)__builtin_neon_vdup_nv8hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s32(int32_t __a)
{
  return (int32x4_t)__builtin_neon_vdup_nv4si((__builtin_neon_si)__a);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_f32(float32_t __a)
{
  return (float32x4_t)__builtin_neon_vdup_nv4sf((__builtin_neon_sf)__a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u8(uint8_t __a)
{
  return (uint8x16_t)__builtin_neon_vdup_nv16qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u16(uint16_t __a)
{
  return (uint16x8_t)__builtin_neon_vdup_nv8hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u32(uint32_t __a)
{
  return (uint32x4_t)__builtin_neon_vdup_nv4si((__builtin_neon_si)__a);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_p8(poly8_t __a)
{
  return (poly8x16_t)__builtin_neon_vdup_nv16qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_p16(poly16_t __a)
{
  return (poly16x8_t)__builtin_neon_vdup_nv8hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_s64(int64_t __a)
{
  return (int64x2_t)__builtin_neon_vdup_nv2di((__builtin_neon_di)__a);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_u64(uint64_t __a)
{
  return (uint64x2_t)__builtin_neon_vdup_nv2di((__builtin_neon_di)__a);
}
/* vmov_n_* / vmovq_n_*: broadcast scalar __a into every lane.  These
   are exact aliases of the vdup_n_*/vdupq_n_* intrinsics above -- each
   calls the identical __builtin_neon_vdup_n* builtin.  Both names are
   kept for intrinsic-set compatibility.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_s8(int8_t __a)
{
  return (int8x8_t)__builtin_neon_vdup_nv8qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_s16(int16_t __a)
{
  return (int16x4_t)__builtin_neon_vdup_nv4hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_s32(int32_t __a)
{
  return (int32x2_t)__builtin_neon_vdup_nv2si((__builtin_neon_si)__a);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_f32(float32_t __a)
{
  return (float32x2_t)__builtin_neon_vdup_nv2sf((__builtin_neon_sf)__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_u8(uint8_t __a)
{
  return (uint8x8_t)__builtin_neon_vdup_nv8qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_u16(uint16_t __a)
{
  return (uint16x4_t)__builtin_neon_vdup_nv4hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_u32(uint32_t __a)
{
  return (uint32x2_t)__builtin_neon_vdup_nv2si((__builtin_neon_si)__a);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_p8(poly8_t __a)
{
  return (poly8x8_t)__builtin_neon_vdup_nv8qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_p16(poly16_t __a)
{
  return (poly16x4_t)__builtin_neon_vdup_nv4hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_s64(int64_t __a)
{
  return (int64x1_t)__builtin_neon_vdup_ndi((__builtin_neon_di)__a);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_u64(uint64_t __a)
{
  return (uint64x1_t)__builtin_neon_vdup_ndi((__builtin_neon_di)__a);
}
/* 128-bit (quadword) broadcast variants.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_s8(int8_t __a)
{
  return (int8x16_t)__builtin_neon_vdup_nv16qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_s16(int16_t __a)
{
  return (int16x8_t)__builtin_neon_vdup_nv8hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_s32(int32_t __a)
{
  return (int32x4_t)__builtin_neon_vdup_nv4si((__builtin_neon_si)__a);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_f32(float32_t __a)
{
  return (float32x4_t)__builtin_neon_vdup_nv4sf((__builtin_neon_sf)__a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_u8(uint8_t __a)
{
  return (uint8x16_t)__builtin_neon_vdup_nv16qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_u16(uint16_t __a)
{
  return (uint16x8_t)__builtin_neon_vdup_nv8hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_u32(uint32_t __a)
{
  return (uint32x4_t)__builtin_neon_vdup_nv4si((__builtin_neon_si)__a);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_p8(poly8_t __a)
{
  return (poly8x16_t)__builtin_neon_vdup_nv16qi((__builtin_neon_qi)__a);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_p16(poly16_t __a)
{
  return (poly16x8_t)__builtin_neon_vdup_nv8hi((__builtin_neon_hi)__a);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_s64(int64_t __a)
{
  return (int64x2_t)__builtin_neon_vdup_nv2di((__builtin_neon_di)__a);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_u64(uint64_t __a)
{
  return (uint64x2_t)__builtin_neon_vdup_nv2di((__builtin_neon_di)__a);
}
/* vdup_lane_<t>: broadcast one lane of a 64-bit input vector across every
   lane of a 64-bit result.  __b is the lane index; it is declared `const int`
   because the underlying builtin requires an integer-constant (immediate)
   operand.  Unsigned and polynomial variants reuse the signed builtin via
   bit-preserving vector casts — lane duplication is type-agnostic at the bit
   level.  Semantics per the ARM NEON intrinsics reference.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s8(int8x8_t __a, const int __b)
{
    return (int8x8_t)__builtin_neon_vdup_lanev8qi(__a, __b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s16(int16x4_t __a, const int __b)
{
    return (int16x4_t)__builtin_neon_vdup_lanev4hi(__a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s32(int32x2_t __a, const int __b)
{
    return (int32x2_t)__builtin_neon_vdup_lanev2si(__a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_f32(float32x2_t __a, const int __b)
{
    return (float32x2_t)__builtin_neon_vdup_lanev2sf(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u8(uint8x8_t __a, const int __b)
{
    return (uint8x8_t)__builtin_neon_vdup_lanev8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u16(uint16x4_t __a, const int __b)
{
    return (uint16x4_t)__builtin_neon_vdup_lanev4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u32(uint32x2_t __a, const int __b)
{
    return (uint32x2_t)__builtin_neon_vdup_lanev2si((int32x2_t)__a, __b);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_p8(poly8x8_t __a, const int __b)
{
    return (poly8x8_t)__builtin_neon_vdup_lanev8qi((int8x8_t)__a, __b);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_p16(poly16x4_t __a, const int __b)
{
    return (poly16x4_t)__builtin_neon_vdup_lanev4hi((int16x4_t)__a, __b);
}
/* poly64 support requires the crypto extension, hence the target pragma.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_p64(poly64x1_t __a, const int __b)
{
    return (poly64x1_t)__builtin_neon_vdup_lanedi(__a, __b);
}
#pragma GCC pop_options
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_s64(int64x1_t __a, const int __b)
{
    return (int64x1_t)__builtin_neon_vdup_lanedi(__a, __b);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_u64(uint64x1_t __a, const int __b)
{
    return (uint64x1_t)__builtin_neon_vdup_lanedi((int64x1_t)__a, __b);
}
/* vdupq_lane_<t>: broadcast one lane of a 64-bit input vector across every
   lane of a 128-bit (quad) result.  __b is the lane index into the 64-bit
   source and must be an integer-constant expression.  Unsigned/poly variants
   reuse the signed builtin via bit-preserving casts.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s8(int8x8_t __a, const int __b)
{
    return (int8x16_t)__builtin_neon_vdup_lanev16qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s16(int16x4_t __a, const int __b)
{
    return (int16x8_t)__builtin_neon_vdup_lanev8hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s32(int32x2_t __a, const int __b)
{
    return (int32x4_t)__builtin_neon_vdup_lanev4si(__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_f32(float32x2_t __a, const int __b)
{
    return (float32x4_t)__builtin_neon_vdup_lanev4sf(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u8(uint8x8_t __a, const int __b)
{
    return (uint8x16_t)__builtin_neon_vdup_lanev16qi((int8x8_t)__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u16(uint16x4_t __a, const int __b)
{
    return (uint16x8_t)__builtin_neon_vdup_lanev8hi((int16x4_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u32(uint32x2_t __a, const int __b)
{
    return (uint32x4_t)__builtin_neon_vdup_lanev4si((int32x2_t)__a, __b);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_p8(poly8x8_t __a, const int __b)
{
    return (poly8x16_t)__builtin_neon_vdup_lanev16qi((int8x8_t)__a, __b);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_p16(poly16x4_t __a, const int __b)
{
    return (poly16x8_t)__builtin_neon_vdup_lanev8hi((int16x4_t)__a, __b);
}
/* poly64 support requires the crypto extension, hence the target pragma.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_p64(poly64x1_t __a, const int __b)
{
    return (poly64x2_t)__builtin_neon_vdup_lanev2di(__a, __b);
}
#pragma GCC pop_options
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_s64(int64x1_t __a, const int __b)
{
    return (int64x2_t)__builtin_neon_vdup_lanev2di(__a, __b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_u64(uint64x1_t __a, const int __b)
{
    return (uint64x2_t)__builtin_neon_vdup_lanev2di((int64x1_t)__a, __b);
}
/* vcombine_<t>: join two 64-bit vectors into one 128-bit vector; __a becomes
   the low half and __b the high half of the result.  Unsigned/poly variants
   reuse the signed builtin via bit-preserving casts.  The poly64 variant
   requires the crypto extension, hence the target pragma.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_p64(poly64x1_t __a, poly64x1_t __b)
{
    return (poly64x2_t)__builtin_neon_vcombinedi(__a, __b);
}
#pragma GCC pop_options
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s8(int8x8_t __a, int8x8_t __b)
{
    return (int8x16_t)__builtin_neon_vcombinev8qi(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s16(int16x4_t __a, int16x4_t __b)
{
    return (int16x8_t)__builtin_neon_vcombinev4hi(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s32(int32x2_t __a, int32x2_t __b)
{
    return (int32x4_t)__builtin_neon_vcombinev2si(__a, __b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_s64(int64x1_t __a, int64x1_t __b)
{
    return (int64x2_t)__builtin_neon_vcombinedi(__a, __b);
}
/* float16 variant only exists when the target has an FP16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_f16(float16x4_t __a, float16x4_t __b)
{
    return __builtin_neon_vcombinev4hf(__a, __b);
}
#endif
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_f32(float32x2_t __a, float32x2_t __b)
{
    return (float32x4_t)__builtin_neon_vcombinev2sf(__a, __b);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u8(uint8x8_t __a, uint8x8_t __b)
{
    return (uint8x16_t)__builtin_neon_vcombinev8qi((int8x8_t)__a,
                                                   (int8x8_t)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u16(uint16x4_t __a, uint16x4_t __b)
{
    return (uint16x8_t)__builtin_neon_vcombinev4hi((int16x4_t)__a,
                                                   (int16x4_t)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u32(uint32x2_t __a, uint32x2_t __b)
{
    return (uint32x4_t)__builtin_neon_vcombinev2si((int32x2_t)__a,
                                                   (int32x2_t)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_u64(uint64x1_t __a, uint64x1_t __b)
{
    return (uint64x2_t)__builtin_neon_vcombinedi((int64x1_t)__a,
                                                 (int64x1_t)__b);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_p8(poly8x8_t __a, poly8x8_t __b)
{
    return (poly8x16_t)__builtin_neon_vcombinev8qi((int8x8_t)__a,
                                                   (int8x8_t)__b);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcombine_p16(poly16x4_t __a, poly16x4_t __b)
{
    return (poly16x8_t)__builtin_neon_vcombinev4hi((int16x4_t)__a,
                                                   (int16x4_t)__b);
}
/* vget_high_<t>: extract the upper 64-bit half of a 128-bit vector.
   Unsigned/poly variants reuse the signed builtin via bit-preserving casts.
   The poly64 variant requires the crypto extension, hence the target
   pragma; the float16 variant only exists when the target has an FP16
   format.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_p64(poly64x2_t __a)
{
    return (poly64x1_t)__builtin_neon_vget_highv2di((int64x2_t)__a);
}
#pragma GCC pop_options
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s8(int8x16_t __a)
{
    return (int8x8_t)__builtin_neon_vget_highv16qi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s16(int16x8_t __a)
{
    return (int16x4_t)__builtin_neon_vget_highv8hi(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s32(int32x4_t __a)
{
    return (int32x2_t)__builtin_neon_vget_highv4si(__a);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_s64(int64x2_t __a)
{
    return (int64x1_t)__builtin_neon_vget_highv2di(__a);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_f16(float16x8_t __a)
{
    return __builtin_neon_vget_highv8hf(__a);
}
#endif
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_f32(float32x4_t __a)
{
    return (float32x2_t)__builtin_neon_vget_highv4sf(__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u8(uint8x16_t __a)
{
    return (uint8x8_t)__builtin_neon_vget_highv16qi((int8x16_t)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u16(uint16x8_t __a)
{
    return (uint16x4_t)__builtin_neon_vget_highv8hi((int16x8_t)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u32(uint32x4_t __a)
{
    return (uint32x2_t)__builtin_neon_vget_highv4si((int32x4_t)__a);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_u64(uint64x2_t __a)
{
    return (uint64x1_t)__builtin_neon_vget_highv2di((int64x2_t)__a);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_p8(poly8x16_t __a)
{
    return (poly8x8_t)__builtin_neon_vget_highv16qi((int8x16_t)__a);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_high_p16(poly16x8_t __a)
{
    return (poly16x4_t)__builtin_neon_vget_highv8hi((int16x8_t)__a);
}
/* vget_low_<t>: extract the lower 64-bit half of a 128-bit vector.
   Unsigned/poly variants reuse the signed builtin via bit-preserving casts.
   The float16 variant only exists when the target has an FP16 format; the
   poly64 variant requires the crypto extension, hence the target pragma.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s8(int8x16_t __a)
{
    return (int8x8_t)__builtin_neon_vget_lowv16qi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s16(int16x8_t __a)
{
    return (int16x4_t)__builtin_neon_vget_lowv8hi(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s32(int32x4_t __a)
{
    return (int32x2_t)__builtin_neon_vget_lowv4si(__a);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_f16(float16x8_t __a)
{
    return __builtin_neon_vget_lowv8hf(__a);
}
#endif
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_f32(float32x4_t __a)
{
    return (float32x2_t)__builtin_neon_vget_lowv4sf(__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u8(uint8x16_t __a)
{
    return (uint8x8_t)__builtin_neon_vget_lowv16qi((int8x16_t)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u16(uint16x8_t __a)
{
    return (uint16x4_t)__builtin_neon_vget_lowv8hi((int16x8_t)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u32(uint32x4_t __a)
{
    return (uint32x2_t)__builtin_neon_vget_lowv4si((int32x4_t)__a);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_p8(poly8x16_t __a)
{
    return (poly8x8_t)__builtin_neon_vget_lowv16qi((int8x16_t)__a);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_p16(poly16x8_t __a)
{
    return (poly16x4_t)__builtin_neon_vget_lowv8hi((int16x8_t)__a);
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_p64(poly64x2_t __a)
{
    return (poly64x1_t)__builtin_neon_vget_lowv2di((int64x2_t)__a);
}
#pragma GCC pop_options
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_s64(int64x2_t __a)
{
    return (int64x1_t)__builtin_neon_vget_lowv2di(__a);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vget_low_u64(uint64x2_t __a)
{
    return (uint64x1_t)__builtin_neon_vget_lowv2di((int64x2_t)__a);
}
/* vcvt[q]_<dst>_<src>: lane-wise conversions between 32-bit float and
   32-bit integer vectors.  Separate signed (vcvts*) and unsigned (vcvtu*)
   builtins are used; the unsigned variants route through bit-preserving
   casts to/from the signed vector types.  The f16<->f32 conversions at the
   end require the neon-fp16 FPU (hence the target pragma) and an FP16
   format on the target.  */
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_s32_f32(float32x2_t __a)
{
    return (int32x2_t)__builtin_neon_vcvtsv2sf(__a);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_s32(int32x2_t __a)
{
    return (float32x2_t)__builtin_neon_vcvtsv2si(__a);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_u32(uint32x2_t __a)
{
    return (float32x2_t)__builtin_neon_vcvtuv2si((int32x2_t)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_u32_f32(float32x2_t __a)
{
    return (uint32x2_t)__builtin_neon_vcvtuv2sf(__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_s32_f32(float32x4_t __a)
{
    return (int32x4_t)__builtin_neon_vcvtsv4sf(__a);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f32_s32(int32x4_t __a)
{
    return (float32x4_t)__builtin_neon_vcvtsv4si(__a);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f32_u32(uint32x4_t __a)
{
    return (float32x4_t)__builtin_neon_vcvtuv4si((int32x4_t)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_u32_f32(float32x4_t __a)
{
    return (uint32x4_t)__builtin_neon_vcvtuv4sf(__a);
}
#pragma GCC push_options
#pragma GCC target("fpu=neon-fp16")
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f16_f32(float32x4_t __a)
{
    return (float16x4_t)__builtin_neon_vcvtv4hfv4sf(__a);
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f32_f16(float16x4_t __a)
{
    return (float32x4_t)__builtin_neon_vcvtv4sfv4hf(__a);
}
#endif
#pragma GCC pop_options
/* vcvt[q]_n_<dst>_<src>: fixed-point conversions between float and integer
   vectors.  __b is the number of fraction bits and is declared `const int`
   because the builtin requires an integer-constant (immediate) operand.
   Unsigned variants route through bit-preserving casts as elsewhere.  */
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_s32_f32(float32x2_t __a, const int __b)
{
    return (int32x2_t)__builtin_neon_vcvts_nv2sf(__a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_f32_s32(int32x2_t __a, const int __b)
{
    return (float32x2_t)__builtin_neon_vcvts_nv2si(__a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_f32_u32(uint32x2_t __a, const int __b)
{
    return (float32x2_t)__builtin_neon_vcvtu_nv2si((int32x2_t)__a, __b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_u32_f32(float32x2_t __a, const int __b)
{
    return (uint32x2_t)__builtin_neon_vcvtu_nv2sf(__a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_s32_f32(float32x4_t __a, const int __b)
{
    return (int32x4_t)__builtin_neon_vcvts_nv4sf(__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_f32_s32(int32x4_t __a, const int __b)
{
    return (float32x4_t)__builtin_neon_vcvts_nv4si(__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_f32_u32(uint32x4_t __a, const int __b)
{
    return (float32x4_t)__builtin_neon_vcvtu_nv4si((int32x4_t)__a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_u32_f32(float32x4_t __a, const int __b)
{
    return (uint32x4_t)__builtin_neon_vcvtu_nv4sf(__a, __b);
}
/* Narrowing moves: each takes a 128-bit vector and produces a 64-bit vector
   with half-width elements.
     vmovn    — plain truncation (keeps the low half of each element);
     vqmovn   — saturating narrow, same-signedness (vqmovns*/vqmovnu*);
     vqmovun  — saturating narrow of a SIGNED input to an UNSIGNED result.
   Unsigned variants reuse builtins via bit-preserving casts as elsewhere.
   Semantics per the ARM NEON intrinsics reference.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovn_s16(int16x8_t __a)
{
    return (int8x8_t)__builtin_neon_vmovnv8hi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovn_s32(int32x4_t __a)
{
    return (int16x4_t)__builtin_neon_vmovnv4si(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovn_s64(int64x2_t __a)
{
    return (int32x2_t)__builtin_neon_vmovnv2di(__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovn_u16(uint16x8_t __a)
{
    return (uint8x8_t)__builtin_neon_vmovnv8hi((int16x8_t)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovn_u32(uint32x4_t __a)
{
    return (uint16x4_t)__builtin_neon_vmovnv4si((int32x4_t)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovn_u64(uint64x2_t __a)
{
    return (uint32x2_t)__builtin_neon_vmovnv2di((int64x2_t)__a);
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_s16(int16x8_t __a)
{
    return (int8x8_t)__builtin_neon_vqmovnsv8hi(__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_s32(int32x4_t __a)
{
    return (int16x4_t)__builtin_neon_vqmovnsv4si(__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_s64(int64x2_t __a)
{
    return (int32x2_t)__builtin_neon_vqmovnsv2di(__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_u16(uint16x8_t __a)
{
    return (uint8x8_t)__builtin_neon_vqmovnuv8hi((int16x8_t)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_u32(uint32x4_t __a)
{
    return (uint16x4_t)__builtin_neon_vqmovnuv4si((int32x4_t)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqmovn_u64(uint64x2_t __a)
{
    return (uint32x2_t)__builtin_neon_vqmovnuv2di((int64x2_t)__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_s16(int16x8_t __a)
{
    return (uint8x8_t)__builtin_neon_vqmovunv8hi(__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_s32(int32x4_t __a)
{
    return (uint16x4_t)__builtin_neon_vqmovunv4si(__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqmovun_s64(int64x2_t __a)
{
    return (uint32x2_t)__builtin_neon_vqmovunv2di(__a);
}
/* vmovl_<t>: widening move — each element of the 64-bit input is extended
   to twice its width, yielding a 128-bit result.  Distinct signed (vmovls*)
   and unsigned (vmovlu*) builtins select sign- vs zero-extension.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovl_s8(int8x8_t __a)
{
    return (int16x8_t)__builtin_neon_vmovlsv8qi(__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovl_s16(int16x4_t __a)
{
    return (int32x4_t)__builtin_neon_vmovlsv4hi(__a);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovl_s32(int32x2_t __a)
{
    return (int64x2_t)__builtin_neon_vmovlsv2si(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovl_u8(uint8x8_t __a)
{
    return (uint16x8_t)__builtin_neon_vmovluv8qi((int8x8_t)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovl_u16(uint16x4_t __a)
{
    return (uint32x4_t)__builtin_neon_vmovluv4hi((int16x4_t)__a);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovl_u32(uint32x2_t __a)
{
    return (uint64x2_t)__builtin_neon_vmovluv2si((int32x2_t)__a);
}
/* vtblN_<t>: byte table lookup.  __a is a table of N 8x8 vectors and __b
   holds per-lane byte indices into it.  For N >= 2 the multi-vector table
   struct is repacked into the opaque wide-integer type the builtin expects
   (__builtin_neon_ti for 2, _ei for 3, _oi for 4) via an anonymous union —
   a type-pun, not a conversion; the bits are passed through unchanged.
   Out-of-range index behavior follows the ARM VTBL definition (result byte
   is 0 per the ARM NEON intrinsics reference).  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl1_s8(int8x8_t __a, int8x8_t __b)
{
    return (int8x8_t)__builtin_neon_vtbl1v8qi(__a, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl1_u8(uint8x8_t __a, uint8x8_t __b)
{
    return (uint8x8_t)__builtin_neon_vtbl1v8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl1_p8(poly8x8_t __a, uint8x8_t __b)
{
    return (poly8x8_t)__builtin_neon_vtbl1v8qi((int8x8_t)__a, (int8x8_t)__b);
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl2_s8(int8x8x2_t __a, int8x8_t __b)
{
    /* Repack the 2-vector table into the builtin's opaque TI-mode type.  */
    union
    {
        int8x8x2_t __i;
        __builtin_neon_ti __o;
    } __au = {__a};
    return (int8x8_t)__builtin_neon_vtbl2v8qi(__au.__o, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl2_u8(uint8x8x2_t __a, uint8x8_t __b)
{
    union
    {
        uint8x8x2_t __i;
        __builtin_neon_ti __o;
    } __au = {__a};
    return (uint8x8_t)__builtin_neon_vtbl2v8qi(__au.__o, (int8x8_t)__b);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl2_p8(poly8x8x2_t __a, uint8x8_t __b)
{
    union
    {
        poly8x8x2_t __i;
        __builtin_neon_ti __o;
    } __au = {__a};
    return (poly8x8_t)__builtin_neon_vtbl2v8qi(__au.__o, (int8x8_t)__b);
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_s8(int8x8x3_t __a, int8x8_t __b)
{
    /* 3-vector tables use the EI-mode opaque type.  */
    union
    {
        int8x8x3_t __i;
        __builtin_neon_ei __o;
    } __au = {__a};
    return (int8x8_t)__builtin_neon_vtbl3v8qi(__au.__o, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_u8(uint8x8x3_t __a, uint8x8_t __b)
{
    union
    {
        uint8x8x3_t __i;
        __builtin_neon_ei __o;
    } __au = {__a};
    return (uint8x8_t)__builtin_neon_vtbl3v8qi(__au.__o, (int8x8_t)__b);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl3_p8(poly8x8x3_t __a, uint8x8_t __b)
{
    union
    {
        poly8x8x3_t __i;
        __builtin_neon_ei __o;
    } __au = {__a};
    return (poly8x8_t)__builtin_neon_vtbl3v8qi(__au.__o, (int8x8_t)__b);
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_s8(int8x8x4_t __a, int8x8_t __b)
{
    /* 4-vector tables use the OI-mode opaque type.  */
    union
    {
        int8x8x4_t __i;
        __builtin_neon_oi __o;
    } __au = {__a};
    return (int8x8_t)__builtin_neon_vtbl4v8qi(__au.__o, __b);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_u8(uint8x8x4_t __a, uint8x8_t __b)
{
    union
    {
        uint8x8x4_t __i;
        __builtin_neon_oi __o;
    } __au = {__a};
    return (uint8x8_t)__builtin_neon_vtbl4v8qi(__au.__o, (int8x8_t)__b);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbl4_p8(poly8x8x4_t __a, uint8x8_t __b)
{
    union
    {
        poly8x8x4_t __i;
        __builtin_neon_oi __o;
    } __au = {__a};
    return (poly8x8_t)__builtin_neon_vtbl4v8qi(__au.__o, (int8x8_t)__b);
}
/* vtbxN_<t>: byte table lookup with extension.  Like vtblN, but __a supplies
   the fallback: lanes of __c that index outside the table keep the
   corresponding byte of __a instead of becoming 0 (ARM VTBX semantics per
   the ARM NEON intrinsics reference).  Multi-vector tables (__b) are
   repacked into the builtin's opaque wide-integer type via an anonymous
   union, exactly as in the vtblN wrappers.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx1_s8(int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
    return (int8x8_t)__builtin_neon_vtbx1v8qi(__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx1_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
    return (uint8x8_t)__builtin_neon_vtbx1v8qi((int8x8_t)__a, (int8x8_t)__b,
                                               (int8x8_t)__c);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx1_p8(poly8x8_t __a, poly8x8_t __b, uint8x8_t __c)
{
    return (poly8x8_t)__builtin_neon_vtbx1v8qi((int8x8_t)__a, (int8x8_t)__b,
                                               (int8x8_t)__c);
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx2_s8(int8x8_t __a, int8x8x2_t __b, int8x8_t __c)
{
    union
    {
        int8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    return (int8x8_t)__builtin_neon_vtbx2v8qi(__a, __bu.__o, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx2_u8(uint8x8_t __a, uint8x8x2_t __b, uint8x8_t __c)
{
    union
    {
        uint8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    return (uint8x8_t)__builtin_neon_vtbx2v8qi((int8x8_t)__a, __bu.__o,
                                               (int8x8_t)__c);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx2_p8(poly8x8_t __a, poly8x8x2_t __b, uint8x8_t __c)
{
    union
    {
        poly8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    return (poly8x8_t)__builtin_neon_vtbx2v8qi((int8x8_t)__a, __bu.__o,
                                               (int8x8_t)__c);
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx3_s8(int8x8_t __a, int8x8x3_t __b, int8x8_t __c)
{
    union
    {
        int8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    return (int8x8_t)__builtin_neon_vtbx3v8qi(__a, __bu.__o, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx3_u8(uint8x8_t __a, uint8x8x3_t __b, uint8x8_t __c)
{
    union
    {
        uint8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    return (uint8x8_t)__builtin_neon_vtbx3v8qi((int8x8_t)__a, __bu.__o,
                                               (int8x8_t)__c);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx3_p8(poly8x8_t __a, poly8x8x3_t __b, uint8x8_t __c)
{
    union
    {
        poly8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    return (poly8x8_t)__builtin_neon_vtbx3v8qi((int8x8_t)__a, __bu.__o,
                                               (int8x8_t)__c);
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx4_s8(int8x8_t __a, int8x8x4_t __b, int8x8_t __c)
{
    union
    {
        int8x8x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    return (int8x8_t)__builtin_neon_vtbx4v8qi(__a, __bu.__o, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx4_u8(uint8x8_t __a, uint8x8x4_t __b, uint8x8_t __c)
{
    union
    {
        uint8x8x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    return (uint8x8_t)__builtin_neon_vtbx4v8qi((int8x8_t)__a, __bu.__o,
                                               (int8x8_t)__c);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtbx4_p8(poly8x8_t __a, poly8x8x4_t __b, uint8x8_t __c)
{
    union
    {
        poly8x8x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    return (poly8x8_t)__builtin_neon_vtbx4v8qi((int8x8_t)__a, __bu.__o,
                                               (int8x8_t)__c);
}
/* vmul[q]_lane_<t>: multiply each lane of __a by lane __c of the 64-bit
   vector __b.  __c must be an integer-constant expression (immediate lane
   index).  Unsigned variants reuse the signed builtin via bit-preserving
   casts — lane-wise multiply has identical bit patterns for same-width
   signed/unsigned operands.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_s16(int16x4_t __a, int16x4_t __b, const int __c)
{
    return (int16x4_t)__builtin_neon_vmul_lanev4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_s32(int32x2_t __a, int32x2_t __b, const int __c)
{
    return (int32x2_t)__builtin_neon_vmul_lanev2si(__a, __b, __c);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_f32(float32x2_t __a, float32x2_t __b, const int __c)
{
    return (float32x2_t)__builtin_neon_vmul_lanev2sf(__a, __b, __c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_u16(uint16x4_t __a, uint16x4_t __b, const int __c)
{
    return (uint16x4_t)__builtin_neon_vmul_lanev4hi((int16x4_t)__a,
                                                    (int16x4_t)__b, __c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_u32(uint32x2_t __a, uint32x2_t __b, const int __c)
{
    return (uint32x2_t)__builtin_neon_vmul_lanev2si((int32x2_t)__a,
                                                    (int32x2_t)__b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_s16(int16x8_t __a, int16x4_t __b, const int __c)
{
    return (int16x8_t)__builtin_neon_vmul_lanev8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_s32(int32x4_t __a, int32x2_t __b, const int __c)
{
    return (int32x4_t)__builtin_neon_vmul_lanev4si(__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_f32(float32x4_t __a, float32x2_t __b, const int __c)
{
    return (float32x4_t)__builtin_neon_vmul_lanev4sf(__a, __b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_u16(uint16x8_t __a, uint16x4_t __b, const int __c)
{
    return (uint16x8_t)__builtin_neon_vmul_lanev8hi((int16x8_t)__a,
                                                    (int16x4_t)__b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_u32(uint32x4_t __a, uint32x2_t __b, const int __c)
{
    return (uint32x4_t)__builtin_neon_vmul_lanev4si((int32x4_t)__a,
                                                    (int32x2_t)__b, __c);
}
/* vmla[q]_lane_<t>: multiply-accumulate by lane — result is
   __a + (__b * lane __d of __c).  __d must be an integer-constant
   expression (immediate lane index).  Unsigned variants reuse the signed
   builtin via bit-preserving casts.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
    return (int16x4_t)__builtin_neon_vmla_lanev4hi(__a, __b, __c, __d);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
    return (int32x2_t)__builtin_neon_vmla_lanev2si(__a, __b, __c, __d);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c,
              const int __d)
{
    return (float32x2_t)__builtin_neon_vmla_lanev2sf(__a, __b, __c, __d);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
{
    return (uint16x4_t)__builtin_neon_vmla_lanev4hi(
        (int16x4_t)__a, (int16x4_t)__b, (int16x4_t)__c, __d);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_lane_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
{
    return (uint32x2_t)__builtin_neon_vmla_lanev2si(
        (int32x2_t)__a, (int32x2_t)__b, (int32x2_t)__c, __d);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_s16(int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
{
    return (int16x8_t)__builtin_neon_vmla_lanev8hi(__a, __b, __c, __d);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_s32(int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
{
    return (int32x4_t)__builtin_neon_vmla_lanev4si(__a, __b, __c, __d);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_f32(float32x4_t __a, float32x4_t __b, float32x2_t __c,
               const int __d)
{
    return (float32x4_t)__builtin_neon_vmla_lanev4sf(__a, __b, __c, __d);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_u16(uint16x8_t __a, uint16x8_t __b, uint16x4_t __c,
               const int __d)
{
    return (uint16x8_t)__builtin_neon_vmla_lanev8hi(
        (int16x8_t)__a, (int16x8_t)__b, (int16x4_t)__c, __d);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_lane_u32(uint32x4_t __a, uint32x4_t __b, uint32x2_t __c,
               const int __d)
{
    return (uint32x4_t)__builtin_neon_vmla_lanev4si(
        (int32x4_t)__a, (int32x4_t)__b, (int32x2_t)__c, __d);
}
/* ACLE vmlal_lane / vqdmlal_lane family: widening multiply-accumulate by
   lane.  __b[i] * __c[__d] is computed at double the element width and
   added to the already-wide accumulator __a (e.g. 16x16 -> 32-bit).  The
   "vqdmlal" forms additionally double the product and saturate, per the
   ACLE spec.  Unsigned variants cast through the signed vector types the
   builtins are declared on; the bit pattern is unchanged.  */
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
  return (int32x4_t)__builtin_neon_vmlals_lanev4hi(__a, __b, __c, __d);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
  return (int64x2_t)__builtin_neon_vmlals_lanev2si(__a, __b, __c, __d);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c,
               const int __d)
{
  /* Unsigned semantics come from the distinct "vmlalu" builtin, not from
     the operand types, which are cast to signed.  */
  return (uint32x4_t)__builtin_neon_vmlalu_lanev4hi(
      (int32x4_t)__a, (int16x4_t)__b, (int16x4_t)__c, __d);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_lane_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c,
               const int __d)
{
  return (uint64x2_t)__builtin_neon_vmlalu_lanev2si(
      (int64x2_t)__a, (int32x2_t)__b, (int32x2_t)__c, __d);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_lane_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
  return (int32x4_t)__builtin_neon_vqdmlal_lanev4hi(__a, __b, __c, __d);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_lane_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
  return (int64x2_t)__builtin_neon_vqdmlal_lanev2si(__a, __b, __c, __d);
}
/* ACLE vmls_lane / vmlsq_lane family: multiply-subtract by lane
   (result[i] = __a[i] - __b[i] * __c[__d], per the ACLE spec).  Same
   wrapping pattern as the vmla_lane group above it in this header:
   direct builtin call for signed/float, casts through signed vector
   types for unsigned; "q" suffix marks the 128-bit forms.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
  return (int16x4_t)__builtin_neon_vmls_lanev4hi(__a, __b, __c, __d);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
  return (int32x2_t)__builtin_neon_vmls_lanev2si(__a, __b, __c, __d);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_f32(float32x2_t __a, float32x2_t __b, float32x2_t __c,
              const int __d)
{
  return (float32x2_t)__builtin_neon_vmls_lanev2sf(__a, __b, __c, __d);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c, const int __d)
{
  return (uint16x4_t)__builtin_neon_vmls_lanev4hi(
      (int16x4_t)__a, (int16x4_t)__b, (int16x4_t)__c, __d);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_lane_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c, const int __d)
{
  return (uint32x2_t)__builtin_neon_vmls_lanev2si(
      (int32x2_t)__a, (int32x2_t)__b, (int32x2_t)__c, __d);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_s16(int16x8_t __a, int16x8_t __b, int16x4_t __c, const int __d)
{
  return (int16x8_t)__builtin_neon_vmls_lanev8hi(__a, __b, __c, __d);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_s32(int32x4_t __a, int32x4_t __b, int32x2_t __c, const int __d)
{
  return (int32x4_t)__builtin_neon_vmls_lanev4si(__a, __b, __c, __d);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_f32(float32x4_t __a, float32x4_t __b, float32x2_t __c,
               const int __d)
{
  return (float32x4_t)__builtin_neon_vmls_lanev4sf(__a, __b, __c, __d);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_u16(uint16x8_t __a, uint16x8_t __b, uint16x4_t __c,
               const int __d)
{
  return (uint16x8_t)__builtin_neon_vmls_lanev8hi(
      (int16x8_t)__a, (int16x8_t)__b, (int16x4_t)__c, __d);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_lane_u32(uint32x4_t __a, uint32x4_t __b, uint32x2_t __c,
               const int __d)
{
  return (uint32x4_t)__builtin_neon_vmls_lanev4si(
      (int32x4_t)__a, (int32x4_t)__b, (int32x2_t)__c, __d);
}
/* ACLE vmlsl_lane / vqdmlsl_lane family: widening multiply-subtract by
   lane — the double-width product __b[i] * __c[__d] is subtracted from
   the wide accumulator __a.  The "vqdmlsl" forms double the product and
   saturate, per the ACLE spec.  Unsigned variants select the dedicated
   "vmlslu" builtin and cast operands through the signed vector types.  */
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_lane_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
  return (int32x4_t)__builtin_neon_vmlsls_lanev4hi(__a, __b, __c, __d);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_lane_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
  return (int64x2_t)__builtin_neon_vmlsls_lanev2si(__a, __b, __c, __d);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_lane_u16(uint32x4_t __a, uint16x4_t __b, uint16x4_t __c,
               const int __d)
{
  return (uint32x4_t)__builtin_neon_vmlslu_lanev4hi(
      (int32x4_t)__a, (int16x4_t)__b, (int16x4_t)__c, __d);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_lane_u32(uint64x2_t __a, uint32x2_t __b, uint32x2_t __c,
               const int __d)
{
  return (uint64x2_t)__builtin_neon_vmlslu_lanev2si(
      (int64x2_t)__a, (int32x2_t)__b, (int32x2_t)__c, __d);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_lane_s16(int32x4_t __a, int16x4_t __b, int16x4_t __c, const int __d)
{
  return (int32x4_t)__builtin_neon_vqdmlsl_lanev4hi(__a, __b, __c, __d);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_lane_s32(int64x2_t __a, int32x2_t __b, int32x2_t __c, const int __d)
{
  return (int64x2_t)__builtin_neon_vqdmlsl_lanev2si(__a, __b, __c, __d);
}
/* ACLE vmull_lane / vqdmull_lane family: widening multiply by lane —
   result[i] = __a[i] * __b[__c] at double the element width.  The
   "vqdmull" forms double the product and saturate, per the ACLE spec.
   Here the lane index is the third parameter __c (no accumulator).  */
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_s16(int16x4_t __a, int16x4_t __b, const int __c)
{
  return (int32x4_t)__builtin_neon_vmulls_lanev4hi(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_s32(int32x2_t __a, int32x2_t __b, const int __c)
{
  return (int64x2_t)__builtin_neon_vmulls_lanev2si(__a, __b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_u16(uint16x4_t __a, uint16x4_t __b, const int __c)
{
  return (uint32x4_t)__builtin_neon_vmullu_lanev4hi((int16x4_t)__a,
                                                    (int16x4_t)__b, __c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_lane_u32(uint32x2_t __a, uint32x2_t __b, const int __c)
{
  return (uint64x2_t)__builtin_neon_vmullu_lanev2si((int32x2_t)__a,
                                                    (int32x2_t)__b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_lane_s16(int16x4_t __a, int16x4_t __b, const int __c)
{
  return (int32x4_t)__builtin_neon_vqdmull_lanev4hi(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_lane_s32(int32x2_t __a, int32x2_t __b, const int __c)
{
  return (int64x2_t)__builtin_neon_vqdmull_lanev2si(__a, __b, __c);
}
/* ACLE vqdmulh_lane / vqrdmulh_lane families: saturating doubling
   multiply by lane returning the high half; the "vqr" forms round
   before truncation, per the ACLE spec.  These exist only for signed
   16/32-bit elements, so no unsigned cast shims are needed here.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_lane_s16(int16x8_t __a, int16x4_t __b, const int __c)
{
  return (int16x8_t)__builtin_neon_vqdmulh_lanev8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_lane_s32(int32x4_t __a, int32x2_t __b, const int __c)
{
  return (int32x4_t)__builtin_neon_vqdmulh_lanev4si(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_lane_s16(int16x4_t __a, int16x4_t __b, const int __c)
{
  return (int16x4_t)__builtin_neon_vqdmulh_lanev4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_lane_s32(int32x2_t __a, int32x2_t __b, const int __c)
{
  return (int32x2_t)__builtin_neon_vqdmulh_lanev2si(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_lane_s16(int16x8_t __a, int16x4_t __b, const int __c)
{
  return (int16x8_t)__builtin_neon_vqrdmulh_lanev8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_lane_s32(int32x4_t __a, int32x2_t __b, const int __c)
{
  return (int32x4_t)__builtin_neon_vqrdmulh_lanev4si(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_lane_s16(int16x4_t __a, int16x4_t __b, const int __c)
{
  return (int16x4_t)__builtin_neon_vqrdmulh_lanev4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_lane_s32(int32x2_t __a, int32x2_t __b, const int __c)
{
  return (int32x2_t)__builtin_neon_vqrdmulh_lanev2si(__a, __b, __c);
}
/* vqrdmlah_lane / vqrdmlsh_lane families: rounding-doubling
   multiply-accumulate/-subtract by lane.  Guarded by
   __ARM_FEATURE_QRDMX, the ACLE feature macro for the ARMv8.1-A
   QRDMLAH/QRDMLSH extension, so these intrinsics are only declared
   when the target provides those instructions.  */
#ifdef __ARM_FEATURE_QRDMX
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_lane_s16(int16x8_t __a, int16x8_t __b, int16x4_t __c,
                   const int __d)
{
  return (int16x8_t)__builtin_neon_vqrdmlah_lanev8hi(__a, __b, __c, __d);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlahq_lane_s32(int32x4_t __a, int32x4_t __b, int32x2_t __c,
                   const int __d)
{
  return (int32x4_t)__builtin_neon_vqrdmlah_lanev4si(__a, __b, __c, __d);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_lane_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c,
                  const int __d)
{
  return (int16x4_t)__builtin_neon_vqrdmlah_lanev4hi(__a, __b, __c, __d);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlah_lane_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c,
                  const int __d)
{
  return (int32x2_t)__builtin_neon_vqrdmlah_lanev2si(__a, __b, __c, __d);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_lane_s16(int16x8_t __a, int16x8_t __b, int16x4_t __c,
                   const int __d)
{
  return (int16x8_t)__builtin_neon_vqrdmlsh_lanev8hi(__a, __b, __c, __d);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlshq_lane_s32(int32x4_t __a, int32x4_t __b, int32x2_t __c,
                   const int __d)
{
  return (int32x4_t)__builtin_neon_vqrdmlsh_lanev4si(__a, __b, __c, __d);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_lane_s16(int16x4_t __a, int16x4_t __b, int16x4_t __c,
                  const int __d)
{
  return (int16x4_t)__builtin_neon_vqrdmlsh_lanev4hi(__a, __b, __c, __d);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmlsh_lane_s32(int32x2_t __a, int32x2_t __b, int32x2_t __c,
                  const int __d)
{
  return (int32x2_t)__builtin_neon_vqrdmlsh_lanev2si(__a, __b, __c, __d);
}
#endif
/* ACLE vmul_n / vmulq_n family: multiply every element of __a by the
   scalar __b (result[i] = __a[i] * __b).  The scalar is cast to the
   builtin's element type (__builtin_neon_hi/si/sf); unsigned vectors
   are cast through the signed types the builtins are declared on.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_n_s16(int16x4_t __a, int16_t __b)
{
  return (int16x4_t)__builtin_neon_vmul_nv4hi(__a, (__builtin_neon_hi)__b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_n_s32(int32x2_t __a, int32_t __b)
{
  return (int32x2_t)__builtin_neon_vmul_nv2si(__a, (__builtin_neon_si)__b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_n_f32(float32x2_t __a, float32_t __b)
{
  return (float32x2_t)__builtin_neon_vmul_nv2sf(__a, (__builtin_neon_sf)__b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_n_u16(uint16x4_t __a, uint16_t __b)
{
  return (uint16x4_t)__builtin_neon_vmul_nv4hi((int16x4_t)__a,
                                               (__builtin_neon_hi)__b);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_n_u32(uint32x2_t __a, uint32_t __b)
{
  return (uint32x2_t)__builtin_neon_vmul_nv2si((int32x2_t)__a,
                                               (__builtin_neon_si)__b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_n_s16(int16x8_t __a, int16_t __b)
{
  return (int16x8_t)__builtin_neon_vmul_nv8hi(__a, (__builtin_neon_hi)__b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_n_s32(int32x4_t __a, int32_t __b)
{
  return (int32x4_t)__builtin_neon_vmul_nv4si(__a, (__builtin_neon_si)__b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_n_f32(float32x4_t __a, float32_t __b)
{
  return (float32x4_t)__builtin_neon_vmul_nv4sf(__a, (__builtin_neon_sf)__b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_n_u16(uint16x8_t __a, uint16_t __b)
{
  return (uint16x8_t)__builtin_neon_vmul_nv8hi((int16x8_t)__a,
                                               (__builtin_neon_hi)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_n_u32(uint32x4_t __a, uint32_t __b)
{
  return (uint32x4_t)__builtin_neon_vmul_nv4si((int32x4_t)__a,
                                               (__builtin_neon_si)__b);
}
/* ACLE vmull_n / vqdmull_n family: widening multiply by a scalar —
   result[i] = __a[i] * __b at double the element width; the "vqdmull"
   forms double the product and saturate, per the ACLE spec.  */
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_s16(int16x4_t __a, int16_t __b)
{
  return (int32x4_t)__builtin_neon_vmulls_nv4hi(__a, (__builtin_neon_hi)__b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_s32(int32x2_t __a, int32_t __b)
{
  return (int64x2_t)__builtin_neon_vmulls_nv2si(__a, (__builtin_neon_si)__b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_u16(uint16x4_t __a, uint16_t __b)
{
  return (uint32x4_t)__builtin_neon_vmullu_nv4hi((int16x4_t)__a,
                                                 (__builtin_neon_hi)__b);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_n_u32(uint32x2_t __a, uint32_t __b)
{
  return (uint64x2_t)__builtin_neon_vmullu_nv2si((int32x2_t)__a,
                                                 (__builtin_neon_si)__b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_n_s16(int16x4_t __a, int16_t __b)
{
  return (int32x4_t)__builtin_neon_vqdmull_nv4hi(__a, (__builtin_neon_hi)__b);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmull_n_s32(int32x2_t __a, int32_t __b)
{
  return (int64x2_t)__builtin_neon_vqdmull_nv2si(__a, (__builtin_neon_si)__b);
}
/* ACLE vqdmulh_n / vqrdmulh_n families: saturating doubling multiply by
   a scalar, returning the high half; the "vqr" forms round, per the
   ACLE spec.  Signed 16/32-bit elements only.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_n_s16(int16x8_t __a, int16_t __b)
{
  return (int16x8_t)__builtin_neon_vqdmulh_nv8hi(__a, (__builtin_neon_hi)__b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulhq_n_s32(int32x4_t __a, int32_t __b)
{
  return (int32x4_t)__builtin_neon_vqdmulh_nv4si(__a, (__builtin_neon_si)__b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_n_s16(int16x4_t __a, int16_t __b)
{
  return (int16x4_t)__builtin_neon_vqdmulh_nv4hi(__a, (__builtin_neon_hi)__b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmulh_n_s32(int32x2_t __a, int32_t __b)
{
  return (int32x2_t)__builtin_neon_vqdmulh_nv2si(__a, (__builtin_neon_si)__b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_n_s16(int16x8_t __a, int16_t __b)
{
  return (int16x8_t)__builtin_neon_vqrdmulh_nv8hi(__a,
                                                  (__builtin_neon_hi)__b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulhq_n_s32(int32x4_t __a, int32_t __b)
{
  return (int32x4_t)__builtin_neon_vqrdmulh_nv4si(__a,
                                                  (__builtin_neon_si)__b);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_n_s16(int16x4_t __a, int16_t __b)
{
  return (int16x4_t)__builtin_neon_vqrdmulh_nv4hi(__a,
                                                  (__builtin_neon_hi)__b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqrdmulh_n_s32(int32x2_t __a, int32_t __b)
{
  return (int32x2_t)__builtin_neon_vqrdmulh_nv2si(__a,
                                                  (__builtin_neon_si)__b);
}
/* ACLE vmla_n / vmlaq_n family: multiply-accumulate with a scalar —
   result[i] = __a[i] + __b[i] * __c.  The scalar __c is cast to the
   builtin's element type; unsigned vectors are cast through the signed
   types the builtins are declared on (bit pattern unchanged).  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_s16(int16x4_t __a, int16x4_t __b, int16_t __c)
{
  return (int16x4_t)__builtin_neon_vmla_nv4hi(__a, __b,
                                              (__builtin_neon_hi)__c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_s32(int32x2_t __a, int32x2_t __b, int32_t __c)
{
  return (int32x2_t)__builtin_neon_vmla_nv2si(__a, __b,
                                              (__builtin_neon_si)__c);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_f32(float32x2_t __a, float32x2_t __b, float32_t __c)
{
  return (float32x2_t)__builtin_neon_vmla_nv2sf(__a, __b,
                                                (__builtin_neon_sf)__c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_u16(uint16x4_t __a, uint16x4_t __b, uint16_t __c)
{
  return (uint16x4_t)__builtin_neon_vmla_nv4hi((int16x4_t)__a, (int16x4_t)__b,
                                               (__builtin_neon_hi)__c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmla_n_u32(uint32x2_t __a, uint32x2_t __b, uint32_t __c)
{
  return (uint32x2_t)__builtin_neon_vmla_nv2si((int32x2_t)__a, (int32x2_t)__b,
                                               (__builtin_neon_si)__c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_s16(int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return (int16x8_t)__builtin_neon_vmla_nv8hi(__a, __b,
                                              (__builtin_neon_hi)__c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_s32(int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return (int32x4_t)__builtin_neon_vmla_nv4si(__a, __b,
                                              (__builtin_neon_si)__c);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_f32(float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return (float32x4_t)__builtin_neon_vmla_nv4sf(__a, __b,
                                                (__builtin_neon_sf)__c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_u16(uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return (uint16x8_t)__builtin_neon_vmla_nv8hi((int16x8_t)__a, (int16x8_t)__b,
                                               (__builtin_neon_hi)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_n_u32(uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return (uint32x4_t)__builtin_neon_vmla_nv4si((int32x4_t)__a, (int32x4_t)__b,
                                               (__builtin_neon_si)__c);
}
/* ACLE vmlal_n / vqdmlal_n family: widening multiply-accumulate with a
   scalar — the double-width product __b[i] * __c is added to the wide
   accumulator __a; the "vqdmlal" forms double and saturate, per the
   ACLE spec.  */
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c)
{
  return (int32x4_t)__builtin_neon_vmlals_nv4hi(__a, __b,
                                                (__builtin_neon_hi)__c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c)
{
  return (int64x2_t)__builtin_neon_vmlals_nv2si(__a, __b,
                                                (__builtin_neon_si)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_u16(uint32x4_t __a, uint16x4_t __b, uint16_t __c)
{
  return (uint32x4_t)__builtin_neon_vmlalu_nv4hi(
      (int32x4_t)__a, (int16x4_t)__b, (__builtin_neon_hi)__c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlal_n_u32(uint64x2_t __a, uint32x2_t __b, uint32_t __c)
{
  return (uint64x2_t)__builtin_neon_vmlalu_nv2si(
      (int64x2_t)__a, (int32x2_t)__b, (__builtin_neon_si)__c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c)
{
  return (int32x4_t)__builtin_neon_vqdmlal_nv4hi(__a, __b,
                                                 (__builtin_neon_hi)__c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlal_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c)
{
  return (int64x2_t)__builtin_neon_vqdmlal_nv2si(__a, __b,
                                                 (__builtin_neon_si)__c);
}
/* ACLE vmls_n / vmlsq_n family: multiply-subtract with a scalar —
   result[i] = __a[i] - __b[i] * __c.  Mirrors the vmla_n group above
   in this header, calling the "vmls" builtins instead.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_s16(int16x4_t __a, int16x4_t __b, int16_t __c)
{
  return (int16x4_t)__builtin_neon_vmls_nv4hi(__a, __b,
                                              (__builtin_neon_hi)__c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_s32(int32x2_t __a, int32x2_t __b, int32_t __c)
{
  return (int32x2_t)__builtin_neon_vmls_nv2si(__a, __b,
                                              (__builtin_neon_si)__c);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_f32(float32x2_t __a, float32x2_t __b, float32_t __c)
{
  return (float32x2_t)__builtin_neon_vmls_nv2sf(__a, __b,
                                                (__builtin_neon_sf)__c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_u16(uint16x4_t __a, uint16x4_t __b, uint16_t __c)
{
  return (uint16x4_t)__builtin_neon_vmls_nv4hi((int16x4_t)__a, (int16x4_t)__b,
                                               (__builtin_neon_hi)__c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmls_n_u32(uint32x2_t __a, uint32x2_t __b, uint32_t __c)
{
  return (uint32x2_t)__builtin_neon_vmls_nv2si((int32x2_t)__a, (int32x2_t)__b,
                                               (__builtin_neon_si)__c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_s16(int16x8_t __a, int16x8_t __b, int16_t __c)
{
  return (int16x8_t)__builtin_neon_vmls_nv8hi(__a, __b,
                                              (__builtin_neon_hi)__c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_s32(int32x4_t __a, int32x4_t __b, int32_t __c)
{
  return (int32x4_t)__builtin_neon_vmls_nv4si(__a, __b,
                                              (__builtin_neon_si)__c);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_f32(float32x4_t __a, float32x4_t __b, float32_t __c)
{
  return (float32x4_t)__builtin_neon_vmls_nv4sf(__a, __b,
                                                (__builtin_neon_sf)__c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_u16(uint16x8_t __a, uint16x8_t __b, uint16_t __c)
{
  return (uint16x8_t)__builtin_neon_vmls_nv8hi((int16x8_t)__a, (int16x8_t)__b,
                                               (__builtin_neon_hi)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_n_u32(uint32x4_t __a, uint32x4_t __b, uint32_t __c)
{
  return (uint32x4_t)__builtin_neon_vmls_nv4si((int32x4_t)__a, (int32x4_t)__b,
                                               (__builtin_neon_si)__c);
}
/* ACLE vmlsl_n / vqdmlsl_n family: widening multiply-subtract with a
   scalar — the double-width product __b[i] * __c is subtracted from the
   wide accumulator __a; the "vqdmlsl" forms double and saturate, per
   the ACLE spec.  */
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c)
{
  return (int32x4_t)__builtin_neon_vmlsls_nv4hi(__a, __b,
                                                (__builtin_neon_hi)__c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c)
{
  return (int64x2_t)__builtin_neon_vmlsls_nv2si(__a, __b,
                                                (__builtin_neon_si)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_u16(uint32x4_t __a, uint16x4_t __b, uint16_t __c)
{
  return (uint32x4_t)__builtin_neon_vmlslu_nv4hi(
      (int32x4_t)__a, (int16x4_t)__b, (__builtin_neon_hi)__c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmlsl_n_u32(uint64x2_t __a, uint32x2_t __b, uint32_t __c)
{
  return (uint64x2_t)__builtin_neon_vmlslu_nv2si(
      (int64x2_t)__a, (int32x2_t)__b, (__builtin_neon_si)__c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_n_s16(int32x4_t __a, int16x4_t __b, int16_t __c)
{
  return (int32x4_t)__builtin_neon_vqdmlsl_nv4hi(__a, __b,
                                                 (__builtin_neon_hi)__c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vqdmlsl_n_s32(int64x2_t __a, int32x2_t __b, int32_t __c)
{
  return (int64x2_t)__builtin_neon_vqdmlsl_nv2si(__a, __b,
                                                 (__builtin_neon_si)__c);
}
/* vext_p64: extract a 64-bit polynomial vector starting at element __c
   of the pair (__a, __b).  The poly64 type is only available with the
   crypto extension, so the target FPU is temporarily forced to
   crypto-neon-fp-armv8 via push/pop_options around this definition.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_p64(poly64x1_t __a, poly64x1_t __b, const int __c)
{
  return (poly64x1_t)__builtin_neon_vextdi(__a, __b, __c);
}
#pragma GCC pop_options
/* ACLE vext family (64-bit vectors): build a result by taking elements
   starting at index __c from the concatenation of __a and __b, per the
   ACLE spec.  Unsigned and polynomial variants cast through the signed
   vector types the builtins are declared on; the single-element 64-bit
   forms share the "vextdi" builtin.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_s8(int8x8_t __a, int8x8_t __b, const int __c)
{
  return (int8x8_t)__builtin_neon_vextv8qi(__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_s16(int16x4_t __a, int16x4_t __b, const int __c)
{
  return (int16x4_t)__builtin_neon_vextv4hi(__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_s32(int32x2_t __a, int32x2_t __b, const int __c)
{
  return (int32x2_t)__builtin_neon_vextv2si(__a, __b, __c);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_s64(int64x1_t __a, int64x1_t __b, const int __c)
{
  return (int64x1_t)__builtin_neon_vextdi(__a, __b, __c);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_f32(float32x2_t __a, float32x2_t __b, const int __c)
{
  return (float32x2_t)__builtin_neon_vextv2sf(__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_u8(uint8x8_t __a, uint8x8_t __b, const int __c)
{
  return (uint8x8_t)__builtin_neon_vextv8qi((int8x8_t)__a, (int8x8_t)__b,
                                            __c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_u16(uint16x4_t __a, uint16x4_t __b, const int __c)
{
  return (uint16x4_t)__builtin_neon_vextv4hi((int16x4_t)__a, (int16x4_t)__b,
                                             __c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_u32(uint32x2_t __a, uint32x2_t __b, const int __c)
{
  return (uint32x2_t)__builtin_neon_vextv2si((int32x2_t)__a, (int32x2_t)__b,
                                             __c);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_u64(uint64x1_t __a, uint64x1_t __b, const int __c)
{
  return (uint64x1_t)__builtin_neon_vextdi((int64x1_t)__a, (int64x1_t)__b,
                                           __c);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_p8(poly8x8_t __a, poly8x8_t __b, const int __c)
{
  return (poly8x8_t)__builtin_neon_vextv8qi((int8x8_t)__a, (int8x8_t)__b,
                                            __c);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_p16(poly16x4_t __a, poly16x4_t __b, const int __c)
{
  return (poly16x4_t)__builtin_neon_vextv4hi((int16x4_t)__a, (int16x4_t)__b,
                                             __c);
}
/* vextq_p64: 128-bit poly64 variant of vext.  As with vext_p64 earlier
   in this header, the poly64 type requires the crypto extension, so the
   target FPU is forced to crypto-neon-fp-armv8 just for this
   definition via push/pop_options.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_p64(poly64x2_t __a, poly64x2_t __b, const int __c)
{
  return (poly64x2_t)__builtin_neon_vextv2di((int64x2_t)__a, (int64x2_t)__b,
                                             __c);
}
#pragma GCC pop_options
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_s8(int8x16_t __a, int8x16_t __b, const int __c)
{
return (int8x16_t)__builtin_neon_vextv16qi(__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_s16(int16x8_t __a, int16x8_t __b, const int __c)
{
return (int16x8_t)__builtin_neon_vextv8hi(__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_s32(int32x4_t __a, int32x4_t __b, const int __c)
{
return (int32x4_t)__builtin_neon_vextv4si(__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_s64(int64x2_t __a, int64x2_t __b, const int __c)
{
return (int64x2_t)__builtin_neon_vextv2di(__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_f32(float32x4_t __a, float32x4_t __b, const int __c)
{
return (float32x4_t)__builtin_neon_vextv4sf(__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_u8(uint8x16_t __a, uint8x16_t __b, const int __c)
{
return (uint8x16_t)__builtin_neon_vextv16qi((int8x16_t)__a, (int8x16_t)__b,
__c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_u16(uint16x8_t __a, uint16x8_t __b, const int __c)
{
return (uint16x8_t)__builtin_neon_vextv8hi((int16x8_t)__a, (int16x8_t)__b,
__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_u32(uint32x4_t __a, uint32x4_t __b, const int __c)
{
return (uint32x4_t)__builtin_neon_vextv4si((int32x4_t)__a, (int32x4_t)__b,
__c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_u64(uint64x2_t __a, uint64x2_t __b, const int __c)
{
return (uint64x2_t)__builtin_neon_vextv2di((int64x2_t)__a, (int64x2_t)__b,
__c);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_p8(poly8x16_t __a, poly8x16_t __b, const int __c)
{
return (poly8x16_t)__builtin_neon_vextv16qi((int8x16_t)__a, (int8x16_t)__b,
__c);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_p16(poly16x8_t __a, poly16x8_t __b, const int __c)
{
return (poly16x8_t)__builtin_neon_vextv8hi((int16x8_t)__a, (int16x8_t)__b,
__c);
}
/* vrevN (vector reverse) intrinsics.
   Implemented as constant __builtin_shuffle permutations: the mask
   reverses the element order within each N-bit group (64-, 32- or 16-bit
   for vrev64/vrev32/vrev16 respectively), which can be read directly off
   the index vectors below.  Quadword ("q") variants apply the same
   per-group reversal independently to both halves of the 128-bit
   vector.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64_s8(int8x8_t __a)
{
  return (int8x8_t)__builtin_shuffle(__a,
                                     (uint8x8_t){7, 6, 5, 4, 3, 2, 1, 0});
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64_s16(int16x4_t __a)
{
  return (int16x4_t)__builtin_shuffle(__a, (uint16x4_t){3, 2, 1, 0});
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64_s32(int32x2_t __a)
{
  return (int32x2_t)__builtin_shuffle(__a, (uint32x2_t){1, 0});
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64_f32(float32x2_t __a)
{
  return (float32x2_t)__builtin_shuffle(__a, (uint32x2_t){1, 0});
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64_u8(uint8x8_t __a)
{
  return (uint8x8_t)__builtin_shuffle(__a,
                                      (uint8x8_t){7, 6, 5, 4, 3, 2, 1, 0});
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64_u16(uint16x4_t __a)
{
  return (uint16x4_t)__builtin_shuffle(__a, (uint16x4_t){3, 2, 1, 0});
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64_u32(uint32x2_t __a)
{
  return (uint32x2_t)__builtin_shuffle(__a, (uint32x2_t){1, 0});
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64_p8(poly8x8_t __a)
{
  return (poly8x8_t)__builtin_shuffle(__a,
                                      (uint8x8_t){7, 6, 5, 4, 3, 2, 1, 0});
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64_p16(poly16x4_t __a)
{
  return (poly16x4_t)__builtin_shuffle(__a, (uint16x4_t){3, 2, 1, 0});
}
/* vrev64q: reverse within each 64-bit half of the quadword.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_s8(int8x16_t __a)
{
  return (int8x16_t)__builtin_shuffle(
      __a,
      (uint8x16_t){7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8});
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_s16(int16x8_t __a)
{
  return (int16x8_t)__builtin_shuffle(__a,
                                      (uint16x8_t){3, 2, 1, 0, 7, 6, 5, 4});
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_s32(int32x4_t __a)
{
  return (int32x4_t)__builtin_shuffle(__a, (uint32x4_t){1, 0, 3, 2});
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_f32(float32x4_t __a)
{
  return (float32x4_t)__builtin_shuffle(__a, (uint32x4_t){1, 0, 3, 2});
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_u8(uint8x16_t __a)
{
  return (uint8x16_t)__builtin_shuffle(
      __a,
      (uint8x16_t){7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8});
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_u16(uint16x8_t __a)
{
  return (uint16x8_t)__builtin_shuffle(__a,
                                       (uint16x8_t){3, 2, 1, 0, 7, 6, 5, 4});
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_u32(uint32x4_t __a)
{
  return (uint32x4_t)__builtin_shuffle(__a, (uint32x4_t){1, 0, 3, 2});
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_p8(poly8x16_t __a)
{
  return (poly8x16_t)__builtin_shuffle(
      __a,
      (uint8x16_t){7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8});
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_p16(poly16x8_t __a)
{
  return (poly16x8_t)__builtin_shuffle(__a,
                                       (uint16x8_t){3, 2, 1, 0, 7, 6, 5, 4});
}
/* vrev32: reverse within each 32-bit group.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32_s8(int8x8_t __a)
{
  return (int8x8_t)__builtin_shuffle(__a,
                                     (uint8x8_t){3, 2, 1, 0, 7, 6, 5, 4});
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32_s16(int16x4_t __a)
{
  return (int16x4_t)__builtin_shuffle(__a, (uint16x4_t){1, 0, 3, 2});
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32_u8(uint8x8_t __a)
{
  return (uint8x8_t)__builtin_shuffle(__a,
                                      (uint8x8_t){3, 2, 1, 0, 7, 6, 5, 4});
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32_u16(uint16x4_t __a)
{
  return (uint16x4_t)__builtin_shuffle(__a, (uint16x4_t){1, 0, 3, 2});
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32_p8(poly8x8_t __a)
{
  return (poly8x8_t)__builtin_shuffle(__a,
                                      (uint8x8_t){3, 2, 1, 0, 7, 6, 5, 4});
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32_p16(poly16x4_t __a)
{
  return (poly16x4_t)__builtin_shuffle(__a, (uint16x4_t){1, 0, 3, 2});
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_s8(int8x16_t __a)
{
  return (int8x16_t)__builtin_shuffle(
      __a,
      (uint8x16_t){3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12});
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_s16(int16x8_t __a)
{
  return (int16x8_t)__builtin_shuffle(__a,
                                      (uint16x8_t){1, 0, 3, 2, 5, 4, 7, 6});
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_u8(uint8x16_t __a)
{
  return (uint8x16_t)__builtin_shuffle(
      __a,
      (uint8x16_t){3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12});
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_u16(uint16x8_t __a)
{
  return (uint16x8_t)__builtin_shuffle(__a,
                                       (uint16x8_t){1, 0, 3, 2, 5, 4, 7, 6});
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_p8(poly8x16_t __a)
{
  return (poly8x16_t)__builtin_shuffle(
      __a,
      (uint8x16_t){3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12});
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev32q_p16(poly16x8_t __a)
{
  return (poly16x8_t)__builtin_shuffle(__a,
                                       (uint16x8_t){1, 0, 3, 2, 5, 4, 7, 6});
}
/* vrev16: swap the two bytes of each 16-bit group (8-bit lanes only).  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev16_s8(int8x8_t __a)
{
  return (int8x8_t)__builtin_shuffle(__a,
                                     (uint8x8_t){1, 0, 3, 2, 5, 4, 7, 6});
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev16_u8(uint8x8_t __a)
{
  return (uint8x8_t)__builtin_shuffle(__a,
                                      (uint8x8_t){1, 0, 3, 2, 5, 4, 7, 6});
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev16_p8(poly8x8_t __a)
{
  return (poly8x8_t)__builtin_shuffle(__a,
                                      (uint8x8_t){1, 0, 3, 2, 5, 4, 7, 6});
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev16q_s8(int8x16_t __a)
{
  return (int8x16_t)__builtin_shuffle(
      __a,
      (uint8x16_t){1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14});
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev16q_u8(uint8x16_t __a)
{
  return (uint8x16_t)__builtin_shuffle(
      __a,
      (uint8x16_t){1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14});
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev16q_p8(poly8x16_t __a)
{
  return (poly8x16_t)__builtin_shuffle(
      __a,
      (uint8x16_t){1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14});
}
/* vbsl (bitwise select) intrinsics, forwarded to __builtin_neon_vbsl*.
   The selection semantics live entirely in the builtin (per the ARM
   intrinsic contract, the mask __a picks each result bit from __b or __c);
   the wrappers here only handle type plumbing.  Unsigned and polynomial
   element types are cast through the signed builtins, which is
   bit-pattern preserving.  The poly64 variants are gated behind the
   crypto-extension target pragma.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_p64(uint64x1_t __a, poly64x1_t __b, poly64x1_t __c)
{
  return (poly64x1_t)__builtin_neon_vbsldi((int64x1_t)__a, __b, __c);
}
#pragma GCC pop_options
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s8(uint8x8_t __a, int8x8_t __b, int8x8_t __c)
{
  return (int8x8_t)__builtin_neon_vbslv8qi((int8x8_t)__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s16(uint16x4_t __a, int16x4_t __b, int16x4_t __c)
{
  return (int16x4_t)__builtin_neon_vbslv4hi((int16x4_t)__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s32(uint32x2_t __a, int32x2_t __b, int32x2_t __c)
{
  return (int32x2_t)__builtin_neon_vbslv2si((int32x2_t)__a, __b, __c);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_s64(uint64x1_t __a, int64x1_t __b, int64x1_t __c)
{
  return (int64x1_t)__builtin_neon_vbsldi((int64x1_t)__a, __b, __c);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_f32(uint32x2_t __a, float32x2_t __b, float32x2_t __c)
{
  return (float32x2_t)__builtin_neon_vbslv2sf((int32x2_t)__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u8(uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
  return (uint8x8_t)__builtin_neon_vbslv8qi((int8x8_t)__a, (int8x8_t)__b,
                                            (int8x8_t)__c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u16(uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
  return (uint16x4_t)__builtin_neon_vbslv4hi((int16x4_t)__a, (int16x4_t)__b,
                                             (int16x4_t)__c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u32(uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
  return (uint32x2_t)__builtin_neon_vbslv2si((int32x2_t)__a, (int32x2_t)__b,
                                             (int32x2_t)__c);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_u64(uint64x1_t __a, uint64x1_t __b, uint64x1_t __c)
{
  return (uint64x1_t)__builtin_neon_vbsldi((int64x1_t)__a, (int64x1_t)__b,
                                           (int64x1_t)__c);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_p8(uint8x8_t __a, poly8x8_t __b, poly8x8_t __c)
{
  return (poly8x8_t)__builtin_neon_vbslv8qi((int8x8_t)__a, (int8x8_t)__b,
                                            (int8x8_t)__c);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_p16(uint16x4_t __a, poly16x4_t __b, poly16x4_t __c)
{
  return (poly16x4_t)__builtin_neon_vbslv4hi((int16x4_t)__a, (int16x4_t)__b,
                                             (int16x4_t)__c);
}
/* 128-bit (quadword) variants; vbslq_p64 again needs the crypto FPU.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_p64(uint64x2_t __a, poly64x2_t __b, poly64x2_t __c)
{
  return (poly64x2_t)__builtin_neon_vbslv2di((int64x2_t)__a, (int64x2_t)__b,
                                             (int64x2_t)__c);
}
#pragma GCC pop_options
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s8(uint8x16_t __a, int8x16_t __b, int8x16_t __c)
{
  return (int8x16_t)__builtin_neon_vbslv16qi((int8x16_t)__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s16(uint16x8_t __a, int16x8_t __b, int16x8_t __c)
{
  return (int16x8_t)__builtin_neon_vbslv8hi((int16x8_t)__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s32(uint32x4_t __a, int32x4_t __b, int32x4_t __c)
{
  return (int32x4_t)__builtin_neon_vbslv4si((int32x4_t)__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_s64(uint64x2_t __a, int64x2_t __b, int64x2_t __c)
{
  return (int64x2_t)__builtin_neon_vbslv2di((int64x2_t)__a, __b, __c);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_f32(uint32x4_t __a, float32x4_t __b, float32x4_t __c)
{
  return (float32x4_t)__builtin_neon_vbslv4sf((int32x4_t)__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u8(uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
  return (uint8x16_t)__builtin_neon_vbslv16qi((int8x16_t)__a, (int8x16_t)__b,
                                              (int8x16_t)__c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u16(uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
  return (uint16x8_t)__builtin_neon_vbslv8hi((int16x8_t)__a, (int16x8_t)__b,
                                             (int16x8_t)__c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u32(uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
  return (uint32x4_t)__builtin_neon_vbslv4si((int32x4_t)__a, (int32x4_t)__b,
                                             (int32x4_t)__c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_u64(uint64x2_t __a, uint64x2_t __b, uint64x2_t __c)
{
  return (uint64x2_t)__builtin_neon_vbslv2di((int64x2_t)__a, (int64x2_t)__b,
                                             (int64x2_t)__c);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_p8(uint8x16_t __a, poly8x16_t __b, poly8x16_t __c)
{
  return (poly8x16_t)__builtin_neon_vbslv16qi((int8x16_t)__a, (int8x16_t)__b,
                                              (int8x16_t)__c);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_p16(uint16x8_t __a, poly16x8_t __b, poly16x8_t __c)
{
  return (poly16x8_t)__builtin_neon_vbslv8hi((int16x8_t)__a, (int16x8_t)__b,
                                             (int16x8_t)__c);
}
/* For big-endian, the shuffle masks for ZIP, UZP and TRN must be changed as
follows. (nelt = the number of elements within a vector.)
Firstly, a value of N within a mask, becomes (N ^ (nelt - 1)), as gcc vector
extension's indexing scheme is reversed *within each vector* (relative to the
neon intrinsics view), but without changing which of the two vectors.
Secondly, the elements within each mask are reversed, as the mask is itself a
vector, and will itself be loaded in reverse order (again, relative to the
neon intrinsics view, i.e. that would result from a "vld1" instruction). */
/* vtrn (vector transpose) intrinsics on 64-bit vectors, returning a
   two-vector struct.  Built from constant __builtin_shuffle permutations:
   in the little-endian masks, val[0] interleaves the even-indexed
   elements of __a and __b and val[1] the odd-indexed ones.  The
   big-endian masks are derived from those by the transformation described
   in the comment preceding this family.  */
__extension__ extern __inline int8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrn_s8(int8x8_t __a, int8x8_t __b)
{
  int8x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){9, 1, 11, 3, 13, 5, 15, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){8, 0, 10, 2, 12, 4, 14, 6});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){0, 8, 2, 10, 4, 12, 6, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){1, 9, 3, 11, 5, 13, 7, 15});
#endif
  return __rv;
}
__extension__ extern __inline int16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrn_s16(int16x4_t __a, int16x4_t __b)
{
  int16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){5, 1, 7, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 0, 6, 2});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 4, 2, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){1, 5, 3, 7});
#endif
  return __rv;
}
__extension__ extern __inline uint8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrn_u8(uint8x8_t __a, uint8x8_t __b)
{
  uint8x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){9, 1, 11, 3, 13, 5, 15, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){8, 0, 10, 2, 12, 4, 14, 6});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){0, 8, 2, 10, 4, 12, 6, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){1, 9, 3, 11, 5, 13, 7, 15});
#endif
  return __rv;
}
__extension__ extern __inline uint16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrn_u16(uint16x4_t __a, uint16x4_t __b)
{
  uint16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){5, 1, 7, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 0, 6, 2});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 4, 2, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){1, 5, 3, 7});
#endif
  return __rv;
}
__extension__ extern __inline poly8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrn_p8(poly8x8_t __a, poly8x8_t __b)
{
  poly8x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){9, 1, 11, 3, 13, 5, 15, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){8, 0, 10, 2, 12, 4, 14, 6});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){0, 8, 2, 10, 4, 12, 6, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){1, 9, 3, 11, 5, 13, 7, 15});
#endif
  return __rv;
}
__extension__ extern __inline poly16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrn_p16(poly16x4_t __a, poly16x4_t __b)
{
  poly16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){5, 1, 7, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 0, 6, 2});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 4, 2, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){1, 5, 3, 7});
#endif
  return __rv;
}
/* For 32-bit lanes in a 64-bit vector, transpose degenerates into plain
   interleaves of the two single-pair vectors.  */
__extension__ extern __inline int32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrn_s32(int32x2_t __a, int32x2_t __b)
{
  int32x2x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){3, 1});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){2, 0});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){0, 2});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){1, 3});
#endif
  return __rv;
}
__extension__ extern __inline float32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrn_f32(float32x2_t __a, float32x2_t __b)
{
  float32x2x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){3, 1});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){2, 0});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){0, 2});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){1, 3});
#endif
  return __rv;
}
__extension__ extern __inline uint32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrn_u32(uint32x2_t __a, uint32x2_t __b)
{
  uint32x2x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){3, 1});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){2, 0});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){0, 2});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){1, 3});
#endif
  return __rv;
}
/* vtrnq: 128-bit (quadword) transpose variants.  Same even/odd element
   interleave as the 64-bit vtrn family, expressed with 16-element (or
   8-/4-element) shuffle masks; the big-endian masks follow the
   transformation described in the comment above the vtrn family.  */
__extension__ extern __inline int8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_s8(int8x16_t __a, int8x16_t __b)
{
  int8x16x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){17, 1, 19, 3, 21, 5, 23, 7, 25,
                                               9, 27, 11, 29, 13, 31, 15});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){16, 0, 18, 2, 20, 4, 22, 6, 24,
                                               8, 26, 10, 28, 12, 30, 14});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){0, 16, 2, 18, 4, 20, 6, 22, 8,
                                               24, 10, 26, 12, 28, 14, 30});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){1, 17, 3, 19, 5, 21, 7, 23, 9,
                                               25, 11, 27, 13, 29, 15, 31});
#endif
  return __rv;
}
__extension__ extern __inline int16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_s16(int16x8_t __a, int16x8_t __b)
{
  int16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){9, 1, 11, 3, 13, 5, 15, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){8, 0, 10, 2, 12, 4, 14, 6});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){0, 8, 2, 10, 4, 12, 6, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){1, 9, 3, 11, 5, 13, 7, 15});
#endif
  return __rv;
}
__extension__ extern __inline int32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_s32(int32x4_t __a, int32x4_t __b)
{
  int32x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){5, 1, 7, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){4, 0, 6, 2});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){0, 4, 2, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){1, 5, 3, 7});
#endif
  return __rv;
}
__extension__ extern __inline float32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_f32(float32x4_t __a, float32x4_t __b)
{
  float32x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){5, 1, 7, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){4, 0, 6, 2});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){0, 4, 2, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){1, 5, 3, 7});
#endif
  return __rv;
}
__extension__ extern __inline uint8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_u8(uint8x16_t __a, uint8x16_t __b)
{
  uint8x16x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){17, 1, 19, 3, 21, 5, 23, 7, 25,
                                               9, 27, 11, 29, 13, 31, 15});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){16, 0, 18, 2, 20, 4, 22, 6, 24,
                                               8, 26, 10, 28, 12, 30, 14});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){0, 16, 2, 18, 4, 20, 6, 22, 8,
                                               24, 10, 26, 12, 28, 14, 30});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){1, 17, 3, 19, 5, 21, 7, 23, 9,
                                               25, 11, 27, 13, 29, 15, 31});
#endif
  return __rv;
}
__extension__ extern __inline uint16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_u16(uint16x8_t __a, uint16x8_t __b)
{
  uint16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){9, 1, 11, 3, 13, 5, 15, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){8, 0, 10, 2, 12, 4, 14, 6});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){0, 8, 2, 10, 4, 12, 6, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){1, 9, 3, 11, 5, 13, 7, 15});
#endif
  return __rv;
}
__extension__ extern __inline uint32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_u32(uint32x4_t __a, uint32x4_t __b)
{
  uint32x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){5, 1, 7, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){4, 0, 6, 2});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){0, 4, 2, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){1, 5, 3, 7});
#endif
  return __rv;
}
__extension__ extern __inline poly8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_p8(poly8x16_t __a, poly8x16_t __b)
{
  poly8x16x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){17, 1, 19, 3, 21, 5, 23, 7, 25,
                                               9, 27, 11, 29, 13, 31, 15});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){16, 0, 18, 2, 20, 4, 22, 6, 24,
                                               8, 26, 10, 28, 12, 30, 14});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){0, 16, 2, 18, 4, 20, 6, 22, 8,
                                               24, 10, 26, 12, 28, 14, 30});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){1, 17, 3, 19, 5, 21, 7, 23, 9,
                                               25, 11, 27, 13, 29, 15, 31});
#endif
  return __rv;
}
__extension__ extern __inline poly16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_p16(poly16x8_t __a, poly16x8_t __b)
{
  poly16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){9, 1, 11, 3, 13, 5, 15, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){8, 0, 10, 2, 12, 4, 14, 6});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){0, 8, 2, 10, 4, 12, 6, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){1, 9, 3, 11, 5, 13, 7, 15});
#endif
  return __rv;
}
/* vzip (vector interleave) intrinsics on 64-bit vectors, returning a
   two-vector struct.  In the little-endian masks, val[0] interleaves the
   low halves of __a and __b ({0, 8, 1, 9, ...}) and val[1] the high
   halves ({4, 12, 5, 13, ...}); big-endian masks follow the mask
   transformation documented above the vtrn family.  */
__extension__ extern __inline int8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzip_s8(int8x8_t __a, int8x8_t __b)
{
  int8x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){12, 4, 13, 5, 14, 6, 15, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){8, 0, 9, 1, 10, 2, 11, 3});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){0, 8, 1, 9, 2, 10, 3, 11});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){4, 12, 5, 13, 6, 14, 7, 15});
#endif
  return __rv;
}
__extension__ extern __inline int16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzip_s16(int16x4_t __a, int16x4_t __b)
{
  int16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){6, 2, 7, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 0, 5, 1});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 4, 1, 5});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){2, 6, 3, 7});
#endif
  return __rv;
}
__extension__ extern __inline uint8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzip_u8(uint8x8_t __a, uint8x8_t __b)
{
  uint8x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){12, 4, 13, 5, 14, 6, 15, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){8, 0, 9, 1, 10, 2, 11, 3});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){0, 8, 1, 9, 2, 10, 3, 11});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){4, 12, 5, 13, 6, 14, 7, 15});
#endif
  return __rv;
}
__extension__ extern __inline uint16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzip_u16(uint16x4_t __a, uint16x4_t __b)
{
  uint16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){6, 2, 7, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 0, 5, 1});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 4, 1, 5});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){2, 6, 3, 7});
#endif
  return __rv;
}
__extension__ extern __inline poly8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzip_p8(poly8x8_t __a, poly8x8_t __b)
{
  poly8x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){12, 4, 13, 5, 14, 6, 15, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){8, 0, 9, 1, 10, 2, 11, 3});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){0, 8, 1, 9, 2, 10, 3, 11});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){4, 12, 5, 13, 6, 14, 7, 15});
#endif
  return __rv;
}
__extension__ extern __inline poly16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzip_p16(poly16x4_t __a, poly16x4_t __b)
{
  poly16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){6, 2, 7, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 0, 5, 1});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 4, 1, 5});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){2, 6, 3, 7});
#endif
  return __rv;
}
/* With only two 32-bit lanes, zip and transpose coincide — these masks
   match vtrn_s32/vtrn_f32/vtrn_u32.  */
__extension__ extern __inline int32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzip_s32(int32x2_t __a, int32x2_t __b)
{
  int32x2x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){3, 1});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){2, 0});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){0, 2});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){1, 3});
#endif
  return __rv;
}
__extension__ extern __inline float32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzip_f32(float32x2_t __a, float32x2_t __b)
{
  float32x2x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){3, 1});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){2, 0});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){0, 2});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){1, 3});
#endif
  return __rv;
}
__extension__ extern __inline uint32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzip_u32(uint32x2_t __a, uint32x2_t __b)
{
  uint32x2x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){3, 1});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){2, 0});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){0, 2});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){1, 3});
#endif
  return __rv;
}
/* vzipq: 128-bit (quadword) interleave variants (signed types; further
   element types follow below).  Little-endian masks put the interleaved
   low halves of __a/__b in val[0] and the high halves in val[1];
   big-endian masks are the transformed forms per the comment above the
   vtrn family.  */
__extension__ extern __inline int8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzipq_s8(int8x16_t __a, int8x16_t __b)
{
  int8x16x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(
      __a, __b,
      (uint8x16_t){20, 4, 21, 5, 22, 6, 23, 7, 16, 0, 17, 1, 18, 2, 19, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){28, 12, 29, 13, 30, 14, 31, 15,
                                               24, 8, 25, 9, 26, 10, 27, 11});
#else
  __rv.val[0] = __builtin_shuffle(
      __a, __b,
      (uint8x16_t){0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23});
  __rv.val[1] =
      __builtin_shuffle(__a, __b,
                        (uint8x16_t){8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13,
                                     29, 14, 30, 15, 31});
#endif
  return __rv;
}
__extension__ extern __inline int16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzipq_s16(int16x8_t __a, int16x8_t __b)
{
  int16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){10, 2, 11, 3, 8, 0, 9, 1});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){14, 6, 15, 7, 12, 4, 13, 5});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){0, 8, 1, 9, 2, 10, 3, 11});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){4, 12, 5, 13, 6, 14, 7, 15});
#endif
  return __rv;
}
__extension__ extern __inline int32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzipq_s32(int32x4_t __a, int32x4_t __b)
{
  int32x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){5, 1, 4, 0});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){7, 3, 6, 2});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){0, 4, 1, 5});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){2, 6, 3, 7});
#endif
  return __rv;
}
__extension__ extern __inline float32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzipq_f32(float32x4_t __a, float32x4_t __b)
{
float32x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){5, 1, 4, 0});
__rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){7, 3, 6, 2});
#else
__rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){0, 4, 1, 5});
__rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){2, 6, 3, 7});
#endif
return __rv;
}
__extension__ extern __inline uint8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzipq_u8(uint8x16_t __a, uint8x16_t __b)
{
uint8x16x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] = __builtin_shuffle(
__a, __b,
(uint8x16_t){20, 4, 21, 5, 22, 6, 23, 7, 16, 0, 17, 1, 18, 2, 19, 3});
__rv.val[1] = __builtin_shuffle(__a, __b,
(uint8x16_t){28, 12, 29, 13, 30, 14, 31, 15,
24, 8, 25, 9, 26, 10, 27, 11});
#else
__rv.val[0] = __builtin_shuffle(
__a, __b,
(uint8x16_t){0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23});
__rv.val[1] =
__builtin_shuffle(__a, __b,
(uint8x16_t){8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13,
29, 14, 30, 15, 31});
#endif
return __rv;
}
__extension__ extern __inline uint16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzipq_u16(uint16x8_t __a, uint16x8_t __b)
{
uint16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] =
__builtin_shuffle(__a, __b, (uint16x8_t){10, 2, 11, 3, 8, 0, 9, 1});
__rv.val[1] =
__builtin_shuffle(__a, __b, (uint16x8_t){14, 6, 15, 7, 12, 4, 13, 5});
#else
__rv.val[0] =
__builtin_shuffle(__a, __b, (uint16x8_t){0, 8, 1, 9, 2, 10, 3, 11});
__rv.val[1] =
__builtin_shuffle(__a, __b, (uint16x8_t){4, 12, 5, 13, 6, 14, 7, 15});
#endif
return __rv;
}
__extension__ extern __inline uint32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzipq_u32(uint32x4_t __a, uint32x4_t __b)
{
uint32x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){5, 1, 4, 0});
__rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){7, 3, 6, 2});
#else
__rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){0, 4, 1, 5});
__rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){2, 6, 3, 7});
#endif
return __rv;
}
__extension__ extern __inline poly8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzipq_p8(poly8x16_t __a, poly8x16_t __b)
{
poly8x16x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] = __builtin_shuffle(
__a, __b,
(uint8x16_t){20, 4, 21, 5, 22, 6, 23, 7, 16, 0, 17, 1, 18, 2, 19, 3});
__rv.val[1] = __builtin_shuffle(__a, __b,
(uint8x16_t){28, 12, 29, 13, 30, 14, 31, 15,
24, 8, 25, 9, 26, 10, 27, 11});
#else
__rv.val[0] = __builtin_shuffle(
__a, __b,
(uint8x16_t){0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23});
__rv.val[1] =
__builtin_shuffle(__a, __b,
(uint8x16_t){8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13,
29, 14, 30, 15, 31});
#endif
return __rv;
}
__extension__ extern __inline poly16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzipq_p16(poly16x8_t __a, poly16x8_t __b)
{
poly16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] =
__builtin_shuffle(__a, __b, (uint16x8_t){10, 2, 11, 3, 8, 0, 9, 1});
__rv.val[1] =
__builtin_shuffle(__a, __b, (uint16x8_t){14, 6, 15, 7, 12, 4, 13, 5});
#else
__rv.val[0] =
__builtin_shuffle(__a, __b, (uint16x8_t){0, 8, 1, 9, 2, 10, 3, 11});
__rv.val[1] =
__builtin_shuffle(__a, __b, (uint16x8_t){4, 12, 5, 13, 6, 14, 7, 15});
#endif
return __rv;
}
/* Vector unzip (de-interleave) intrinsics, mapping to the ARM VUZP
   operation on 64-bit vectors.  val[0] collects the even-indexed elements
   of the concatenation {__a, __b}, val[1] the odd-indexed elements.  The
   __builtin_shuffle index tables are rearranged under __ARM_BIG_ENDIAN to
   compensate for big-endian lane numbering.  */
__extension__ extern __inline int8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzp_s8(int8x8_t __a, int8x8_t __b)
{
  int8x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){9, 11, 13, 15, 1, 3, 5, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){8, 10, 12, 14, 0, 2, 4, 6});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){0, 2, 4, 6, 8, 10, 12, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){1, 3, 5, 7, 9, 11, 13, 15});
#endif
  return __rv;
}
__extension__ extern __inline int16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzp_s16(int16x4_t __a, int16x4_t __b)
{
  int16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){5, 7, 1, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 6, 0, 2});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 2, 4, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){1, 3, 5, 7});
#endif
  return __rv;
}
__extension__ extern __inline int32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzp_s32(int32x2_t __a, int32x2_t __b)
{
  int32x2x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){3, 1});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){2, 0});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){0, 2});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){1, 3});
#endif
  return __rv;
}
__extension__ extern __inline float32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzp_f32(float32x2_t __a, float32x2_t __b)
{
  float32x2x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){3, 1});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){2, 0});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){0, 2});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){1, 3});
#endif
  return __rv;
}
__extension__ extern __inline uint8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzp_u8(uint8x8_t __a, uint8x8_t __b)
{
  uint8x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){9, 11, 13, 15, 1, 3, 5, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){8, 10, 12, 14, 0, 2, 4, 6});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){0, 2, 4, 6, 8, 10, 12, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){1, 3, 5, 7, 9, 11, 13, 15});
#endif
  return __rv;
}
__extension__ extern __inline uint16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzp_u16(uint16x4_t __a, uint16x4_t __b)
{
  uint16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){5, 7, 1, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 6, 0, 2});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 2, 4, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){1, 3, 5, 7});
#endif
  return __rv;
}
__extension__ extern __inline uint32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzp_u32(uint32x2_t __a, uint32x2_t __b)
{
  uint32x2x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){3, 1});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){2, 0});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x2_t){0, 2});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x2_t){1, 3});
#endif
  return __rv;
}
/* Polynomial-element variants share the unsigned index tables.  */
__extension__ extern __inline poly8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzp_p8(poly8x8_t __a, poly8x8_t __b)
{
  poly8x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){9, 11, 13, 15, 1, 3, 5, 7});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){8, 10, 12, 14, 0, 2, 4, 6});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint8x8_t){0, 2, 4, 6, 8, 10, 12, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint8x8_t){1, 3, 5, 7, 9, 11, 13, 15});
#endif
  return __rv;
}
__extension__ extern __inline poly16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzp_p16(poly16x4_t __a, poly16x4_t __b)
{
  poly16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){5, 7, 1, 3});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 6, 0, 2});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 2, 4, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){1, 3, 5, 7});
#endif
  return __rv;
}
/* Quad-register (128-bit) unzip intrinsics (ARM VUZP on Q registers):
   val[0] collects the even-indexed elements of {__a, __b}, val[1] the
   odd-indexed ones.  Index tables are endian-adjusted, as for the
   64-bit vuzp_* variants.  */
__extension__ extern __inline int8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzpq_s8(int8x16_t __a, int8x16_t __b)
{
  int8x16x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){9, 11, 13, 15, 1, 3, 5, 7, 25,
                                               27, 29, 31, 17, 19, 21, 23});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){8, 10, 12, 14, 0, 2, 4, 6, 24,
                                               26, 28, 30, 16, 18, 20, 22});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){0, 2, 4, 6, 8, 10, 12, 14, 16,
                                               18, 20, 22, 24, 26, 28, 30});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){1, 3, 5, 7, 9, 11, 13, 15, 17,
                                               19, 21, 23, 25, 27, 29, 31});
#endif
  return __rv;
}
__extension__ extern __inline int16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzpq_s16(int16x8_t __a, int16x8_t __b)
{
  int16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){5, 7, 1, 3, 13, 15, 9, 11});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){4, 6, 0, 2, 12, 14, 8, 10});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){0, 2, 4, 6, 8, 10, 12, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){1, 3, 5, 7, 9, 11, 13, 15});
#endif
  return __rv;
}
__extension__ extern __inline int32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzpq_s32(int32x4_t __a, int32x4_t __b)
{
  int32x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){3, 1, 7, 5});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){2, 0, 6, 4});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){0, 2, 4, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){1, 3, 5, 7});
#endif
  return __rv;
}
__extension__ extern __inline float32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzpq_f32(float32x4_t __a, float32x4_t __b)
{
  float32x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){3, 1, 7, 5});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){2, 0, 6, 4});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){0, 2, 4, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){1, 3, 5, 7});
#endif
  return __rv;
}
__extension__ extern __inline uint8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzpq_u8(uint8x16_t __a, uint8x16_t __b)
{
  uint8x16x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){9, 11, 13, 15, 1, 3, 5, 7, 25,
                                               27, 29, 31, 17, 19, 21, 23});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){8, 10, 12, 14, 0, 2, 4, 6, 24,
                                               26, 28, 30, 16, 18, 20, 22});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){0, 2, 4, 6, 8, 10, 12, 14, 16,
                                               18, 20, 22, 24, 26, 28, 30});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){1, 3, 5, 7, 9, 11, 13, 15, 17,
                                               19, 21, 23, 25, 27, 29, 31});
#endif
  return __rv;
}
__extension__ extern __inline uint16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzpq_u16(uint16x8_t __a, uint16x8_t __b)
{
  uint16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){5, 7, 1, 3, 13, 15, 9, 11});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){4, 6, 0, 2, 12, 14, 8, 10});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){0, 2, 4, 6, 8, 10, 12, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){1, 3, 5, 7, 9, 11, 13, 15});
#endif
  return __rv;
}
__extension__ extern __inline uint32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzpq_u32(uint32x4_t __a, uint32x4_t __b)
{
  uint32x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){3, 1, 7, 5});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){2, 0, 6, 4});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b, (uint32x4_t){0, 2, 4, 6});
  __rv.val[1] = __builtin_shuffle(__a, __b, (uint32x4_t){1, 3, 5, 7});
#endif
  return __rv;
}
/* Polynomial-element variants share the unsigned index tables.  */
__extension__ extern __inline poly8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzpq_p8(poly8x16_t __a, poly8x16_t __b)
{
  poly8x16x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){9, 11, 13, 15, 1, 3, 5, 7, 25,
                                               27, 29, 31, 17, 19, 21, 23});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){8, 10, 12, 14, 0, 2, 4, 6, 24,
                                               26, 28, 30, 16, 18, 20, 22});
#else
  __rv.val[0] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){0, 2, 4, 6, 8, 10, 12, 14, 16,
                                               18, 20, 22, 24, 26, 28, 30});
  __rv.val[1] = __builtin_shuffle(__a, __b,
                                  (uint8x16_t){1, 3, 5, 7, 9, 11, 13, 15, 17,
                                               19, 21, 23, 25, 27, 29, 31});
#endif
  return __rv;
}
__extension__ extern __inline poly16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzpq_p16(poly16x8_t __a, poly16x8_t __b)
{
  poly16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){5, 7, 1, 3, 13, 15, 9, 11});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){4, 6, 0, 2, 12, 14, 8, 10});
#else
  __rv.val[0] =
      __builtin_shuffle(__a, __b, (uint16x8_t){0, 2, 4, 6, 8, 10, 12, 14});
  __rv.val[1] =
      __builtin_shuffle(__a, __b, (uint16x8_t){1, 3, 5, 7, 9, 11, 13, 15});
#endif
  return __rv;
}
/* vld1/vld1q full-vector loads: read one 64-bit (vld1_*) or 128-bit
   (vld1q_*) vector's worth of elements from *__a via the corresponding
   __builtin_neon_vld1* builtin, casting pointer and result between the
   public NEON types and the internal __builtin_neon_* element types.
   The poly64 variants need the crypto extension, hence the temporary
   "fpu=crypto-neon-fp-armv8" target pragmas around them.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_p64(const poly64_t* __a)
{
  return (poly64x1_t)__builtin_neon_vld1di((const __builtin_neon_di*)__a);
}
#pragma GCC pop_options
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_s8(const int8_t* __a)
{
  return (int8x8_t)__builtin_neon_vld1v8qi((const __builtin_neon_qi*)__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_s16(const int16_t* __a)
{
  return (int16x4_t)__builtin_neon_vld1v4hi((const __builtin_neon_hi*)__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_s32(const int32_t* __a)
{
  return (int32x2_t)__builtin_neon_vld1v2si((const __builtin_neon_si*)__a);
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_s64(const int64_t* __a)
{
  return (int64x1_t)__builtin_neon_vld1di((const __builtin_neon_di*)__a);
}
/* fp16 variant only exists when the target has a half-precision format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_f16(const float16_t* __a)
{
  return __builtin_neon_vld1v4hf(__a);
}
#endif
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_f32(const float32_t* __a)
{
  return (float32x2_t)__builtin_neon_vld1v2sf((const __builtin_neon_sf*)__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_u8(const uint8_t* __a)
{
  return (uint8x8_t)__builtin_neon_vld1v8qi((const __builtin_neon_qi*)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_u16(const uint16_t* __a)
{
  return (uint16x4_t)__builtin_neon_vld1v4hi((const __builtin_neon_hi*)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_u32(const uint32_t* __a)
{
  return (uint32x2_t)__builtin_neon_vld1v2si((const __builtin_neon_si*)__a);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_u64(const uint64_t* __a)
{
  return (uint64x1_t)__builtin_neon_vld1di((const __builtin_neon_di*)__a);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_p8(const poly8_t* __a)
{
  return (poly8x8_t)__builtin_neon_vld1v8qi((const __builtin_neon_qi*)__a);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_p16(const poly16_t* __a)
{
  return (poly16x4_t)__builtin_neon_vld1v4hi((const __builtin_neon_hi*)__a);
}
/* 128-bit (Q register) variants.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_p64(const poly64_t* __a)
{
  return (poly64x2_t)__builtin_neon_vld1v2di((const __builtin_neon_di*)__a);
}
#pragma GCC pop_options
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_s8(const int8_t* __a)
{
  return (int8x16_t)__builtin_neon_vld1v16qi((const __builtin_neon_qi*)__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_s16(const int16_t* __a)
{
  return (int16x8_t)__builtin_neon_vld1v8hi((const __builtin_neon_hi*)__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_s32(const int32_t* __a)
{
  return (int32x4_t)__builtin_neon_vld1v4si((const __builtin_neon_si*)__a);
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_s64(const int64_t* __a)
{
  return (int64x2_t)__builtin_neon_vld1v2di((const __builtin_neon_di*)__a);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_f16(const float16_t* __a)
{
  return __builtin_neon_vld1v8hf(__a);
}
#endif
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_f32(const float32_t* __a)
{
  return (float32x4_t)__builtin_neon_vld1v4sf((const __builtin_neon_sf*)__a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u8(const uint8_t* __a)
{
  return (uint8x16_t)__builtin_neon_vld1v16qi((const __builtin_neon_qi*)__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u16(const uint16_t* __a)
{
  return (uint16x8_t)__builtin_neon_vld1v8hi((const __builtin_neon_hi*)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u32(const uint32_t* __a)
{
  return (uint32x4_t)__builtin_neon_vld1v4si((const __builtin_neon_si*)__a);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_u64(const uint64_t* __a)
{
  return (uint64x2_t)__builtin_neon_vld1v2di((const __builtin_neon_di*)__a);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_p8(const poly8_t* __a)
{
  return (poly8x16_t)__builtin_neon_vld1v16qi((const __builtin_neon_qi*)__a);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_p16(const poly16_t* __a)
{
  return (poly16x8_t)__builtin_neon_vld1v8hi((const __builtin_neon_hi*)__a);
}
/* vld1[q]_lane loads: load a single element from *__a into lane __c of
   __b and return the result; all other lanes keep the values from __b.
   __c is a compile-time-constant lane index (checked by the builtin).
   Unsigned/poly variants cast through the signed vector types expected by
   the __builtin_neon_vld1_lane* builtins.  fp16 variants are expressed
   via vset_lane_f16 instead of a builtin.  The poly64 variants need the
   crypto extension, hence the target pragmas.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_s8(const int8_t* __a, int8x8_t __b, const int __c)
{
  return (int8x8_t)__builtin_neon_vld1_lanev8qi((const __builtin_neon_qi*)__a,
                                                __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_s16(const int16_t* __a, int16x4_t __b, const int __c)
{
  return (int16x4_t)__builtin_neon_vld1_lanev4hi(
      (const __builtin_neon_hi*)__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_s32(const int32_t* __a, int32x2_t __b, const int __c)
{
  return (int32x2_t)__builtin_neon_vld1_lanev2si(
      (const __builtin_neon_si*)__a, __b, __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_f16(const float16_t* __a, float16x4_t __b, const int __c)
{
  return vset_lane_f16(*__a, __b, __c);
}
#endif
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_f32(const float32_t* __a, float32x2_t __b, const int __c)
{
  return (float32x2_t)__builtin_neon_vld1_lanev2sf(
      (const __builtin_neon_sf*)__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_u8(const uint8_t* __a, uint8x8_t __b, const int __c)
{
  return (uint8x8_t)__builtin_neon_vld1_lanev8qi(
      (const __builtin_neon_qi*)__a, (int8x8_t)__b, __c);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_u16(const uint16_t* __a, uint16x4_t __b, const int __c)
{
  return (uint16x4_t)__builtin_neon_vld1_lanev4hi(
      (const __builtin_neon_hi*)__a, (int16x4_t)__b, __c);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_u32(const uint32_t* __a, uint32x2_t __b, const int __c)
{
  return (uint32x2_t)__builtin_neon_vld1_lanev2si(
      (const __builtin_neon_si*)__a, (int32x2_t)__b, __c);
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_p8(const poly8_t* __a, poly8x8_t __b, const int __c)
{
  return (poly8x8_t)__builtin_neon_vld1_lanev8qi(
      (const __builtin_neon_qi*)__a, (int8x8_t)__b, __c);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_p16(const poly16_t* __a, poly16x4_t __b, const int __c)
{
  return (poly16x4_t)__builtin_neon_vld1_lanev4hi(
      (const __builtin_neon_hi*)__a, (int16x4_t)__b, __c);
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_p64(const poly64_t* __a, poly64x1_t __b, const int __c)
{
  return (poly64x1_t)__builtin_neon_vld1_lanedi((const __builtin_neon_di*)__a,
                                                __b, __c);
}
#pragma GCC pop_options
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_s64(const int64_t* __a, int64x1_t __b, const int __c)
{
  return (int64x1_t)__builtin_neon_vld1_lanedi((const __builtin_neon_di*)__a,
                                               __b, __c);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_lane_u64(const uint64_t* __a, uint64x1_t __b, const int __c)
{
  return (uint64x1_t)__builtin_neon_vld1_lanedi((const __builtin_neon_di*)__a,
                                                (int64x1_t)__b, __c);
}
/* 128-bit (Q register) variants.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_s8(const int8_t* __a, int8x16_t __b, const int __c)
{
  return (int8x16_t)__builtin_neon_vld1_lanev16qi(
      (const __builtin_neon_qi*)__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_s16(const int16_t* __a, int16x8_t __b, const int __c)
{
  return (int16x8_t)__builtin_neon_vld1_lanev8hi(
      (const __builtin_neon_hi*)__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_s32(const int32_t* __a, int32x4_t __b, const int __c)
{
  return (int32x4_t)__builtin_neon_vld1_lanev4si(
      (const __builtin_neon_si*)__a, __b, __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_f16(const float16_t* __a, float16x8_t __b, const int __c)
{
  return vsetq_lane_f16(*__a, __b, __c);
}
#endif
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_f32(const float32_t* __a, float32x4_t __b, const int __c)
{
  return (float32x4_t)__builtin_neon_vld1_lanev4sf(
      (const __builtin_neon_sf*)__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_u8(const uint8_t* __a, uint8x16_t __b, const int __c)
{
  return (uint8x16_t)__builtin_neon_vld1_lanev16qi(
      (const __builtin_neon_qi*)__a, (int8x16_t)__b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_u16(const uint16_t* __a, uint16x8_t __b, const int __c)
{
  return (uint16x8_t)__builtin_neon_vld1_lanev8hi(
      (const __builtin_neon_hi*)__a, (int16x8_t)__b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_u32(const uint32_t* __a, uint32x4_t __b, const int __c)
{
  return (uint32x4_t)__builtin_neon_vld1_lanev4si(
      (const __builtin_neon_si*)__a, (int32x4_t)__b, __c);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_p8(const poly8_t* __a, poly8x16_t __b, const int __c)
{
  return (poly8x16_t)__builtin_neon_vld1_lanev16qi(
      (const __builtin_neon_qi*)__a, (int8x16_t)__b, __c);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_p16(const poly16_t* __a, poly16x8_t __b, const int __c)
{
  return (poly16x8_t)__builtin_neon_vld1_lanev8hi(
      (const __builtin_neon_hi*)__a, (int16x8_t)__b, __c);
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_p64(const poly64_t* __a, poly64x2_t __b, const int __c)
{
  return (poly64x2_t)__builtin_neon_vld1_lanev2di(
      (const __builtin_neon_di*)__a, (int64x2_t)__b, __c);
}
#pragma GCC pop_options
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_s64(const int64_t* __a, int64x2_t __b, const int __c)
{
  return (int64x2_t)__builtin_neon_vld1_lanev2di(
      (const __builtin_neon_di*)__a, __b, __c);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_lane_u64(const uint64_t* __a, uint64x2_t __b, const int __c)
{
  return (uint64x2_t)__builtin_neon_vld1_lanev2di(
      (const __builtin_neon_di*)__a, (int64x2_t)__b, __c);
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_s8(const int8_t* __a)
{
return (int8x8_t)__builtin_neon_vld1_dupv8qi((const __builtin_neon_qi*)__a);
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_s16(const int16_t* __a)
{
return (int16x4_t)__builtin_neon_vld1_dupv4hi(
(const __builtin_neon_hi*)__a);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_s32(const int32_t* __a)
{
return (int32x2_t)__builtin_neon_vld1_dupv2si(
(const __builtin_neon_si*)__a);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_f16(const float16_t* __a)
{
float16_t __f = *__a;
return (float16x4_t){__f, __f, __f, __f};
}
#endif
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_f32(const float32_t* __a)
{
return (float32x2_t)__builtin_neon_vld1_dupv2sf(
(const __builtin_neon_sf*)__a);
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_u8(const uint8_t* __a)
{
return (uint8x8_t)__builtin_neon_vld1_dupv8qi(
(const __builtin_neon_qi*)__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_u16(const uint16_t* __a)
{
return (uint16x4_t)__builtin_neon_vld1_dupv4hi(
(const __builtin_neon_hi*)__a);
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_u32(const uint32_t* __a)
{
return (uint32x2_t)__builtin_neon_vld1_dupv2si(
(const __builtin_neon_si*)__a);
}
/* vld1_dup_* / vld1q_dup_*: load a single element from __a and replicate
   it into every lane of the returned vector (64-bit "d" forms without the
   q, 128-bit "q" forms with it).  Each wrapper casts the pointer to the
   builtin's element type, forwards to the matching
   __builtin_neon_vld1_dup* builtin, and casts the result back to the
   public ACLE vector type.  */
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_p8(const poly8_t* __a)
{
    return (poly8x8_t)__builtin_neon_vld1_dupv8qi(
        (const __builtin_neon_qi*)__a);
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_p16(const poly16_t* __a)
{
    return (poly16x4_t)__builtin_neon_vld1_dupv4hi(
        (const __builtin_neon_hi*)__a);
}
/* poly64 variants require the ARMv8 crypto extension, hence the
   temporary target pragma around them.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_p64(const poly64_t* __a)
{
    return (poly64x1_t)__builtin_neon_vld1_dupdi((const __builtin_neon_di*)__a);
}
#pragma GCC pop_options
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_s64(const int64_t* __a)
{
    return (int64x1_t)__builtin_neon_vld1_dupdi((const __builtin_neon_di*)__a);
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1_dup_u64(const uint64_t* __a)
{
    return (uint64x1_t)__builtin_neon_vld1_dupdi((const __builtin_neon_di*)__a);
}
/* 128-bit (quad-register) duplicating loads.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_s8(const int8_t* __a)
{
    return (int8x16_t)__builtin_neon_vld1_dupv16qi(
        (const __builtin_neon_qi*)__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_s16(const int16_t* __a)
{
    return (int16x8_t)__builtin_neon_vld1_dupv8hi(
        (const __builtin_neon_hi*)__a);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_s32(const int32_t* __a)
{
    return (int32x4_t)__builtin_neon_vld1_dupv4si(
        (const __builtin_neon_si*)__a);
}
/* FP16 variant only exists when a half-precision format is configured.
   Note it broadcasts via a vector literal instead of a builtin.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_f16(const float16_t* __a)
{
    float16_t __f = *__a;
    return (float16x8_t){__f, __f, __f, __f, __f, __f, __f, __f};
}
#endif
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_f32(const float32_t* __a)
{
    return (float32x4_t)__builtin_neon_vld1_dupv4sf(
        (const __builtin_neon_sf*)__a);
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_u8(const uint8_t* __a)
{
    return (uint8x16_t)__builtin_neon_vld1_dupv16qi(
        (const __builtin_neon_qi*)__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_u16(const uint16_t* __a)
{
    return (uint16x8_t)__builtin_neon_vld1_dupv8hi(
        (const __builtin_neon_hi*)__a);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_u32(const uint32_t* __a)
{
    return (uint32x4_t)__builtin_neon_vld1_dupv4si(
        (const __builtin_neon_si*)__a);
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_p8(const poly8_t* __a)
{
    return (poly8x16_t)__builtin_neon_vld1_dupv16qi(
        (const __builtin_neon_qi*)__a);
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_p16(const poly16_t* __a)
{
    return (poly16x8_t)__builtin_neon_vld1_dupv8hi(
        (const __builtin_neon_hi*)__a);
}
/* poly64 again needs the crypto extension.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_p64(const poly64_t* __a)
{
    return (poly64x2_t)__builtin_neon_vld1_dupv2di(
        (const __builtin_neon_di*)__a);
}
#pragma GCC pop_options
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_s64(const int64_t* __a)
{
    return (int64x2_t)__builtin_neon_vld1_dupv2di(
        (const __builtin_neon_di*)__a);
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld1q_dup_u64(const uint64_t* __a)
{
    return (uint64x2_t)__builtin_neon_vld1_dupv2di(
        (const __builtin_neon_di*)__a);
}
/* vst1_* / vst1q_*: store a whole 64-bit (d) or 128-bit (q) vector __b to
   memory at __a.  Unsigned and polynomial inputs are reinterpret-cast to
   the corresponding signed vector type, since the builtins are declared
   on the signed types only.  poly64 variants require the ARMv8 crypto
   extension (target pragma).  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_p64(poly64_t* __a, poly64x1_t __b)
{
    __builtin_neon_vst1di((__builtin_neon_di*)__a, __b);
}
#pragma GCC pop_options
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_s8(int8_t* __a, int8x8_t __b)
{
    __builtin_neon_vst1v8qi((__builtin_neon_qi*)__a, __b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_s16(int16_t* __a, int16x4_t __b)
{
    __builtin_neon_vst1v4hi((__builtin_neon_hi*)__a, __b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_s32(int32_t* __a, int32x2_t __b)
{
    __builtin_neon_vst1v2si((__builtin_neon_si*)__a, __b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_s64(int64_t* __a, int64x1_t __b)
{
    __builtin_neon_vst1di((__builtin_neon_di*)__a, __b);
}
/* FP16 variant only when a half-precision format is configured.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_f16(float16_t* __a, float16x4_t __b)
{
    __builtin_neon_vst1v4hf(__a, __b);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_f32(float32_t* __a, float32x2_t __b)
{
    __builtin_neon_vst1v2sf((__builtin_neon_sf*)__a, __b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_u8(uint8_t* __a, uint8x8_t __b)
{
    __builtin_neon_vst1v8qi((__builtin_neon_qi*)__a, (int8x8_t)__b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_u16(uint16_t* __a, uint16x4_t __b)
{
    __builtin_neon_vst1v4hi((__builtin_neon_hi*)__a, (int16x4_t)__b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_u32(uint32_t* __a, uint32x2_t __b)
{
    __builtin_neon_vst1v2si((__builtin_neon_si*)__a, (int32x2_t)__b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_u64(uint64_t* __a, uint64x1_t __b)
{
    __builtin_neon_vst1di((__builtin_neon_di*)__a, (int64x1_t)__b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_p8(poly8_t* __a, poly8x8_t __b)
{
    __builtin_neon_vst1v8qi((__builtin_neon_qi*)__a, (int8x8_t)__b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_p16(poly16_t* __a, poly16x4_t __b)
{
    __builtin_neon_vst1v4hi((__builtin_neon_hi*)__a, (int16x4_t)__b);
}
/* 128-bit (quad-register) stores.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_p64(poly64_t* __a, poly64x2_t __b)
{
    __builtin_neon_vst1v2di((__builtin_neon_di*)__a, (int64x2_t)__b);
}
#pragma GCC pop_options
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_s8(int8_t* __a, int8x16_t __b)
{
    __builtin_neon_vst1v16qi((__builtin_neon_qi*)__a, __b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_s16(int16_t* __a, int16x8_t __b)
{
    __builtin_neon_vst1v8hi((__builtin_neon_hi*)__a, __b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_s32(int32_t* __a, int32x4_t __b)
{
    __builtin_neon_vst1v4si((__builtin_neon_si*)__a, __b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_s64(int64_t* __a, int64x2_t __b)
{
    __builtin_neon_vst1v2di((__builtin_neon_di*)__a, __b);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_f16(float16_t* __a, float16x8_t __b)
{
    __builtin_neon_vst1v8hf(__a, __b);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_f32(float32_t* __a, float32x4_t __b)
{
    __builtin_neon_vst1v4sf((__builtin_neon_sf*)__a, __b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u8(uint8_t* __a, uint8x16_t __b)
{
    __builtin_neon_vst1v16qi((__builtin_neon_qi*)__a, (int8x16_t)__b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u16(uint16_t* __a, uint16x8_t __b)
{
    __builtin_neon_vst1v8hi((__builtin_neon_hi*)__a, (int16x8_t)__b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u32(uint32_t* __a, uint32x4_t __b)
{
    __builtin_neon_vst1v4si((__builtin_neon_si*)__a, (int32x4_t)__b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_u64(uint64_t* __a, uint64x2_t __b)
{
    __builtin_neon_vst1v2di((__builtin_neon_di*)__a, (int64x2_t)__b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_p8(poly8_t* __a, poly8x16_t __b)
{
    __builtin_neon_vst1v16qi((__builtin_neon_qi*)__a, (int8x16_t)__b);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_p16(poly16_t* __a, poly16x8_t __b)
{
    __builtin_neon_vst1v8hi((__builtin_neon_hi*)__a, (int16x8_t)__b);
}
/* vst1_lane_* / vst1q_lane_*: store only lane __c of vector __b to memory
   at __a.  __c must be a compile-time constant lane index (the builtin
   enforces the valid range).  Unsigned/polynomial inputs are
   reinterpret-cast to the signed vector type expected by the builtin.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_s8(int8_t* __a, int8x8_t __b, const int __c)
{
    __builtin_neon_vst1_lanev8qi((__builtin_neon_qi*)__a, __b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_s16(int16_t* __a, int16x4_t __b, const int __c)
{
    __builtin_neon_vst1_lanev4hi((__builtin_neon_hi*)__a, __b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_s32(int32_t* __a, int32x2_t __b, const int __c)
{
    __builtin_neon_vst1_lanev2si((__builtin_neon_si*)__a, __b, __c);
}
/* FP16 variant only when a half-precision format is configured.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_f16(float16_t* __a, float16x4_t __b, const int __c)
{
    __builtin_neon_vst1_lanev4hf(__a, __b, __c);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_f32(float32_t* __a, float32x2_t __b, const int __c)
{
    __builtin_neon_vst1_lanev2sf((__builtin_neon_sf*)__a, __b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_u8(uint8_t* __a, uint8x8_t __b, const int __c)
{
    __builtin_neon_vst1_lanev8qi((__builtin_neon_qi*)__a, (int8x8_t)__b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_u16(uint16_t* __a, uint16x4_t __b, const int __c)
{
    __builtin_neon_vst1_lanev4hi((__builtin_neon_hi*)__a, (int16x4_t)__b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_u32(uint32_t* __a, uint32x2_t __b, const int __c)
{
    __builtin_neon_vst1_lanev2si((__builtin_neon_si*)__a, (int32x2_t)__b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_p8(poly8_t* __a, poly8x8_t __b, const int __c)
{
    __builtin_neon_vst1_lanev8qi((__builtin_neon_qi*)__a, (int8x8_t)__b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_p16(poly16_t* __a, poly16x4_t __b, const int __c)
{
    __builtin_neon_vst1_lanev4hi((__builtin_neon_hi*)__a, (int16x4_t)__b, __c);
}
/* poly64 requires the ARMv8 crypto extension.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_p64(poly64_t* __a, poly64x1_t __b, const int __c)
{
    __builtin_neon_vst1_lanedi((__builtin_neon_di*)__a, __b, __c);
}
#pragma GCC pop_options
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_s64(int64_t* __a, int64x1_t __b, const int __c)
{
    __builtin_neon_vst1_lanedi((__builtin_neon_di*)__a, __b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1_lane_u64(uint64_t* __a, uint64x1_t __b, const int __c)
{
    __builtin_neon_vst1_lanedi((__builtin_neon_di*)__a, (int64x1_t)__b, __c);
}
/* 128-bit (quad-register) single-lane stores.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_s8(int8_t* __a, int8x16_t __b, const int __c)
{
    __builtin_neon_vst1_lanev16qi((__builtin_neon_qi*)__a, __b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_s16(int16_t* __a, int16x8_t __b, const int __c)
{
    __builtin_neon_vst1_lanev8hi((__builtin_neon_hi*)__a, __b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_s32(int32_t* __a, int32x4_t __b, const int __c)
{
    __builtin_neon_vst1_lanev4si((__builtin_neon_si*)__a, __b, __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_f16(float16_t* __a, float16x8_t __b, const int __c)
{
    __builtin_neon_vst1_lanev8hf(__a, __b, __c);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_f32(float32_t* __a, float32x4_t __b, const int __c)
{
    __builtin_neon_vst1_lanev4sf((__builtin_neon_sf*)__a, __b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_u8(uint8_t* __a, uint8x16_t __b, const int __c)
{
    __builtin_neon_vst1_lanev16qi((__builtin_neon_qi*)__a, (int8x16_t)__b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_u16(uint16_t* __a, uint16x8_t __b, const int __c)
{
    __builtin_neon_vst1_lanev8hi((__builtin_neon_hi*)__a, (int16x8_t)__b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_u32(uint32_t* __a, uint32x4_t __b, const int __c)
{
    __builtin_neon_vst1_lanev4si((__builtin_neon_si*)__a, (int32x4_t)__b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_p8(poly8_t* __a, poly8x16_t __b, const int __c)
{
    __builtin_neon_vst1_lanev16qi((__builtin_neon_qi*)__a, (int8x16_t)__b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_p16(poly16_t* __a, poly16x8_t __b, const int __c)
{
    __builtin_neon_vst1_lanev8hi((__builtin_neon_hi*)__a, (int16x8_t)__b, __c);
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_p64(poly64_t* __a, poly64x2_t __b, const int __c)
{
    __builtin_neon_vst1_lanev2di((__builtin_neon_di*)__a, (int64x2_t)__b, __c);
}
#pragma GCC pop_options
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_s64(int64_t* __a, int64x2_t __b, const int __c)
{
    __builtin_neon_vst1_lanev2di((__builtin_neon_di*)__a, __b, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst1q_lane_u64(uint64_t* __a, uint64x2_t __b, const int __c)
{
    __builtin_neon_vst1_lanev2di((__builtin_neon_di*)__a, (int64x2_t)__b, __c);
}
/* vld2_* / vld2q_*: 2-way interleaved load.  The builtin returns an
   opaque aggregate (__builtin_neon_ti for the 128-bit pair of d-regs,
   __builtin_neon_oi for the 256-bit pair of q-regs); a union reinterprets
   it as the public two-vector struct type without a copy.  */
__extension__ extern __inline int8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_s8(const int8_t* __a)
{
    union
    {
        int8x8x2_t __i;
        __builtin_neon_ti __o;   /* opaque 128-bit builtin return type */
    } __rv;
    __rv.__o = __builtin_neon_vld2v8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_s16(const int16_t* __a)
{
    union
    {
        int16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_s32(const int32_t* __a)
{
    union
    {
        int32x2x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
/* FP16 variant only when a half-precision format is configured.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_f16(const float16_t* __a)
{
    union
    {
        float16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v4hf(__a);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_f32(const float32_t* __a)
{
    union
    {
        float32x2x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v2sf((const __builtin_neon_sf*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_u8(const uint8_t* __a)
{
    union
    {
        uint8x8x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_u16(const uint16_t* __a)
{
    union
    {
        uint16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_u32(const uint32_t* __a)
{
    union
    {
        uint32x2x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_p8(const poly8_t* __a)
{
    union
    {
        poly8x8x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_p16(const poly16_t* __a)
{
    union
    {
        poly16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
/* poly64 requires the ARMv8 crypto extension.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_p64(const poly64_t* __a)
{
    union
    {
        poly64x1x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2di((const __builtin_neon_di*)__a);
    return __rv.__i;
}
#pragma GCC pop_options
__extension__ extern __inline int64x1x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_s64(const int64_t* __a)
{
    union
    {
        int64x1x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2di((const __builtin_neon_di*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint64x1x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_u64(const uint64_t* __a)
{
    union
    {
        uint64x1x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2di((const __builtin_neon_di*)__a);
    return __rv.__i;
}
/* 128-bit (quad-register) variants; note the wider opaque type
   __builtin_neon_oi for the pair of q registers.  */
__extension__ extern __inline int8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_s8(const int8_t* __a)
{
    union
    {
        int8x16x2_t __i;
        __builtin_neon_oi __o;   /* opaque 256-bit builtin return type */
    } __rv;
    __rv.__o = __builtin_neon_vld2v16qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_s16(const int16_t* __a)
{
    union
    {
        int16x8x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v8hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_s32(const int32_t* __a)
{
    union
    {
        int32x4x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v4si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_f16(const float16_t* __a)
{
    union
    {
        float16x8x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v8hf(__a);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_f32(const float32_t* __a)
{
    union
    {
        float32x4x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v4sf((const __builtin_neon_sf*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_u8(const uint8_t* __a)
{
    union
    {
        uint8x16x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v16qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_u16(const uint16_t* __a)
{
    union
    {
        uint16x8x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v8hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_u32(const uint32_t* __a)
{
    union
    {
        uint32x4x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v4si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly8x16x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_p8(const poly8_t* __a)
{
    union
    {
        poly8x16x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v16qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_p16(const poly16_t* __a)
{
    union
    {
        poly16x8x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2v8hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
/* vld2_lane_* / vld2q_lane_*: load one 2-element group into lane __c of
   the vector pair __b, leaving the other lanes unchanged.  __c must be a
   compile-time constant lane index.  Two unions bridge between the public
   two-vector struct type and the builtin's opaque aggregate: __bu packs
   the incoming pair for the builtin, __rv unpacks its result.  */
__extension__ extern __inline int8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_lane_s8(const int8_t* __a, int8x8x2_t __b, const int __c)
{
    union
    {
        int8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    union
    {
        int8x8x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev8qi((const __builtin_neon_qi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline int16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_lane_s16(const int16_t* __a, int16x4x2_t __b, const int __c)
{
    union
    {
        int16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    union
    {
        int16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev4hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline int32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_lane_s32(const int32_t* __a, int32x2x2_t __b, const int __c)
{
    union
    {
        int32x2x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    union
    {
        int32x2x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev2si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
/* FP16 variant only when a half-precision format is configured.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_lane_f16(const float16_t* __a, float16x4x2_t __b, const int __c)
{
    union
    {
        float16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    union
    {
        float16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev4hf(__a, __bu.__o, __c);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_lane_f32(const float32_t* __a, float32x2x2_t __b, const int __c)
{
    union
    {
        float32x2x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    union
    {
        float32x2x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev2sf((const __builtin_neon_sf*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_lane_u8(const uint8_t* __a, uint8x8x2_t __b, const int __c)
{
    union
    {
        uint8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    union
    {
        uint8x8x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev8qi((const __builtin_neon_qi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_lane_u16(const uint16_t* __a, uint16x4x2_t __b, const int __c)
{
    union
    {
        uint16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    union
    {
        uint16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev4hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_lane_u32(const uint32_t* __a, uint32x2x2_t __b, const int __c)
{
    union
    {
        uint32x2x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    union
    {
        uint32x2x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev2si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline poly8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_lane_p8(const poly8_t* __a, poly8x8x2_t __b, const int __c)
{
    union
    {
        poly8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    union
    {
        poly8x8x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev8qi((const __builtin_neon_qi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline poly16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_lane_p16(const poly16_t* __a, poly16x4x2_t __b, const int __c)
{
    union
    {
        poly16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    union
    {
        poly16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev4hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
/* 128-bit (quad-register) lane loads; only 16- and 32-bit element types
   exist here, using the wider opaque __builtin_neon_oi aggregate.  */
__extension__ extern __inline int16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_lane_s16(const int16_t* __a, int16x8x2_t __b, const int __c)
{
    union
    {
        int16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        int16x8x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev8hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline int32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_lane_s32(const int32_t* __a, int32x4x2_t __b, const int __c)
{
    union
    {
        int32x4x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        int32x4x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev4si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_lane_f16(const float16_t* __a, float16x8x2_t __b, const int __c)
{
    union
    {
        float16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        float16x8x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev8hf(__a, __bu.__o, __c);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_lane_f32(const float32_t* __a, float32x4x2_t __b, const int __c)
{
    union
    {
        float32x4x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        float32x4x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev4sf((const __builtin_neon_sf*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_lane_u16(const uint16_t* __a, uint16x8x2_t __b, const int __c)
{
    union
    {
        uint16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        uint16x8x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev8hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint32x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_lane_u32(const uint32_t* __a, uint32x4x2_t __b, const int __c)
{
    union
    {
        uint32x4x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        uint32x4x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev4si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline poly16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2q_lane_p16(const poly16_t* __a, poly16x8x2_t __b, const int __c)
{
    union
    {
        poly16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        poly16x8x2_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_lanev8hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
/* vld2_dup_*: load one 2-element group from __a and replicate it across
   all lanes of both returned vectors.  The same union trick as vld2
   reinterprets the builtin's opaque __builtin_neon_ti return value as the
   public two-vector struct type.  */
__extension__ extern __inline int8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_s8(const int8_t* __a)
{
    union
    {
        int8x8x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupv8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_s16(const int16_t* __a)
{
    union
    {
        int16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupv4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_s32(const int32_t* __a)
{
    union
    {
        int32x2x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupv2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
/* FP16 variant only when a half-precision format is configured.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_f16(const float16_t* __a)
{
    union
    {
        float16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupv4hf(__a);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_f32(const float32_t* __a)
{
    union
    {
        float32x2x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupv2sf((const __builtin_neon_sf*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_u8(const uint8_t* __a)
{
    union
    {
        uint8x8x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupv8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_u16(const uint16_t* __a)
{
    union
    {
        uint16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupv4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint32x2x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_u32(const uint32_t* __a)
{
    union
    {
        uint32x2x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupv2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly8x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_p8(const poly8_t* __a)
{
    union
    {
        poly8x8x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupv8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_p16(const poly16_t* __a)
{
    union
    {
        poly16x4x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupv4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
/* poly64 requires the ARMv8 crypto extension.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_p64(const poly64_t* __a)
{
    union
    {
        poly64x1x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupdi((const __builtin_neon_di*)__a);
    return __rv.__i;
}
#pragma GCC pop_options
__extension__ extern __inline int64x1x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_s64(const int64_t* __a)
{
    union
    {
        int64x1x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupdi((const __builtin_neon_di*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint64x1x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld2_dup_u64(const uint64_t* __a)
{
    union
    {
        uint64x1x2_t __i;
        __builtin_neon_ti __o;
    } __rv;
    __rv.__o = __builtin_neon_vld2_dupdi((const __builtin_neon_di*)__a);
    return __rv.__i;
}
/* vst2_<type>: interleaved store of a pair of 64-bit vectors to *__a
   (ARM VST2).  The union initializer `__bu = {__b}` fills the first
   member, so the public <type>x2x2_t tuple is reinterpreted as the
   TI-mode operand the builtin expects.  float16 variants are only
   available when an FP16 format is configured; poly64 needs the crypto
   extension (see the pragma pair below). */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_s8(int8_t* __a, int8x8x2_t __b)
{
    union
    {
        int8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2v8qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_s16(int16_t* __a, int16x4x2_t __b)
{
    union
    {
        int16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2v4hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_s32(int32_t* __a, int32x2x2_t __b)
{
    union
    {
        int32x2x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2v2si((__builtin_neon_si*)__a, __bu.__o);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_f16(float16_t* __a, float16x4x2_t __b)
{
    union
    {
        float16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2v4hf(__a, __bu.__o);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_f32(float32_t* __a, float32x2x2_t __b)
{
    union
    {
        float32x2x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2v2sf((__builtin_neon_sf*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_u8(uint8_t* __a, uint8x8x2_t __b)
{
    union
    {
        uint8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2v8qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_u16(uint16_t* __a, uint16x4x2_t __b)
{
    union
    {
        uint16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2v4hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_u32(uint32_t* __a, uint32x2x2_t __b)
{
    union
    {
        uint32x2x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2v2si((__builtin_neon_si*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_p8(poly8_t* __a, poly8x8x2_t __b)
{
    union
    {
        poly8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2v8qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_p16(poly16_t* __a, poly16x4x2_t __b)
{
    union
    {
        poly16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2v4hi((__builtin_neon_hi*)__a, __bu.__o);
}
/* poly64 support requires the crypto extension; temporarily retarget. */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_p64(poly64_t* __a, poly64x1x2_t __b)
{
    union
    {
        poly64x1x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2di((__builtin_neon_di*)__a, __bu.__o);
}
#pragma GCC pop_options
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_s64(int64_t* __a, int64x1x2_t __b)
{
    union
    {
        int64x1x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2di((__builtin_neon_di*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_u64(uint64_t* __a, uint64x1x2_t __b)
{
    union
    {
        uint64x1x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2di((__builtin_neon_di*)__a, __bu.__o);
}
/* vst2q_<type>: interleaved store of a pair of 128-bit (quad) vectors to
   *__a.  Same union-punning pattern as the vst2_ d-register variants, but
   the tuple is carried in OI mode (two 128-bit registers' worth of data)
   rather than TI mode. */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_s8(int8_t* __a, int8x16x2_t __b)
{
    union
    {
        int8x16x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2v16qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_s16(int16_t* __a, int16x8x2_t __b)
{
    union
    {
        int16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2v8hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_s32(int32_t* __a, int32x4x2_t __b)
{
    union
    {
        int32x4x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2v4si((__builtin_neon_si*)__a, __bu.__o);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_f16(float16_t* __a, float16x8x2_t __b)
{
    union
    {
        float16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2v8hf(__a, __bu.__o);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_f32(float32_t* __a, float32x4x2_t __b)
{
    union
    {
        float32x4x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2v4sf((__builtin_neon_sf*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_u8(uint8_t* __a, uint8x16x2_t __b)
{
    union
    {
        uint8x16x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2v16qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_u16(uint16_t* __a, uint16x8x2_t __b)
{
    union
    {
        uint16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2v8hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_u32(uint32_t* __a, uint32x4x2_t __b)
{
    union
    {
        uint32x4x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2v4si((__builtin_neon_si*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_p8(poly8_t* __a, poly8x16x2_t __b)
{
    union
    {
        poly8x16x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2v16qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_p16(poly16_t* __a, poly16x8x2_t __b)
{
    union
    {
        poly16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2v8hi((__builtin_neon_hi*)__a, __bu.__o);
}
/* vst2_lane_<type>: store the single lane __c from each vector of the
   64-bit pair __b to *__a (ARM VST2, single-lane form).  __c must be a
   compile-time constant lane index; the builtin enforces its range. */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_s8(int8_t* __a, int8x8x2_t __b, const int __c)
{
    union
    {
        int8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev8qi((__builtin_neon_qi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_s16(int16_t* __a, int16x4x2_t __b, const int __c)
{
    union
    {
        int16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev4hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_s32(int32_t* __a, int32x2x2_t __b, const int __c)
{
    union
    {
        int32x2x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev2si((__builtin_neon_si*)__a, __bu.__o, __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_f16(float16_t* __a, float16x4x2_t __b, const int __c)
{
    union
    {
        float16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev4hf(__a, __bu.__o, __c);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_f32(float32_t* __a, float32x2x2_t __b, const int __c)
{
    union
    {
        float32x2x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev2sf((__builtin_neon_sf*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_u8(uint8_t* __a, uint8x8x2_t __b, const int __c)
{
    union
    {
        uint8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev8qi((__builtin_neon_qi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_u16(uint16_t* __a, uint16x4x2_t __b, const int __c)
{
    union
    {
        uint16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev4hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_u32(uint32_t* __a, uint32x2x2_t __b, const int __c)
{
    union
    {
        uint32x2x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev2si((__builtin_neon_si*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_p8(poly8_t* __a, poly8x8x2_t __b, const int __c)
{
    union
    {
        poly8x8x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev8qi((__builtin_neon_qi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2_lane_p16(poly16_t* __a, poly16x4x2_t __b, const int __c)
{
    union
    {
        poly16x4x2_t __i;
        __builtin_neon_ti __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev4hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
/* vst2q_lane_<type>: store lane __c from each vector of the 128-bit pair
   __b to *__a.  Only 16- and 32-bit element types exist here — the quad
   single-lane form has no 8-bit variant in this instruction set.  The
   tuple travels in OI mode. */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_s16(int16_t* __a, int16x8x2_t __b, const int __c)
{
    union
    {
        int16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev8hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_s32(int32_t* __a, int32x4x2_t __b, const int __c)
{
    union
    {
        int32x4x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev4si((__builtin_neon_si*)__a, __bu.__o, __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_f16(float16_t* __a, float16x8x2_t __b, const int __c)
{
    union
    {
        float16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev8hf(__a, __bu.__o, __c);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_f32(float32_t* __a, float32x4x2_t __b, const int __c)
{
    union
    {
        float32x4x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev4sf((__builtin_neon_sf*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_u16(uint16_t* __a, uint16x8x2_t __b, const int __c)
{
    union
    {
        uint16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev8hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_u32(uint32_t* __a, uint32x4x2_t __b, const int __c)
{
    union
    {
        uint32x4x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev4si((__builtin_neon_si*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst2q_lane_p16(poly16_t* __a, poly16x8x2_t __b, const int __c)
{
    union
    {
        poly16x8x2_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst2_lanev8hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
/* vld3_<type>: de-interleaving load of three 64-bit vectors from *__a
   (ARM VLD3).  The builtin returns an opaque EI-mode value (three D
   registers' worth) which the union reinterprets as the public
   <type>x3_t triple.  float16 needs an FP16 format; poly64 needs the
   crypto extension (pragma pair below); the x1 64-bit variants load
   three consecutive elements. */
__extension__ extern __inline int8x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_s8(const int8_t* __a)
{
    union
    {
        int8x8x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_s16(const int16_t* __a)
{
    union
    {
        int16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int32x2x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_s32(const int32_t* __a)
{
    union
    {
        int32x2x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_f16(const float16_t* __a)
{
    union
    {
        float16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v4hf(__a);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x2x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_f32(const float32_t* __a)
{
    union
    {
        float32x2x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v2sf((const __builtin_neon_sf*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint8x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_u8(const uint8_t* __a)
{
    union
    {
        uint8x8x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_u16(const uint16_t* __a)
{
    union
    {
        uint16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint32x2x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_u32(const uint32_t* __a)
{
    union
    {
        uint32x2x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly8x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_p8(const poly8_t* __a)
{
    union
    {
        poly8x8x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_p16(const poly16_t* __a)
{
    union
    {
        poly16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
/* poly64 support requires the crypto extension; temporarily retarget. */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_p64(const poly64_t* __a)
{
    union
    {
        poly64x1x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3di((const __builtin_neon_di*)__a);
    return __rv.__i;
}
#pragma GCC pop_options
__extension__ extern __inline int64x1x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_s64(const int64_t* __a)
{
    union
    {
        int64x1x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3di((const __builtin_neon_di*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint64x1x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_u64(const uint64_t* __a)
{
    union
    {
        uint64x1x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3di((const __builtin_neon_di*)__a);
    return __rv.__i;
}
/* vld3q_<type>: de-interleaving load of three 128-bit (quad) vectors from
   *__a.  Same pattern as vld3_, but the result travels in CI mode (three
   Q registers' worth of data) before the union reinterprets it as the
   public <type>x3_t triple. */
__extension__ extern __inline int8x16x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_s8(const int8_t* __a)
{
    union
    {
        int8x16x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v16qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int16x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_s16(const int16_t* __a)
{
    union
    {
        int16x8x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v8hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int32x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_s32(const int32_t* __a)
{
    union
    {
        int32x4x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v4si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_f16(const float16_t* __a)
{
    union
    {
        float16x8x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v8hf(__a);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_f32(const float32_t* __a)
{
    union
    {
        float32x4x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v4sf((const __builtin_neon_sf*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint8x16x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_u8(const uint8_t* __a)
{
    union
    {
        uint8x16x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v16qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint16x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_u16(const uint16_t* __a)
{
    union
    {
        uint16x8x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v8hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint32x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_u32(const uint32_t* __a)
{
    union
    {
        uint32x4x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v4si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly8x16x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_p8(const poly8_t* __a)
{
    union
    {
        poly8x16x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v16qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly16x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_p16(const poly16_t* __a)
{
    union
    {
        poly16x8x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3v8hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
/* vld3_lane_<type>: load one 3-element structure from *__a into lane __c
   of each vector of the 64-bit triple; all other lanes are taken from the
   incoming tuple __b.  Two unions are needed: __bu converts the input
   triple to the builtin's EI-mode operand, __rv converts the EI-mode
   result back.  __c must be a compile-time constant lane index. */
__extension__ extern __inline int8x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_lane_s8(const int8_t* __a, int8x8x3_t __b, const int __c)
{
    union
    {
        int8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    union
    {
        int8x8x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev8qi((const __builtin_neon_qi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline int16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_lane_s16(const int16_t* __a, int16x4x3_t __b, const int __c)
{
    union
    {
        int16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    union
    {
        int16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev4hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline int32x2x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_lane_s32(const int32_t* __a, int32x2x3_t __b, const int __c)
{
    union
    {
        int32x2x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    union
    {
        int32x2x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev2si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_lane_f16(const float16_t* __a, float16x4x3_t __b, const int __c)
{
    union
    {
        float16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    union
    {
        float16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev4hf(__a, __bu.__o, __c);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x2x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_lane_f32(const float32_t* __a, float32x2x3_t __b, const int __c)
{
    union
    {
        float32x2x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    union
    {
        float32x2x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev2sf((const __builtin_neon_sf*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint8x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_lane_u8(const uint8_t* __a, uint8x8x3_t __b, const int __c)
{
    union
    {
        uint8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    union
    {
        uint8x8x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev8qi((const __builtin_neon_qi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_lane_u16(const uint16_t* __a, uint16x4x3_t __b, const int __c)
{
    union
    {
        uint16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    union
    {
        uint16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev4hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint32x2x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_lane_u32(const uint32_t* __a, uint32x2x3_t __b, const int __c)
{
    union
    {
        uint32x2x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    union
    {
        uint32x2x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev2si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline poly8x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_lane_p8(const poly8_t* __a, poly8x8x3_t __b, const int __c)
{
    union
    {
        poly8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    union
    {
        poly8x8x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev8qi((const __builtin_neon_qi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline poly16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_lane_p16(const poly16_t* __a, poly16x4x3_t __b, const int __c)
{
    union
    {
        poly16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    union
    {
        poly16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev4hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
/* vld3q_lane_<type>: load one 3-element structure from *__a into lane __c
   of each vector of the 128-bit triple, other lanes coming from __b.
   Quad tuples travel in CI mode; only 16- and 32-bit element types exist
   for the quad single-lane form. */
__extension__ extern __inline int16x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_lane_s16(const int16_t* __a, int16x8x3_t __b, const int __c)
{
    union
    {
        int16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    union
    {
        int16x8x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev8hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline int32x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_lane_s32(const int32_t* __a, int32x4x3_t __b, const int __c)
{
    union
    {
        int32x4x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    union
    {
        int32x4x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev4si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_lane_f16(const float16_t* __a, float16x8x3_t __b, const int __c)
{
    union
    {
        float16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    union
    {
        float16x8x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev8hf(__a, __bu.__o, __c);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_lane_f32(const float32_t* __a, float32x4x3_t __b, const int __c)
{
    union
    {
        float32x4x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    union
    {
        float32x4x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev4sf((const __builtin_neon_sf*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint16x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_lane_u16(const uint16_t* __a, uint16x8x3_t __b, const int __c)
{
    union
    {
        uint16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    union
    {
        uint16x8x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev8hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint32x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_lane_u32(const uint32_t* __a, uint32x4x3_t __b, const int __c)
{
    union
    {
        uint32x4x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    union
    {
        uint32x4x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev4si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline poly16x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3q_lane_p16(const poly16_t* __a, poly16x8x3_t __b, const int __c)
{
    union
    {
        poly16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    union
    {
        poly16x8x3_t __i;
        __builtin_neon_ci __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_lanev8hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
/* vld3_dup_<type>: load one 3-element structure from *__a and replicate
   it across all lanes of a triple of 64-bit vectors (ARM VLD3 "all
   lanes" form).  EI-mode result punned to the public <type>x3_t triple;
   the x1 64-bit variants degenerate to a plain 3-element load. */
__extension__ extern __inline int8x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_s8(const int8_t* __a)
{
    union
    {
        int8x8x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupv8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_s16(const int16_t* __a)
{
    union
    {
        int16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupv4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int32x2x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_s32(const int32_t* __a)
{
    union
    {
        int32x2x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupv2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_f16(const float16_t* __a)
{
    union
    {
        float16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupv4hf(__a);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x2x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_f32(const float32_t* __a)
{
    union
    {
        float32x2x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupv2sf((const __builtin_neon_sf*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint8x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_u8(const uint8_t* __a)
{
    union
    {
        uint8x8x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupv8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_u16(const uint16_t* __a)
{
    union
    {
        uint16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupv4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint32x2x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_u32(const uint32_t* __a)
{
    union
    {
        uint32x2x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupv2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly8x8x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_p8(const poly8_t* __a)
{
    union
    {
        poly8x8x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupv8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly16x4x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_p16(const poly16_t* __a)
{
    union
    {
        poly16x4x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupv4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
/* poly64 support requires the crypto extension; temporarily retarget. */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_p64(const poly64_t* __a)
{
    union
    {
        poly64x1x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupdi((const __builtin_neon_di*)__a);
    return __rv.__i;
}
#pragma GCC pop_options
__extension__ extern __inline int64x1x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_s64(const int64_t* __a)
{
    union
    {
        int64x1x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupdi((const __builtin_neon_di*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint64x1x3_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld3_dup_u64(const uint64_t* __a)
{
    union
    {
        uint64x1x3_t __i;
        __builtin_neon_ei __o;
    } __rv;
    __rv.__o = __builtin_neon_vld3_dupdi((const __builtin_neon_di*)__a);
    return __rv.__i;
}
/* vst3 intrinsics, 64-bit (d-register) vector variants: store the three
   vectors in __b to *__a.  Each wrapper packs the public x8x3/x4x3/x2x3
   aggregate into the compiler's opaque EI-mode value via a union, then
   forwards to the type-specific __builtin_neon_vst3* builtin.  The cast
   on __a converts the element pointer to the builtin's element type
   (__builtin_neon_qi/hi/si/sf).  The float16 variant is only available
   when an FP16 format is defined.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_s8(int8_t* __a, int8x8x3_t __b)
{
    union
    {
        int8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3v8qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_s16(int16_t* __a, int16x4x3_t __b)
{
    union
    {
        int16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3v4hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_s32(int32_t* __a, int32x2x3_t __b)
{
    union
    {
        int32x2x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3v2si((__builtin_neon_si*)__a, __bu.__o);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_f16(float16_t* __a, float16x4x3_t __b)
{
    union
    {
        float16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3v4hf(__a, __bu.__o);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_f32(float32_t* __a, float32x2x3_t __b)
{
    union
    {
        float32x2x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3v2sf((__builtin_neon_sf*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_u8(uint8_t* __a, uint8x8x3_t __b)
{
    union
    {
        uint8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3v8qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_u16(uint16_t* __a, uint16x4x3_t __b)
{
    union
    {
        uint16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3v4hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_u32(uint32_t* __a, uint32x2x3_t __b)
{
    union
    {
        uint32x2x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3v2si((__builtin_neon_si*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_p8(poly8_t* __a, poly8x8x3_t __b)
{
    union
    {
        poly8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3v8qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_p16(poly16_t* __a, poly16x4x3_t __b)
{
    union
    {
        poly16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3v4hi((__builtin_neon_hi*)__a, __bu.__o);
}
/* vst3 intrinsics, 64x1 element variants: single-lane stores of three
   64-bit vectors, lowered through __builtin_neon_vst3di.  vst3_p64
   requires the crypto extension, so it is wrapped in a target-pragma
   push/pop selecting fpu=crypto-neon-fp-armv8.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_p64(poly64_t* __a, poly64x1x3_t __b)
{
    union
    {
        poly64x1x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3di((__builtin_neon_di*)__a, __bu.__o);
}
#pragma GCC pop_options
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_s64(int64_t* __a, int64x1x3_t __b)
{
    union
    {
        int64x1x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3di((__builtin_neon_di*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_u64(uint64_t* __a, uint64x1x3_t __b)
{
    union
    {
        uint64x1x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3di((__builtin_neon_di*)__a, __bu.__o);
}
/* vst3q intrinsics, 128-bit (q-register) vector variants: store the
   three q-vectors in __b to *__a.  Same union-reinterpret pattern as the
   d-register forms, but the packed value uses the wider CI mode
   (__builtin_neon_ci) because three q-registers are involved.  The
   float16 variant is guarded on FP16 support.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_s8(int8_t* __a, int8x16x3_t __b)
{
    union
    {
        int8x16x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3v16qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_s16(int16_t* __a, int16x8x3_t __b)
{
    union
    {
        int16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3v8hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_s32(int32_t* __a, int32x4x3_t __b)
{
    union
    {
        int32x4x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3v4si((__builtin_neon_si*)__a, __bu.__o);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_f16(float16_t* __a, float16x8x3_t __b)
{
    union
    {
        float16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3v8hf(__a, __bu.__o);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_f32(float32_t* __a, float32x4x3_t __b)
{
    union
    {
        float32x4x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3v4sf((__builtin_neon_sf*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_u8(uint8_t* __a, uint8x16x3_t __b)
{
    union
    {
        uint8x16x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3v16qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_u16(uint16_t* __a, uint16x8x3_t __b)
{
    union
    {
        uint16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3v8hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_u32(uint32_t* __a, uint32x4x3_t __b)
{
    union
    {
        uint32x4x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3v4si((__builtin_neon_si*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_p8(poly8_t* __a, poly8x16x3_t __b)
{
    union
    {
        poly8x16x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3v16qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_p16(poly16_t* __a, poly16x8x3_t __b)
{
    union
    {
        poly16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3v8hi((__builtin_neon_hi*)__a, __bu.__o);
}
/* vst3_lane intrinsics, d-register variants: store only lane __c of each
   of the three vectors in __b to *__a.  __c must be a constant lane
   index (enforced by the builtin).  Same EI-mode union packing as the
   full vst3 stores.  float16 variant guarded on FP16 support.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_s8(int8_t* __a, int8x8x3_t __b, const int __c)
{
    union
    {
        int8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev8qi((__builtin_neon_qi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_s16(int16_t* __a, int16x4x3_t __b, const int __c)
{
    union
    {
        int16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev4hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_s32(int32_t* __a, int32x2x3_t __b, const int __c)
{
    union
    {
        int32x2x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev2si((__builtin_neon_si*)__a, __bu.__o, __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_f16(float16_t* __a, float16x4x3_t __b, const int __c)
{
    union
    {
        float16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev4hf(__a, __bu.__o, __c);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_f32(float32_t* __a, float32x2x3_t __b, const int __c)
{
    union
    {
        float32x2x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev2sf((__builtin_neon_sf*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_u8(uint8_t* __a, uint8x8x3_t __b, const int __c)
{
    union
    {
        uint8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev8qi((__builtin_neon_qi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_u16(uint16_t* __a, uint16x4x3_t __b, const int __c)
{
    union
    {
        uint16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev4hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_u32(uint32_t* __a, uint32x2x3_t __b, const int __c)
{
    union
    {
        uint32x2x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev2si((__builtin_neon_si*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_p8(poly8_t* __a, poly8x8x3_t __b, const int __c)
{
    union
    {
        poly8x8x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev8qi((__builtin_neon_qi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3_lane_p16(poly16_t* __a, poly16x4x3_t __b, const int __c)
{
    union
    {
        poly16x4x3_t __i;
        __builtin_neon_ei __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev4hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
/* vst3q_lane intrinsics, q-register variants: store lane __c of each of
   the three q-vectors in __b to *__a.  Packed through the CI mode.  Only
   the element sizes the architecture supports for q-register lane stores
   (16- and 32-bit, plus f16 when available) are provided here.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_s16(int16_t* __a, int16x8x3_t __b, const int __c)
{
    union
    {
        int16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev8hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_s32(int32_t* __a, int32x4x3_t __b, const int __c)
{
    union
    {
        int32x4x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev4si((__builtin_neon_si*)__a, __bu.__o, __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_f16(float16_t* __a, float16x8x3_t __b, const int __c)
{
    union
    {
        float16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev8hf(__a, __bu.__o, __c);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_f32(float32_t* __a, float32x4x3_t __b, const int __c)
{
    union
    {
        float32x4x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev4sf((__builtin_neon_sf*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_u16(uint16_t* __a, uint16x8x3_t __b, const int __c)
{
    union
    {
        uint16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev8hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_u32(uint32_t* __a, uint32x4x3_t __b, const int __c)
{
    union
    {
        uint32x4x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev4si((__builtin_neon_si*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst3q_lane_p16(poly16_t* __a, poly16x8x3_t __b, const int __c)
{
    union
    {
        poly16x8x3_t __i;
        __builtin_neon_ci __o;
    } __bu = {__b};
    __builtin_neon_vst3_lanev8hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
/* vld4 intrinsics, d-register variants: load four vectors from *__a and
   return them as the public x8x4/x4x4/x2x4 aggregate.  The builtin's
   opaque OI-mode result (__builtin_neon_oi — four d-registers) is
   reinterpreted through a union; no data conversion occurs.  float16
   variant guarded on FP16 support.  */
__extension__ extern __inline int8x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_s8(const int8_t* __a)
{
    union
    {
        int8x8x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_s16(const int16_t* __a)
{
    union
    {
        int16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int32x2x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_s32(const int32_t* __a)
{
    union
    {
        int32x2x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_f16(const float16_t* __a)
{
    union
    {
        float16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v4hf(__a);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x2x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_f32(const float32_t* __a)
{
    union
    {
        float32x2x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v2sf((const __builtin_neon_sf*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint8x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_u8(const uint8_t* __a)
{
    union
    {
        uint8x8x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_u16(const uint16_t* __a)
{
    union
    {
        uint16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint32x2x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_u32(const uint32_t* __a)
{
    union
    {
        uint32x2x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly8x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_p8(const poly8_t* __a)
{
    union
    {
        poly8x8x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_p16(const poly16_t* __a)
{
    union
    {
        poly16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
/* vld4 intrinsics, 64x1 element variants: load four single-lane 64-bit
   vectors via __builtin_neon_vld4di.  vld4_p64 requires the crypto
   extension, hence the fpu=crypto-neon-fp-armv8 target-pragma
   push/pop around it.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_p64(const poly64_t* __a)
{
    union
    {
        poly64x1x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4di((const __builtin_neon_di*)__a);
    return __rv.__i;
}
#pragma GCC pop_options
__extension__ extern __inline int64x1x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_s64(const int64_t* __a)
{
    union
    {
        int64x1x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4di((const __builtin_neon_di*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint64x1x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_u64(const uint64_t* __a)
{
    union
    {
        uint64x1x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4di((const __builtin_neon_di*)__a);
    return __rv.__i;
}
/* vld4q intrinsics, q-register variants: load four 128-bit vectors from
   *__a.  The builtin returns the opaque XI mode (__builtin_neon_xi —
   four q-registers), reinterpreted to the public x16x4/x8x4/x4x4
   aggregate through a union.  float16 variant guarded on FP16
   support.  */
__extension__ extern __inline int8x16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_s8(const int8_t* __a)
{
    union
    {
        int8x16x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v16qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int16x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_s16(const int16_t* __a)
{
    union
    {
        int16x8x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v8hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int32x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_s32(const int32_t* __a)
{
    union
    {
        int32x4x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v4si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_f16(const float16_t* __a)
{
    union
    {
        float16x8x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v8hf(__a);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_f32(const float32_t* __a)
{
    union
    {
        float32x4x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v4sf((const __builtin_neon_sf*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint8x16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_u8(const uint8_t* __a)
{
    union
    {
        uint8x16x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v16qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint16x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_u16(const uint16_t* __a)
{
    union
    {
        uint16x8x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v8hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint32x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_u32(const uint32_t* __a)
{
    union
    {
        uint32x4x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v4si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly8x16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_p8(const poly8_t* __a)
{
    union
    {
        poly8x16x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v16qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly16x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_p16(const poly16_t* __a)
{
    union
    {
        poly16x8x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4v8hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
/* vld4_lane intrinsics, d-register variants: load one element into lane
   __c of each of the four vectors, with the remaining lanes taken from
   __b.  Two unions are used: __bu packs the incoming aggregate into the
   opaque OI mode for the builtin, __rv unpacks the builtin's OI result
   back to the public type.  __c must be a constant lane index.  float16
   variant guarded on FP16 support.  */
__extension__ extern __inline int8x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_lane_s8(const int8_t* __a, int8x8x4_t __b, const int __c)
{
    union
    {
        int8x8x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        int8x8x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev8qi((const __builtin_neon_qi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline int16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_lane_s16(const int16_t* __a, int16x4x4_t __b, const int __c)
{
    union
    {
        int16x4x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        int16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev4hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline int32x2x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_lane_s32(const int32_t* __a, int32x2x4_t __b, const int __c)
{
    union
    {
        int32x2x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        int32x2x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev2si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_lane_f16(const float16_t* __a, float16x4x4_t __b, const int __c)
{
    union
    {
        float16x4x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        float16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev4hf(__a, __bu.__o, __c);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x2x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_lane_f32(const float32_t* __a, float32x2x4_t __b, const int __c)
{
    union
    {
        float32x2x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        float32x2x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev2sf((const __builtin_neon_sf*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint8x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_lane_u8(const uint8_t* __a, uint8x8x4_t __b, const int __c)
{
    union
    {
        uint8x8x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        uint8x8x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev8qi((const __builtin_neon_qi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_lane_u16(const uint16_t* __a, uint16x4x4_t __b, const int __c)
{
    union
    {
        uint16x4x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        uint16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev4hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint32x2x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_lane_u32(const uint32_t* __a, uint32x2x4_t __b, const int __c)
{
    union
    {
        uint32x2x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        uint32x2x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev2si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline poly8x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_lane_p8(const poly8_t* __a, poly8x8x4_t __b, const int __c)
{
    union
    {
        poly8x8x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        poly8x8x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev8qi((const __builtin_neon_qi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline poly16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_lane_p16(const poly16_t* __a, poly16x4x4_t __b, const int __c)
{
    union
    {
        poly16x4x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    union
    {
        poly16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev4hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
/* vld4q_lane intrinsics, q-register variants: load one element into lane
   __c of each of the four q-vectors, other lanes taken from __b.  Same
   two-union pattern as vld4_lane but using the XI mode (four
   q-registers).  Only 16- and 32-bit element sizes (plus f16 when
   available) are provided for q-register lane loads.  */
__extension__ extern __inline int16x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_lane_s16(const int16_t* __a, int16x8x4_t __b, const int __c)
{
    union
    {
        int16x8x4_t __i;
        __builtin_neon_xi __o;
    } __bu = {__b};
    union
    {
        int16x8x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev8hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline int32x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_lane_s32(const int32_t* __a, int32x4x4_t __b, const int __c)
{
    union
    {
        int32x4x4_t __i;
        __builtin_neon_xi __o;
    } __bu = {__b};
    union
    {
        int32x4x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev4si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_lane_f16(const float16_t* __a, float16x8x4_t __b, const int __c)
{
    union
    {
        float16x8x4_t __i;
        __builtin_neon_xi __o;
    } __bu = {__b};
    union
    {
        float16x8x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev8hf(__a, __bu.__o, __c);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_lane_f32(const float32_t* __a, float32x4x4_t __b, const int __c)
{
    union
    {
        float32x4x4_t __i;
        __builtin_neon_xi __o;
    } __bu = {__b};
    union
    {
        float32x4x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev4sf((const __builtin_neon_sf*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint16x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_lane_u16(const uint16_t* __a, uint16x8x4_t __b, const int __c)
{
    union
    {
        uint16x8x4_t __i;
        __builtin_neon_xi __o;
    } __bu = {__b};
    union
    {
        uint16x8x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev8hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline uint32x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_lane_u32(const uint32_t* __a, uint32x4x4_t __b, const int __c)
{
    union
    {
        uint32x4x4_t __i;
        __builtin_neon_xi __o;
    } __bu = {__b};
    union
    {
        uint32x4x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev4si((const __builtin_neon_si*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
__extension__ extern __inline poly16x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4q_lane_p16(const poly16_t* __a, poly16x8x4_t __b, const int __c)
{
    union
    {
        poly16x8x4_t __i;
        __builtin_neon_xi __o;
    } __bu = {__b};
    union
    {
        poly16x8x4_t __i;
        __builtin_neon_xi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_lanev8hi((const __builtin_neon_hi*)__a,
                                            __bu.__o, __c);
    return __rv.__i;
}
/* vld4_dup intrinsics, d-register variants: load one 4-element structure
   from *__a and replicate it across all lanes of the four result
   vectors.  OI-mode union reinterpret, same as the other vld4 forms.
   float16 variant guarded on FP16 support.  */
__extension__ extern __inline int8x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_s8(const int8_t* __a)
{
    union
    {
        int8x8x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupv8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_s16(const int16_t* __a)
{
    union
    {
        int16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupv4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline int32x2x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_s32(const int32_t* __a)
{
    union
    {
        int32x2x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupv2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_f16(const float16_t* __a)
{
    union
    {
        float16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupv4hf(__a);
    return __rv.__i;
}
#endif
__extension__ extern __inline float32x2x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_f32(const float32_t* __a)
{
    union
    {
        float32x2x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupv2sf((const __builtin_neon_sf*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint8x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_u8(const uint8_t* __a)
{
    union
    {
        uint8x8x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupv8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_u16(const uint16_t* __a)
{
    union
    {
        uint16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupv4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint32x2x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_u32(const uint32_t* __a)
{
    union
    {
        uint32x2x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupv2si((const __builtin_neon_si*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly8x8x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_p8(const poly8_t* __a)
{
    union
    {
        poly8x8x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupv8qi((const __builtin_neon_qi*)__a);
    return __rv.__i;
}
__extension__ extern __inline poly16x4x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_p16(const poly16_t* __a)
{
    union
    {
        poly16x4x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupv4hi((const __builtin_neon_hi*)__a);
    return __rv.__i;
}
/* vld4_dup intrinsics, 64x1 element variants: for single-lane 64-bit
   vectors "dup" degenerates to a plain 4-element structure load via
   __builtin_neon_vld4_dupdi.  vld4_dup_p64 requires the crypto
   extension, hence the target-pragma push/pop around it.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_p64(const poly64_t* __a)
{
    union
    {
        poly64x1x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupdi((const __builtin_neon_di*)__a);
    return __rv.__i;
}
#pragma GCC pop_options
__extension__ extern __inline int64x1x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_s64(const int64_t* __a)
{
    union
    {
        int64x1x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupdi((const __builtin_neon_di*)__a);
    return __rv.__i;
}
__extension__ extern __inline uint64x1x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vld4_dup_u64(const uint64_t* __a)
{
    union
    {
        uint64x1x4_t __i;
        __builtin_neon_oi __o;
    } __rv;
    __rv.__o = __builtin_neon_vld4_dupdi((const __builtin_neon_di*)__a);
    return __rv.__i;
}
/* vst4 intrinsics, d-register variants: store the four vectors in __b to
   *__a.  The aggregate is packed into the opaque OI mode via a union and
   forwarded to the type-specific __builtin_neon_vst4* builtin.  float16
   variant guarded on FP16 support.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_s8(int8_t* __a, int8x8x4_t __b)
{
    union
    {
        int8x8x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4v8qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_s16(int16_t* __a, int16x4x4_t __b)
{
    union
    {
        int16x4x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4v4hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_s32(int32_t* __a, int32x2x4_t __b)
{
    union
    {
        int32x2x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4v2si((__builtin_neon_si*)__a, __bu.__o);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_f16(float16_t* __a, float16x4x4_t __b)
{
    union
    {
        float16x4x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4v4hf(__a, __bu.__o);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_f32(float32_t* __a, float32x2x4_t __b)
{
    union
    {
        float32x2x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4v2sf((__builtin_neon_sf*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_u8(uint8_t* __a, uint8x8x4_t __b)
{
    union
    {
        uint8x8x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4v8qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_u16(uint16_t* __a, uint16x4x4_t __b)
{
    union
    {
        uint16x4x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4v4hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_u32(uint32_t* __a, uint32x2x4_t __b)
{
    union
    {
        uint32x2x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4v2si((__builtin_neon_si*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_p8(poly8_t* __a, poly8x8x4_t __b)
{
    union
    {
        poly8x8x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4v8qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_p16(poly16_t* __a, poly16x4x4_t __b)
{
    union
    {
        poly16x4x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4v4hi((__builtin_neon_hi*)__a, __bu.__o);
}
/* vst4 intrinsics, 64x1 element variants: single-lane stores of four
   64-bit vectors via __builtin_neon_vst4di.  vst4_p64 requires the
   crypto extension, hence the fpu=crypto-neon-fp-armv8 target-pragma
   push/pop around it.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_p64(poly64_t* __a, poly64x1x4_t __b)
{
    union
    {
        poly64x1x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4di((__builtin_neon_di*)__a, __bu.__o);
}
#pragma GCC pop_options
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_s64(int64_t* __a, int64x1x4_t __b)
{
    union
    {
        int64x1x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4di((__builtin_neon_di*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_u64(uint64_t* __a, uint64x1x4_t __b)
{
    union
    {
        uint64x1x4_t __i;
        __builtin_neon_oi __o;
    } __bu = {__b};
    __builtin_neon_vst4di((__builtin_neon_di*)__a, __bu.__o);
}
/* vst4q family, 128-bit ("Q-register") forms: identical shape to the
   vst4_* wrappers above, but the x4 aggregate is punned to the wider
   opaque __builtin_neon_xi tuple type before being handed to the
   128-bit vst4 builtins.  The float16 variant is guarded because the
   float16 vector types only exist when an FP16 format is selected.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_s8(int8_t* __a, int8x16x4_t __b)
{
union
{
int8x16x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4v16qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_s16(int16_t* __a, int16x8x4_t __b)
{
union
{
int16x8x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4v8hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_s32(int32_t* __a, int32x4x4_t __b)
{
union
{
int32x4x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4v4si((__builtin_neon_si*)__a, __bu.__o);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_f16(float16_t* __a, float16x8x4_t __b)
{
union
{
float16x8x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4v8hf(__a, __bu.__o);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_f32(float32_t* __a, float32x4x4_t __b)
{
union
{
float32x4x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4v4sf((__builtin_neon_sf*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_u8(uint8_t* __a, uint8x16x4_t __b)
{
union
{
uint8x16x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4v16qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_u16(uint16_t* __a, uint16x8x4_t __b)
{
union
{
uint16x8x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4v8hi((__builtin_neon_hi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_u32(uint32_t* __a, uint32x4x4_t __b)
{
union
{
uint32x4x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4v4si((__builtin_neon_si*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_p8(poly8_t* __a, poly8x16x4_t __b)
{
union
{
poly8x16x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4v16qi((__builtin_neon_qi*)__a, __bu.__o);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_p16(poly16_t* __a, poly16x8x4_t __b)
{
union
{
poly16x8x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4v8hi((__builtin_neon_hi*)__a, __bu.__o);
}
/* vst4_lane / vst4q_lane family: same union-pun pattern as the plain
   vst4 wrappers, but these forward an extra lane selector __c to the
   *_lane builtins so that only one lane from each of the four vectors
   is stored.  NOTE(review): __c is declared `const int` and passed
   straight through to the builtin, which presumably requires a
   compile-time-constant lane index — confirm against the builtin's
   contract before relying on a runtime value.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_s8(int8_t* __a, int8x8x4_t __b, const int __c)
{
union
{
int8x8x4_t __i;
__builtin_neon_oi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev8qi((__builtin_neon_qi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_s16(int16_t* __a, int16x4x4_t __b, const int __c)
{
union
{
int16x4x4_t __i;
__builtin_neon_oi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev4hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_s32(int32_t* __a, int32x2x4_t __b, const int __c)
{
union
{
int32x2x4_t __i;
__builtin_neon_oi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev2si((__builtin_neon_si*)__a, __bu.__o, __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_f16(float16_t* __a, float16x4x4_t __b, const int __c)
{
union
{
float16x4x4_t __i;
__builtin_neon_oi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev4hf(__a, __bu.__o, __c);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_f32(float32_t* __a, float32x2x4_t __b, const int __c)
{
union
{
float32x2x4_t __i;
__builtin_neon_oi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev2sf((__builtin_neon_sf*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_u8(uint8_t* __a, uint8x8x4_t __b, const int __c)
{
union
{
uint8x8x4_t __i;
__builtin_neon_oi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev8qi((__builtin_neon_qi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_u16(uint16_t* __a, uint16x4x4_t __b, const int __c)
{
union
{
uint16x4x4_t __i;
__builtin_neon_oi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev4hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_u32(uint32_t* __a, uint32x2x4_t __b, const int __c)
{
union
{
uint32x2x4_t __i;
__builtin_neon_oi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev2si((__builtin_neon_si*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_p8(poly8_t* __a, poly8x8x4_t __b, const int __c)
{
union
{
poly8x8x4_t __i;
__builtin_neon_oi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev8qi((__builtin_neon_qi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4_lane_p16(poly16_t* __a, poly16x4x4_t __b, const int __c)
{
union
{
poly16x4x4_t __i;
__builtin_neon_oi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev4hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
/* 128-bit (Q-register) lane stores: note there are no 8-bit Q-form
   lane variants in this excerpt; the aggregates pun to the wider
   __builtin_neon_xi tuple type.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_s16(int16_t* __a, int16x8x4_t __b, const int __c)
{
union
{
int16x8x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev8hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_s32(int32_t* __a, int32x4x4_t __b, const int __c)
{
union
{
int32x4x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev4si((__builtin_neon_si*)__a, __bu.__o, __c);
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_f16(float16_t* __a, float16x8x4_t __b, const int __c)
{
union
{
float16x8x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev8hf(__a, __bu.__o, __c);
}
#endif
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_f32(float32_t* __a, float32x4x4_t __b, const int __c)
{
union
{
float32x4x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev4sf((__builtin_neon_sf*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_u16(uint16_t* __a, uint16x8x4_t __b, const int __c)
{
union
{
uint16x8x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev8hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_u32(uint32_t* __a, uint32x4x4_t __b, const int __c)
{
union
{
uint32x4x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev4si((__builtin_neon_si*)__a, __bu.__o, __c);
}
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vst4q_lane_p16(poly16_t* __a, poly16x8x4_t __b, const int __c)
{
union
{
poly16x8x4_t __i;
__builtin_neon_xi __o;
} __bu = {__b};
__builtin_neon_vst4_lanev8hi((__builtin_neon_hi*)__a, __bu.__o, __c);
}
/* vand / vandq family: lane-wise bitwise AND of two vectors.
   Implemented directly with the GCC vector-extension `&` operator
   rather than a builtin; the compiler applies it element-wise.
   D-register (64-bit) forms first, then Q-register (128-bit) forms.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vand_s8(int8x8_t __a, int8x8_t __b)
{
return __a & __b;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vand_s16(int16x4_t __a, int16x4_t __b)
{
return __a & __b;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vand_s32(int32x2_t __a, int32x2_t __b)
{
return __a & __b;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vand_u8(uint8x8_t __a, uint8x8_t __b)
{
return __a & __b;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vand_u16(uint16x4_t __a, uint16x4_t __b)
{
return __a & __b;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vand_u32(uint32x2_t __a, uint32x2_t __b)
{
return __a & __b;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vand_s64(int64x1_t __a, int64x1_t __b)
{
return __a & __b;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vand_u64(uint64x1_t __a, uint64x1_t __b)
{
return __a & __b;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vandq_s8(int8x16_t __a, int8x16_t __b)
{
return __a & __b;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vandq_s16(int16x8_t __a, int16x8_t __b)
{
return __a & __b;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vandq_s32(int32x4_t __a, int32x4_t __b)
{
return __a & __b;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vandq_s64(int64x2_t __a, int64x2_t __b)
{
return __a & __b;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vandq_u8(uint8x16_t __a, uint8x16_t __b)
{
return __a & __b;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vandq_u16(uint16x8_t __a, uint16x8_t __b)
{
return __a & __b;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vandq_u32(uint32x4_t __a, uint32x4_t __b)
{
return __a & __b;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vandq_u64(uint64x2_t __a, uint64x2_t __b)
{
return __a & __b;
}
/* vorr / vorrq family: lane-wise bitwise inclusive OR of two vectors,
   via the GCC vector-extension `|` operator.  D-register (64-bit)
   forms first, then Q-register (128-bit) forms.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorr_s8(int8x8_t __a, int8x8_t __b)
{
return __a | __b;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorr_s16(int16x4_t __a, int16x4_t __b)
{
return __a | __b;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorr_s32(int32x2_t __a, int32x2_t __b)
{
return __a | __b;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorr_u8(uint8x8_t __a, uint8x8_t __b)
{
return __a | __b;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorr_u16(uint16x4_t __a, uint16x4_t __b)
{
return __a | __b;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorr_u32(uint32x2_t __a, uint32x2_t __b)
{
return __a | __b;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorr_s64(int64x1_t __a, int64x1_t __b)
{
return __a | __b;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorr_u64(uint64x1_t __a, uint64x1_t __b)
{
return __a | __b;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s8(int8x16_t __a, int8x16_t __b)
{
return __a | __b;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s16(int16x8_t __a, int16x8_t __b)
{
return __a | __b;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s32(int32x4_t __a, int32x4_t __b)
{
return __a | __b;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorrq_s64(int64x2_t __a, int64x2_t __b)
{
return __a | __b;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u8(uint8x16_t __a, uint8x16_t __b)
{
return __a | __b;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u16(uint16x8_t __a, uint16x8_t __b)
{
return __a | __b;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u32(uint32x4_t __a, uint32x4_t __b)
{
return __a | __b;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorrq_u64(uint64x2_t __a, uint64x2_t __b)
{
return __a | __b;
}
/* veor / veorq family: lane-wise bitwise exclusive OR of two vectors,
   via the GCC vector-extension `^` operator.  D-register (64-bit)
   forms first, then Q-register (128-bit) forms.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veor_s8(int8x8_t __a, int8x8_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veor_s16(int16x4_t __a, int16x4_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veor_s32(int32x2_t __a, int32x2_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veor_u8(uint8x8_t __a, uint8x8_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veor_u16(uint16x4_t __a, uint16x4_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veor_u32(uint32x2_t __a, uint32x2_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veor_s64(int64x1_t __a, int64x1_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veor_u64(uint64x1_t __a, uint64x1_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veorq_s8(int8x16_t __a, int8x16_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veorq_s16(int16x8_t __a, int16x8_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veorq_s32(int32x4_t __a, int32x4_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veorq_s64(int64x2_t __a, int64x2_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veorq_u8(uint8x16_t __a, uint8x16_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veorq_u16(uint16x8_t __a, uint16x8_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veorq_u32(uint32x4_t __a, uint32x4_t __b)
{
return __a ^ __b;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
veorq_u64(uint64x2_t __a, uint64x2_t __b)
{
return __a ^ __b;
}
/* vbic / vbicq family ("bit clear"): lane-wise __a & ~__b — clears in
   __a every bit that is set in __b.  Expressed with GCC vector
   operators; note the operand order: the SECOND argument is the mask
   of bits to clear.  D-register forms first, then Q-register forms.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbic_s8(int8x8_t __a, int8x8_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbic_s16(int16x4_t __a, int16x4_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbic_s32(int32x2_t __a, int32x2_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbic_u8(uint8x8_t __a, uint8x8_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbic_u16(uint16x4_t __a, uint16x4_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbic_u32(uint32x2_t __a, uint32x2_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbic_s64(int64x1_t __a, int64x1_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbic_u64(uint64x1_t __a, uint64x1_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s8(int8x16_t __a, int8x16_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s16(int16x8_t __a, int16x8_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s32(int32x4_t __a, int32x4_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbicq_s64(int64x2_t __a, int64x2_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u8(uint8x16_t __a, uint8x16_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u16(uint16x8_t __a, uint16x8_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u32(uint32x4_t __a, uint32x4_t __b)
{
return __a & ~__b;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbicq_u64(uint64x2_t __a, uint64x2_t __b)
{
return __a & ~__b;
}
/* vorn / vornq family ("OR NOT"): lane-wise __a | ~__b.  As with vbic,
   only the SECOND operand is complemented.  Expressed with GCC vector
   operators; D-register forms first, then Q-register forms.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorn_s8(int8x8_t __a, int8x8_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorn_s16(int16x4_t __a, int16x4_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorn_s32(int32x2_t __a, int32x2_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorn_u8(uint8x8_t __a, uint8x8_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorn_u16(uint16x4_t __a, uint16x4_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorn_u32(uint32x2_t __a, uint32x2_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorn_s64(int64x1_t __a, int64x1_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vorn_u64(uint64x1_t __a, uint64x1_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vornq_s8(int8x16_t __a, int8x16_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vornq_s16(int16x8_t __a, int16x8_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vornq_s32(int32x4_t __a, int32x4_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vornq_s64(int64x2_t __a, int64x2_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vornq_u8(uint8x16_t __a, uint8x16_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vornq_u16(uint16x8_t __a, uint16x8_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vornq_u32(uint32x4_t __a, uint32x4_t __b)
{
return __a | ~__b;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vornq_u64(uint64x2_t __a, uint64x2_t __b)
{
return __a | ~__b;
}
/* vreinterpret_p8 family: reinterpret any 64-bit vector type as
   poly8x8_t.  The cast between same-size GCC vector types preserves
   the underlying bits — no value conversion is performed.  The f16
   source is guarded on FP16 support; the p64 source needs the crypto
   extensions, scoped by the target pragma.  */
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_p16(poly16x4_t __a)
{
return (poly8x8_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_f16(float16x4_t __a)
{
return (poly8x8_t)__a;
}
#endif
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_f32(float32x2_t __a)
{
return (poly8x8_t)__a;
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_p64(poly64x1_t __a)
{
return (poly8x8_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s64(int64x1_t __a)
{
return (poly8x8_t)__a;
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u64(uint64x1_t __a)
{
return (poly8x8_t)__a;
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s8(int8x8_t __a)
{
return (poly8x8_t)__a;
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s16(int16x4_t __a)
{
return (poly8x8_t)__a;
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_s32(int32x2_t __a)
{
return (poly8x8_t)__a;
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u8(uint8x8_t __a)
{
return (poly8x8_t)__a;
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u16(uint16x4_t __a)
{
return (poly8x8_t)__a;
}
__extension__ extern __inline poly8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p8_u32(uint32x2_t __a)
{
return (poly8x8_t)__a;
}
/* vreinterpret_p16 family: reinterpret any 64-bit vector type as
   poly16x4_t.  Bit-preserving vector cast only; same guard structure
   as the p8 family (FP16 format guard for f16, crypto target pragma
   for p64).  */
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_p8(poly8x8_t __a)
{
return (poly16x4_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_f16(float16x4_t __a)
{
return (poly16x4_t)__a;
}
#endif
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_f32(float32x2_t __a)
{
return (poly16x4_t)__a;
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_p64(poly64x1_t __a)
{
return (poly16x4_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s64(int64x1_t __a)
{
return (poly16x4_t)__a;
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u64(uint64x1_t __a)
{
return (poly16x4_t)__a;
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s8(int8x8_t __a)
{
return (poly16x4_t)__a;
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s16(int16x4_t __a)
{
return (poly16x4_t)__a;
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_s32(int32x2_t __a)
{
return (poly16x4_t)__a;
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u8(uint8x8_t __a)
{
return (poly16x4_t)__a;
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u16(uint16x4_t __a)
{
return (poly16x4_t)__a;
}
__extension__ extern __inline poly16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p16_u32(uint32x2_t __a)
{
return (poly16x4_t)__a;
}
/* vreinterpret_f16 family: reinterpret any 64-bit vector type as
   float16x4_t.  Bit-preserving vector cast only.  Because the
   float16x4_t DESTINATION type itself only exists when an FP16 format
   is selected, every function in this family — not just the f16
   source, as elsewhere — is wrapped in its own FP16-format guard.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_p8(poly8x8_t __a)
{
return (float16x4_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_p16(poly16x4_t __a)
{
return (float16x4_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_f32(float32x2_t __a)
{
return (float16x4_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_p64(poly64x1_t __a)
{
return (float16x4_t)__a;
}
#pragma GCC pop_options
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s64(int64x1_t __a)
{
return (float16x4_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u64(uint64x1_t __a)
{
return (float16x4_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s8(int8x8_t __a)
{
return (float16x4_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s16(int16x4_t __a)
{
return (float16x4_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_s32(int32x2_t __a)
{
return (float16x4_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u8(uint8x8_t __a)
{
return (float16x4_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u16(uint16x4_t __a)
{
return (float16x4_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f16_u32(uint32x2_t __a)
{
return (float16x4_t)__a;
}
#endif
/* vreinterpret_f32 family: reinterpret any 64-bit vector type as
   float32x2_t.  Bit-preserving vector cast only — the float bit
   patterns are NOT converted numerically.  Same guard structure as
   the p8/p16 families.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_p8(poly8x8_t __a)
{
return (float32x2_t)__a;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_p16(poly16x4_t __a)
{
return (float32x2_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_f16(float16x4_t __a)
{
return (float32x2_t)__a;
}
#endif
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_p64(poly64x1_t __a)
{
return (float32x2_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s64(int64x1_t __a)
{
return (float32x2_t)__a;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u64(uint64x1_t __a)
{
return (float32x2_t)__a;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s8(int8x8_t __a)
{
return (float32x2_t)__a;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s16(int16x4_t __a)
{
return (float32x2_t)__a;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_s32(int32x2_t __a)
{
return (float32x2_t)__a;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u8(uint8x8_t __a)
{
return (float32x2_t)__a;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u16(uint16x4_t __a)
{
return (float32x2_t)__a;
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_f32_u32(uint32x2_t __a)
{
return (float32x2_t)__a;
}
/* vreinterpret_p64_<src>: reinterpret the bit pattern of a 64-bit NEON
   vector as poly64x1_t.  Pure casts; bits are unchanged.  The whole family
   is compiled with the crypto FPU target (crypto-neon-fp-armv8) enabled,
   since poly64x1_t is tied to that extension here.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_p8(poly8x8_t __a)
{
  return (poly64x1_t)__a;
}
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_p16(poly16x4_t __a)
{
  return (poly64x1_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_f16(float16x4_t __a)
{
  return (poly64x1_t)__a;
}
#endif
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_f32(float32x2_t __a)
{
  return (poly64x1_t)__a;
}
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_s64(int64x1_t __a)
{
  return (poly64x1_t)__a;
}
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_u64(uint64x1_t __a)
{
  return (poly64x1_t)__a;
}
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_s8(int8x8_t __a)
{
  return (poly64x1_t)__a;
}
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_s16(int16x4_t __a)
{
  return (poly64x1_t)__a;
}
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_s32(int32x2_t __a)
{
  return (poly64x1_t)__a;
}
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_u8(uint8x8_t __a)
{
  return (poly64x1_t)__a;
}
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_u16(uint16x4_t __a)
{
  return (poly64x1_t)__a;
}
__extension__ extern __inline poly64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_p64_u32(uint32x2_t __a)
{
  return (poly64x1_t)__a;
}
#pragma GCC pop_options
/* vreinterpret_s64_<src>: reinterpret the bit pattern of a 64-bit NEON
   vector as int64x1_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_p8(poly8x8_t __a)
{
  return (int64x1_t)__a;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_p16(poly16x4_t __a)
{
  return (int64x1_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_f16(float16x4_t __a)
{
  return (int64x1_t)__a;
}
#endif
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_f32(float32x2_t __a)
{
  return (int64x1_t)__a;
}
/* poly64 source variant is compiled with the crypto FPU target enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_p64(poly64x1_t __a)
{
  return (int64x1_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u64(uint64x1_t __a)
{
  return (int64x1_t)__a;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_s8(int8x8_t __a)
{
  return (int64x1_t)__a;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_s16(int16x4_t __a)
{
  return (int64x1_t)__a;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_s32(int32x2_t __a)
{
  return (int64x1_t)__a;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u8(uint8x8_t __a)
{
  return (int64x1_t)__a;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u16(uint16x4_t __a)
{
  return (int64x1_t)__a;
}
__extension__ extern __inline int64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s64_u32(uint32x2_t __a)
{
  return (int64x1_t)__a;
}
/* vreinterpret_u64_<src>: reinterpret the bit pattern of a 64-bit NEON
   vector as uint64x1_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_p8(poly8x8_t __a)
{
  return (uint64x1_t)__a;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_p16(poly16x4_t __a)
{
  return (uint64x1_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_f16(float16x4_t __a)
{
  return (uint64x1_t)__a;
}
#endif
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_f32(float32x2_t __a)
{
  return (uint64x1_t)__a;
}
/* poly64 source variant is compiled with the crypto FPU target enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_p64(poly64x1_t __a)
{
  return (uint64x1_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s64(int64x1_t __a)
{
  return (uint64x1_t)__a;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s8(int8x8_t __a)
{
  return (uint64x1_t)__a;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s16(int16x4_t __a)
{
  return (uint64x1_t)__a;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_s32(int32x2_t __a)
{
  return (uint64x1_t)__a;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_u8(uint8x8_t __a)
{
  return (uint64x1_t)__a;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_u16(uint16x4_t __a)
{
  return (uint64x1_t)__a;
}
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u64_u32(uint32x2_t __a)
{
  return (uint64x1_t)__a;
}
/* vreinterpret_s8_<src>: reinterpret the bit pattern of a 64-bit NEON
   vector as int8x8_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_p8(poly8x8_t __a)
{
  return (int8x8_t)__a;
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_p16(poly16x4_t __a)
{
  return (int8x8_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_f16(float16x4_t __a)
{
  return (int8x8_t)__a;
}
#endif
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_f32(float32x2_t __a)
{
  return (int8x8_t)__a;
}
/* poly64 source variant is compiled with the crypto FPU target enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_p64(poly64x1_t __a)
{
  return (int8x8_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_s64(int64x1_t __a)
{
  return (int8x8_t)__a;
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u64(uint64x1_t __a)
{
  return (int8x8_t)__a;
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_s16(int16x4_t __a)
{
  return (int8x8_t)__a;
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_s32(int32x2_t __a)
{
  return (int8x8_t)__a;
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u8(uint8x8_t __a)
{
  return (int8x8_t)__a;
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u16(uint16x4_t __a)
{
  return (int8x8_t)__a;
}
__extension__ extern __inline int8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s8_u32(uint32x2_t __a)
{
  return (int8x8_t)__a;
}
/* vreinterpret_s16_<src>: reinterpret the bit pattern of a 64-bit NEON
   vector as int16x4_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_p8(poly8x8_t __a)
{
  return (int16x4_t)__a;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_p16(poly16x4_t __a)
{
  return (int16x4_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_f16(float16x4_t __a)
{
  return (int16x4_t)__a;
}
#endif
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_f32(float32x2_t __a)
{
  return (int16x4_t)__a;
}
/* poly64 source variant is compiled with the crypto FPU target enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_p64(poly64x1_t __a)
{
  return (int16x4_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_s64(int64x1_t __a)
{
  return (int16x4_t)__a;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u64(uint64x1_t __a)
{
  return (int16x4_t)__a;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_s8(int8x8_t __a)
{
  return (int16x4_t)__a;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_s32(int32x2_t __a)
{
  return (int16x4_t)__a;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u8(uint8x8_t __a)
{
  return (int16x4_t)__a;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u16(uint16x4_t __a)
{
  return (int16x4_t)__a;
}
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s16_u32(uint32x2_t __a)
{
  return (int16x4_t)__a;
}
/* vreinterpret_s32_<src>: reinterpret the bit pattern of a 64-bit NEON
   vector as int32x2_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_p8(poly8x8_t __a)
{
  return (int32x2_t)__a;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_p16(poly16x4_t __a)
{
  return (int32x2_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_f16(float16x4_t __a)
{
  return (int32x2_t)__a;
}
#endif
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_f32(float32x2_t __a)
{
  return (int32x2_t)__a;
}
/* poly64 source variant is compiled with the crypto FPU target enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_p64(poly64x1_t __a)
{
  return (int32x2_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_s64(int64x1_t __a)
{
  return (int32x2_t)__a;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u64(uint64x1_t __a)
{
  return (int32x2_t)__a;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_s8(int8x8_t __a)
{
  return (int32x2_t)__a;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_s16(int16x4_t __a)
{
  return (int32x2_t)__a;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u8(uint8x8_t __a)
{
  return (int32x2_t)__a;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u16(uint16x4_t __a)
{
  return (int32x2_t)__a;
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_s32_u32(uint32x2_t __a)
{
  return (int32x2_t)__a;
}
/* vreinterpret_u8_<src>: reinterpret the bit pattern of a 64-bit NEON
   vector as uint8x8_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_p8(poly8x8_t __a)
{
  return (uint8x8_t)__a;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_p16(poly16x4_t __a)
{
  return (uint8x8_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_f16(float16x4_t __a)
{
  return (uint8x8_t)__a;
}
#endif
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_f32(float32x2_t __a)
{
  return (uint8x8_t)__a;
}
/* poly64 source variant is compiled with the crypto FPU target enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_p64(poly64x1_t __a)
{
  return (uint8x8_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s64(int64x1_t __a)
{
  return (uint8x8_t)__a;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_u64(uint64x1_t __a)
{
  return (uint8x8_t)__a;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s8(int8x8_t __a)
{
  return (uint8x8_t)__a;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s16(int16x4_t __a)
{
  return (uint8x8_t)__a;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_s32(int32x2_t __a)
{
  return (uint8x8_t)__a;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_u16(uint16x4_t __a)
{
  return (uint8x8_t)__a;
}
__extension__ extern __inline uint8x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u8_u32(uint32x2_t __a)
{
  return (uint8x8_t)__a;
}
/* vreinterpret_u16_<src>: reinterpret the bit pattern of a 64-bit NEON
   vector as uint16x4_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_p8(poly8x8_t __a)
{
  return (uint16x4_t)__a;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_p16(poly16x4_t __a)
{
  return (uint16x4_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_f16(float16x4_t __a)
{
  return (uint16x4_t)__a;
}
#endif
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_f32(float32x2_t __a)
{
  return (uint16x4_t)__a;
}
/* poly64 source variant is compiled with the crypto FPU target enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_p64(poly64x1_t __a)
{
  return (uint16x4_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s64(int64x1_t __a)
{
  return (uint16x4_t)__a;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_u64(uint64x1_t __a)
{
  return (uint16x4_t)__a;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s8(int8x8_t __a)
{
  return (uint16x4_t)__a;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s16(int16x4_t __a)
{
  return (uint16x4_t)__a;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_s32(int32x2_t __a)
{
  return (uint16x4_t)__a;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_u8(uint8x8_t __a)
{
  return (uint16x4_t)__a;
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u16_u32(uint32x2_t __a)
{
  return (uint16x4_t)__a;
}
/* vreinterpret_u32_<src>: reinterpret the bit pattern of a 64-bit NEON
   vector as uint32x2_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_p8(poly8x8_t __a)
{
  return (uint32x2_t)__a;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_p16(poly16x4_t __a)
{
  return (uint32x2_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_f16(float16x4_t __a)
{
  return (uint32x2_t)__a;
}
#endif
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_f32(float32x2_t __a)
{
  return (uint32x2_t)__a;
}
/* poly64 source variant is compiled with the crypto FPU target enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_p64(poly64x1_t __a)
{
  return (uint32x2_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s64(int64x1_t __a)
{
  return (uint32x2_t)__a;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_u64(uint64x1_t __a)
{
  return (uint32x2_t)__a;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s8(int8x8_t __a)
{
  return (uint32x2_t)__a;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s16(int16x4_t __a)
{
  return (uint32x2_t)__a;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_s32(int32x2_t __a)
{
  return (uint32x2_t)__a;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_u8(uint8x8_t __a)
{
  return (uint32x2_t)__a;
}
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpret_u32_u16(uint16x4_t __a)
{
  return (uint32x2_t)__a;
}
/* vreinterpretq_p8_<src>: reinterpret the bit pattern of a 128-bit NEON
   vector (q form) as poly8x16_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_p16(poly16x8_t __a)
{
  return (poly8x16_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_f16(float16x8_t __a)
{
  return (poly8x16_t)__a;
}
#endif
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_f32(float32x4_t __a)
{
  return (poly8x16_t)__a;
}
/* poly64/poly128 source variants are compiled with the crypto FPU target
   enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_p64(poly64x2_t __a)
{
  return (poly8x16_t)__a;
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_p128(poly128_t __a)
{
  return (poly8x16_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_s64(int64x2_t __a)
{
  return (poly8x16_t)__a;
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_u64(uint64x2_t __a)
{
  return (poly8x16_t)__a;
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_s8(int8x16_t __a)
{
  return (poly8x16_t)__a;
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_s16(int16x8_t __a)
{
  return (poly8x16_t)__a;
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_s32(int32x4_t __a)
{
  return (poly8x16_t)__a;
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_u8(uint8x16_t __a)
{
  return (poly8x16_t)__a;
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_u16(uint16x8_t __a)
{
  return (poly8x16_t)__a;
}
__extension__ extern __inline poly8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p8_u32(uint32x4_t __a)
{
  return (poly8x16_t)__a;
}
/* vreinterpretq_p16_<src>: reinterpret the bit pattern of a 128-bit NEON
   vector (q form) as poly16x8_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_p8(poly8x16_t __a)
{
  return (poly16x8_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_f16(float16x8_t __a)
{
  return (poly16x8_t)__a;
}
#endif
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_f32(float32x4_t __a)
{
  return (poly16x8_t)__a;
}
/* poly64/poly128 source variants are compiled with the crypto FPU target
   enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_p64(poly64x2_t __a)
{
  return (poly16x8_t)__a;
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_p128(poly128_t __a)
{
  return (poly16x8_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_s64(int64x2_t __a)
{
  return (poly16x8_t)__a;
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_u64(uint64x2_t __a)
{
  return (poly16x8_t)__a;
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_s8(int8x16_t __a)
{
  return (poly16x8_t)__a;
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_s16(int16x8_t __a)
{
  return (poly16x8_t)__a;
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_s32(int32x4_t __a)
{
  return (poly16x8_t)__a;
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_u8(uint8x16_t __a)
{
  return (poly16x8_t)__a;
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_u16(uint16x8_t __a)
{
  return (poly16x8_t)__a;
}
__extension__ extern __inline poly16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p16_u32(uint32x4_t __a)
{
  return (poly16x8_t)__a;
}
/* vreinterpretq_f16_<src>: reinterpret the bit pattern of a 128-bit NEON
   vector (q form) as float16x8_t.  Pure casts; bits are unchanged.  Every
   variant in this family is individually guarded on an __fp16 format
   (IEEE or ARM alternative) being available, since float16x8_t itself
   depends on it.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_p8(poly8x16_t __a)
{
  return (float16x8_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_p16(poly16x8_t __a)
{
  return (float16x8_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_f32(float32x4_t __a)
{
  return (float16x8_t)__a;
}
#endif
/* poly64/poly128 source variants additionally need the crypto FPU target.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_p64(poly64x2_t __a)
{
  return (float16x8_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_p128(poly128_t __a)
{
  return (float16x8_t)__a;
}
#endif
#pragma GCC pop_options
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s64(int64x2_t __a)
{
  return (float16x8_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u64(uint64x2_t __a)
{
  return (float16x8_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s8(int8x16_t __a)
{
  return (float16x8_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s16(int16x8_t __a)
{
  return (float16x8_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_s32(int32x4_t __a)
{
  return (float16x8_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u8(uint8x16_t __a)
{
  return (float16x8_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u16(uint16x8_t __a)
{
  return (float16x8_t)__a;
}
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f16_u32(uint32x4_t __a)
{
  return (float16x8_t)__a;
}
#endif
/* vreinterpretq_f32_<src>: reinterpret the bit pattern of a 128-bit NEON
   vector (q form) as float32x4_t.  Pure casts; bits are unchanged.  */
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_p8(poly8x16_t __a)
{
  return (float32x4_t)__a;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_p16(poly16x8_t __a)
{
  return (float32x4_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_f16(float16x8_t __a)
{
  return (float32x4_t)__a;
}
#endif
/* poly64/poly128 source variants are compiled with the crypto FPU target
   enabled.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_p64(poly64x2_t __a)
{
  return (float32x4_t)__a;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_p128(poly128_t __a)
{
  return (float32x4_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s64(int64x2_t __a)
{
  return (float32x4_t)__a;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u64(uint64x2_t __a)
{
  return (float32x4_t)__a;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s8(int8x16_t __a)
{
  return (float32x4_t)__a;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s16(int16x8_t __a)
{
  return (float32x4_t)__a;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_s32(int32x4_t __a)
{
  return (float32x4_t)__a;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u8(uint8x16_t __a)
{
  return (float32x4_t)__a;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u16(uint16x8_t __a)
{
  return (float32x4_t)__a;
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_f32_u32(uint32x4_t __a)
{
  return (float32x4_t)__a;
}
/* vreinterpretq_p64_<src>: reinterpret the bit pattern of a 128-bit NEON
   vector (q form) as poly64x2_t.  Pure casts; bits are unchanged.  This
   pragma region enables the crypto FPU target (needed for poly64x2_t /
   poly128_t); note the matching pop_options lies after the following
   poly128 family, not at the end of this one.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_p8(poly8x16_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_p16(poly16x8_t __a)
{
  return (poly64x2_t)__a;
}
/* float16 source variant requires an __fp16 format.  */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_f16(float16x8_t __a)
{
  return (poly64x2_t)__a;
}
#endif
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_f32(float32x4_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_p128(poly128_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_s64(int64x2_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_u64(uint64x2_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_s8(int8x16_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_s16(int16x8_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_s32(int32x4_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_u8(uint8x16_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_u16(uint16x8_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p64_u32(uint32x4_t __a)
{
  return (poly64x2_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_p8(poly8x16_t __a)
{
return (poly128_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_p16(poly16x8_t __a)
{
return (poly128_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_f16(float16x8_t __a)
{
return (poly128_t)__a;
}
#endif
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_f32(float32x4_t __a)
{
return (poly128_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_p64(poly64x2_t __a)
{
return (poly128_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_s64(int64x2_t __a)
{
return (poly128_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_u64(uint64x2_t __a)
{
return (poly128_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_s8(int8x16_t __a)
{
return (poly128_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_s16(int16x8_t __a)
{
return (poly128_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_s32(int32x4_t __a)
{
return (poly128_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_u8(uint8x16_t __a)
{
return (poly128_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_u16(uint16x8_t __a)
{
return (poly128_t)__a;
}
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_p128_u32(uint32x4_t __a)
{
return (poly128_t)__a;
}
#pragma GCC pop_options
/* vreinterpretq_s64_<src>: reinterpret the bits of a 128-bit NEON vector
as int64x2_t.  Pure type cast; the underlying register contents are
unchanged.  The p64/p128 source variants need the crypto target.  */
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_p8(poly8x16_t __a)
{
return (int64x2_t)__a;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_p16(poly16x8_t __a)
{
return (int64x2_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_f16(float16x8_t __a)
{
return (int64x2_t)__a;
}
#endif
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_f32(float32x4_t __a)
{
return (int64x2_t)__a;
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_p64(poly64x2_t __a)
{
return (int64x2_t)__a;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_p128(poly128_t __a)
{
return (int64x2_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u64(uint64x2_t __a)
{
return (int64x2_t)__a;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_s8(int8x16_t __a)
{
return (int64x2_t)__a;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_s16(int16x8_t __a)
{
return (int64x2_t)__a;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_s32(int32x4_t __a)
{
return (int64x2_t)__a;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u8(uint8x16_t __a)
{
return (int64x2_t)__a;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u16(uint16x8_t __a)
{
return (int64x2_t)__a;
}
__extension__ extern __inline int64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s64_u32(uint32x4_t __a)
{
return (int64x2_t)__a;
}
/* vreinterpretq_u64_<src>: reinterpret the bits of a 128-bit NEON vector
as uint64x2_t.  Pure type cast; the underlying register contents are
unchanged.  The p64/p128 source variants need the crypto target.  */
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_p8(poly8x16_t __a)
{
return (uint64x2_t)__a;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_p16(poly16x8_t __a)
{
return (uint64x2_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_f16(float16x8_t __a)
{
return (uint64x2_t)__a;
}
#endif
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_f32(float32x4_t __a)
{
return (uint64x2_t)__a;
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_p64(poly64x2_t __a)
{
return (uint64x2_t)__a;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_p128(poly128_t __a)
{
return (uint64x2_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s64(int64x2_t __a)
{
return (uint64x2_t)__a;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s8(int8x16_t __a)
{
return (uint64x2_t)__a;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s16(int16x8_t __a)
{
return (uint64x2_t)__a;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_s32(int32x4_t __a)
{
return (uint64x2_t)__a;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_u8(uint8x16_t __a)
{
return (uint64x2_t)__a;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_u16(uint16x8_t __a)
{
return (uint64x2_t)__a;
}
__extension__ extern __inline uint64x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u64_u32(uint32x4_t __a)
{
return (uint64x2_t)__a;
}
/* vreinterpretq_s8_<src>: reinterpret the bits of a 128-bit NEON vector
as int8x16_t.  Pure type cast; the underlying register contents are
unchanged.  The p64/p128 source variants need the crypto target.  */
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_p8(poly8x16_t __a)
{
return (int8x16_t)__a;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_p16(poly16x8_t __a)
{
return (int8x16_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_f16(float16x8_t __a)
{
return (int8x16_t)__a;
}
#endif
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_f32(float32x4_t __a)
{
return (int8x16_t)__a;
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_p64(poly64x2_t __a)
{
return (int8x16_t)__a;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_p128(poly128_t __a)
{
return (int8x16_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_s64(int64x2_t __a)
{
return (int8x16_t)__a;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u64(uint64x2_t __a)
{
return (int8x16_t)__a;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_s16(int16x8_t __a)
{
return (int8x16_t)__a;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_s32(int32x4_t __a)
{
return (int8x16_t)__a;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u8(uint8x16_t __a)
{
return (int8x16_t)__a;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u16(uint16x8_t __a)
{
return (int8x16_t)__a;
}
__extension__ extern __inline int8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s8_u32(uint32x4_t __a)
{
return (int8x16_t)__a;
}
/* vreinterpretq_s16_<src>: reinterpret the bits of a 128-bit NEON vector
as int16x8_t.  Pure type cast; the underlying register contents are
unchanged.  The p64/p128 source variants need the crypto target.  */
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_p8(poly8x16_t __a)
{
return (int16x8_t)__a;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_p16(poly16x8_t __a)
{
return (int16x8_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_f16(float16x8_t __a)
{
return (int16x8_t)__a;
}
#endif
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_f32(float32x4_t __a)
{
return (int16x8_t)__a;
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_p64(poly64x2_t __a)
{
return (int16x8_t)__a;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_p128(poly128_t __a)
{
return (int16x8_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_s64(int64x2_t __a)
{
return (int16x8_t)__a;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u64(uint64x2_t __a)
{
return (int16x8_t)__a;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_s8(int8x16_t __a)
{
return (int16x8_t)__a;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_s32(int32x4_t __a)
{
return (int16x8_t)__a;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u8(uint8x16_t __a)
{
return (int16x8_t)__a;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u16(uint16x8_t __a)
{
return (int16x8_t)__a;
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s16_u32(uint32x4_t __a)
{
return (int16x8_t)__a;
}
/* vreinterpretq_s32_<src>: reinterpret the bits of a 128-bit NEON vector
as int32x4_t.  Pure type cast; the underlying register contents are
unchanged.  The p64/p128 source variants need the crypto target.  */
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_p8(poly8x16_t __a)
{
return (int32x4_t)__a;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_p16(poly16x8_t __a)
{
return (int32x4_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_f16(float16x8_t __a)
{
return (int32x4_t)__a;
}
#endif
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_f32(float32x4_t __a)
{
return (int32x4_t)__a;
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_p64(poly64x2_t __a)
{
return (int32x4_t)__a;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_p128(poly128_t __a)
{
return (int32x4_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_s64(int64x2_t __a)
{
return (int32x4_t)__a;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u64(uint64x2_t __a)
{
return (int32x4_t)__a;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_s8(int8x16_t __a)
{
return (int32x4_t)__a;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_s16(int16x8_t __a)
{
return (int32x4_t)__a;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u8(uint8x16_t __a)
{
return (int32x4_t)__a;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u16(uint16x8_t __a)
{
return (int32x4_t)__a;
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_s32_u32(uint32x4_t __a)
{
return (int32x4_t)__a;
}
/* vreinterpretq_u8_<src>: reinterpret the bits of a 128-bit NEON vector
as uint8x16_t.  Pure type cast; the underlying register contents are
unchanged.  The p64/p128 source variants need the crypto target.  */
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_p8(poly8x16_t __a)
{
return (uint8x16_t)__a;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_p16(poly16x8_t __a)
{
return (uint8x16_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_f16(float16x8_t __a)
{
return (uint8x16_t)__a;
}
#endif
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_f32(float32x4_t __a)
{
return (uint8x16_t)__a;
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_p64(poly64x2_t __a)
{
return (uint8x16_t)__a;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_p128(poly128_t __a)
{
return (uint8x16_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s64(int64x2_t __a)
{
return (uint8x16_t)__a;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_u64(uint64x2_t __a)
{
return (uint8x16_t)__a;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s8(int8x16_t __a)
{
return (uint8x16_t)__a;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s16(int16x8_t __a)
{
return (uint8x16_t)__a;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_s32(int32x4_t __a)
{
return (uint8x16_t)__a;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_u16(uint16x8_t __a)
{
return (uint8x16_t)__a;
}
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u8_u32(uint32x4_t __a)
{
return (uint8x16_t)__a;
}
/* vreinterpretq_u16_<src>: reinterpret the bits of a 128-bit NEON vector
as uint16x8_t.  Pure type cast; the underlying register contents are
unchanged.  The p64/p128 source variants need the crypto target.  */
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_p8(poly8x16_t __a)
{
return (uint16x8_t)__a;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_p16(poly16x8_t __a)
{
return (uint16x8_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_f16(float16x8_t __a)
{
return (uint16x8_t)__a;
}
#endif
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_f32(float32x4_t __a)
{
return (uint16x8_t)__a;
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_p64(poly64x2_t __a)
{
return (uint16x8_t)__a;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_p128(poly128_t __a)
{
return (uint16x8_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s64(int64x2_t __a)
{
return (uint16x8_t)__a;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_u64(uint64x2_t __a)
{
return (uint16x8_t)__a;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s8(int8x16_t __a)
{
return (uint16x8_t)__a;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s16(int16x8_t __a)
{
return (uint16x8_t)__a;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_s32(int32x4_t __a)
{
return (uint16x8_t)__a;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_u8(uint8x16_t __a)
{
return (uint16x8_t)__a;
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u16_u32(uint32x4_t __a)
{
return (uint16x8_t)__a;
}
/* vreinterpretq_u32_<src>: reinterpret the bits of a 128-bit NEON vector
as uint32x4_t.  Pure type cast; the underlying register contents are
unchanged.  The p64/p128 source variants need the crypto target.  */
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_p8(poly8x16_t __a)
{
return (uint32x4_t)__a;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_p16(poly16x8_t __a)
{
return (uint32x4_t)__a;
}
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_f16(float16x8_t __a)
{
return (uint32x4_t)__a;
}
#endif
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_f32(float32x4_t __a)
{
return (uint32x4_t)__a;
}
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_p64(poly64x2_t __a)
{
return (uint32x4_t)__a;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_p128(poly128_t __a)
{
return (uint32x4_t)__a;
}
#pragma GCC pop_options
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s64(int64x2_t __a)
{
return (uint32x4_t)__a;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_u64(uint64x2_t __a)
{
return (uint32x4_t)__a;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s8(int8x16_t __a)
{
return (uint32x4_t)__a;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s16(int16x8_t __a)
{
return (uint32x4_t)__a;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_s32(int32x4_t __a)
{
return (uint32x4_t)__a;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_u8(uint8x16_t __a)
{
return (uint32x4_t)__a;
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vreinterpretq_u32_u16(uint16x8_t __a)
{
return (uint32x4_t)__a;
}
/* ARMv8 Cryptographic Extension intrinsics: poly128 load/store, poly64
compare/test emulations, AES, SHA-1, SHA-256, and 64x64->128-bit
polynomial multiply.  All require the crypto FPU target.  */
#pragma GCC push_options
#pragma GCC target("fpu=crypto-neon-fp-armv8")
/* Load a poly128_t from memory.  On big-endian the two 64-bit halves
must be swapped so the value is assembled in the expected order.  */
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vldrq_p128(poly128_t const* __ptr)
{
#ifdef __ARM_BIG_ENDIAN
poly64_t* __ptmp = (poly64_t*)__ptr;
poly64_t __d0 = vld1_p64(__ptmp);
poly64_t __d1 = vld1_p64(__ptmp + 1);
return vreinterpretq_p128_p64(vcombine_p64(__d1, __d0));
#else
return vreinterpretq_p128_p64(vld1q_p64((poly64_t*)__ptr));
#endif
}
/* Store a poly128_t to memory.  Mirrors vldrq_p128: halves are swapped
on big-endian before the store.  */
__extension__ extern __inline void
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vstrq_p128(poly128_t* __ptr, poly128_t __val)
{
#ifdef __ARM_BIG_ENDIAN
poly64x2_t __tmp = vreinterpretq_p64_p128(__val);
poly64_t __d0 = vget_high_p64(__tmp);
poly64_t __d1 = vget_low_p64(__tmp);
vst1q_p64((poly64_t*)__ptr, vcombine_p64(__d0, __d1));
#else
vst1q_p64((poly64_t*)__ptr, vreinterpretq_p64_p128(__val));
#endif
}
/* The vceq_p64 intrinsic does not map to a single instruction.
Instead we emulate it by performing a 32-bit variant of the vceq
and applying a pairwise min reduction to the result.
vceq_u32 will produce two 32-bit halves, each of which will contain either
all ones or all zeros depending on whether the corresponding 32-bit
halves of the poly64_t were equal. The whole poly64_t values are equal
if and only if both halves are equal, i.e. vceq_u32 returns all ones.
If the result is all zeroes for any half then the whole result is zeroes.
This is what the pairwise min reduction achieves. */
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceq_p64(poly64x1_t __a, poly64x1_t __b)
{
uint32x2_t __t_a = vreinterpret_u32_p64(__a);
uint32x2_t __t_b = vreinterpret_u32_p64(__b);
uint32x2_t __c = vceq_u32(__t_a, __t_b);
uint32x2_t __m = vpmin_u32(__c, __c);
return vreinterpret_u64_u32(__m);
}
/* The vtst_p64 intrinsic does not map to a single instruction.
We emulate it in way similar to vceq_p64 above but here we do
a reduction with max since if any two corresponding bits
in the two poly64_t's match, then the whole result must be all ones. */
__extension__ extern __inline uint64x1_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtst_p64(poly64x1_t __a, poly64x1_t __b)
{
uint32x2_t __t_a = vreinterpret_u32_p64(__a);
uint32x2_t __t_b = vreinterpret_u32_p64(__b);
uint32x2_t __c = vtst_u32(__t_a, __t_b);
uint32x2_t __m = vpmax_u32(__c, __c);
return vreinterpret_u64_u32(__m);
}
/* AES single round encrypt (AESE): AddRoundKey + SubBytes + ShiftRows.  */
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaeseq_u8(uint8x16_t __data, uint8x16_t __key)
{
return __builtin_arm_crypto_aese(__data, __key);
}
/* AES single round decrypt (AESD).  */
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaesdq_u8(uint8x16_t __data, uint8x16_t __key)
{
return __builtin_arm_crypto_aesd(__data, __key);
}
/* AES MixColumns (AESMC).  */
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaesmcq_u8(uint8x16_t __data)
{
return __builtin_arm_crypto_aesmc(__data);
}
/* AES inverse MixColumns (AESIMC).  */
__extension__ extern __inline uint8x16_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaesimcq_u8(uint8x16_t __data)
{
return __builtin_arm_crypto_aesimc(__data);
}
/* SHA1H takes a scalar but the underlying builtin is vector-based, so
the input is placed in lane 0 of a zeroed vector and lane 0 of the
result is extracted.  */
__extension__ extern __inline uint32_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsha1h_u32(uint32_t __hash_e)
{
uint32x4_t __t = vdupq_n_u32(0);
__t = vsetq_lane_u32(__hash_e, __t, 0);
__t = __builtin_arm_crypto_sha1h(__t);
return vgetq_lane_u32(__t, 0);
}
/* SHA1C hash update (choose).  __hash_e is widened into lane 0 of a
temporary vector as required by the builtin.  */
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsha1cq_u32(uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
{
uint32x4_t __t = vdupq_n_u32(0);
__t = vsetq_lane_u32(__hash_e, __t, 0);
return __builtin_arm_crypto_sha1c(__hash_abcd, __t, __wk);
}
/* SHA1P hash update (parity).  */
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsha1pq_u32(uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
{
uint32x4_t __t = vdupq_n_u32(0);
__t = vsetq_lane_u32(__hash_e, __t, 0);
return __builtin_arm_crypto_sha1p(__hash_abcd, __t, __wk);
}
/* SHA1M hash update (majority).  */
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsha1mq_u32(uint32x4_t __hash_abcd, uint32_t __hash_e, uint32x4_t __wk)
{
uint32x4_t __t = vdupq_n_u32(0);
__t = vsetq_lane_u32(__hash_e, __t, 0);
return __builtin_arm_crypto_sha1m(__hash_abcd, __t, __wk);
}
/* SHA1 schedule update, part 1 (SHA1SU0).  */
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsha1su0q_u32(uint32x4_t __w0_3, uint32x4_t __w4_7, uint32x4_t __w8_11)
{
return __builtin_arm_crypto_sha1su0(__w0_3, __w4_7, __w8_11);
}
/* SHA1 schedule update, part 2 (SHA1SU1).  */
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsha1su1q_u32(uint32x4_t __tw0_3, uint32x4_t __w12_15)
{
return __builtin_arm_crypto_sha1su1(__tw0_3, __w12_15);
}
/* SHA256 hash update, part 1 (SHA256H).  */
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsha256hq_u32(uint32x4_t __hash_abcd, uint32x4_t __hash_efgh,
uint32x4_t __wk)
{
return __builtin_arm_crypto_sha256h(__hash_abcd, __hash_efgh, __wk);
}
/* SHA256 hash update, part 2 (SHA256H2).  */
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsha256h2q_u32(uint32x4_t __hash_abcd, uint32x4_t __hash_efgh,
uint32x4_t __wk)
{
return __builtin_arm_crypto_sha256h2(__hash_abcd, __hash_efgh, __wk);
}
/* SHA256 schedule update, part 1 (SHA256SU0).  */
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsha256su0q_u32(uint32x4_t __w0_3, uint32x4_t __w4_7)
{
return __builtin_arm_crypto_sha256su0(__w0_3, __w4_7);
}
/* SHA256 schedule update, part 2 (SHA256SU1).  */
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsha256su1q_u32(uint32x4_t __tw0_3, uint32x4_t __w8_11, uint32x4_t __w12_15)
{
return __builtin_arm_crypto_sha256su1(__tw0_3, __w8_11, __w12_15);
}
/* Polynomial (carry-less) multiply of two 64-bit values giving a
128-bit product (PMULL).  */
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_p64(poly64_t __a, poly64_t __b)
{
return (poly128_t)__builtin_arm_crypto_vmullp64((uint64_t)__a,
(uint64_t)__b);
}
/* PMULL2: polynomial multiply of the high 64-bit lanes of each input.  */
__extension__ extern __inline poly128_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmull_high_p64(poly64x2_t __a, poly64x2_t __b)
{
poly64_t __t1 = vget_high_p64(__a);
poly64_t __t2 = vget_high_p64(__b);
return (poly128_t)__builtin_arm_crypto_vmullp64((uint64_t)__t1,
(uint64_t)__t2);
}
#pragma GCC pop_options
/* Intrinsics for FP16 instructions. */
#pragma GCC push_options
#pragma GCC target("fpu=neon-fp-armv8")
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabd_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vabdv4hf(__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabdq_f16(float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vabdv8hf(__a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabs_f16(float16x4_t __a)
{
return __builtin_neon_vabsv4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vabsq_f16(float16x8_t __a)
{
return __builtin_neon_vabsv8hf(__a);
}
/* Per-lane half-precision addition.  Under -ffast-math the plain C
   operator is used so GCC's generic vector optimizations apply;
   otherwise the builtin keeps IEEE-exact NEON semantics.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vadd_f16(float16x4_t __a, float16x4_t __b)
{
#ifdef __FAST_MATH__
return __a + __b;
#else
return __builtin_neon_vaddv4hf(__a, __b);
#endif
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vaddq_f16(float16x8_t __a, float16x8_t __b)
{
#ifdef __FAST_MATH__
return __a + __b;
#else
return __builtin_neon_vaddv8hf(__a, __b);
#endif
}
/* Half-precision vector comparisons.  Each lane of the result is a
   16-bit mask: all-ones when the comparison holds, zero otherwise
   (hence the unsigned result type and the cast from the builtin's
   signed-typed return).  "ca" variants compare absolute values
   (|__a| op |__b|); the "z" variants compare against zero.  */
/* |__a| >= |__b| per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcage_f16(float16x4_t __a, float16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vcagev4hf(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcageq_f16(float16x8_t __a, float16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vcagev8hf(__a, __b);
}
/* |__a| > |__b| per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcagt_f16(float16x4_t __a, float16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vcagtv4hf(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcagtq_f16(float16x8_t __a, float16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vcagtv8hf(__a, __b);
}
/* |__a| <= |__b| per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcale_f16(float16x4_t __a, float16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vcalev4hf(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcaleq_f16(float16x8_t __a, float16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vcalev8hf(__a, __b);
}
/* |__a| < |__b| per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcalt_f16(float16x4_t __a, float16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vcaltv4hf(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcaltq_f16(float16x8_t __a, float16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vcaltv8hf(__a, __b);
}
/* __a == __b per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceq_f16(float16x4_t __a, float16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vceqv4hf(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqq_f16(float16x8_t __a, float16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vceqv8hf(__a, __b);
}
/* __a == 0 per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqz_f16(float16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vceqzv4hf(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vceqzq_f16(float16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vceqzv8hf(__a);
}
/* __a >= __b per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcge_f16(float16x4_t __a, float16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vcgev4hf(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgeq_f16(float16x8_t __a, float16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vcgev8hf(__a, __b);
}
/* __a >= 0 per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgez_f16(float16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vcgezv4hf(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgezq_f16(float16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vcgezv8hf(__a);
}
/* __a > __b per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgt_f16(float16x4_t __a, float16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vcgtv4hf(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgtq_f16(float16x8_t __a, float16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vcgtv8hf(__a, __b);
}
/* __a > 0 per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgtz_f16(float16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vcgtzv4hf(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcgtzq_f16(float16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vcgtzv8hf(__a);
}
/* __a <= __b per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcle_f16(float16x4_t __a, float16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vclev4hf(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcleq_f16(float16x8_t __a, float16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vclev8hf(__a, __b);
}
/* __a <= 0 per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclez_f16(float16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vclezv4hf(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclezq_f16(float16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vclezv8hf(__a);
}
/* __a < __b per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vclt_f16(float16x4_t __a, float16x4_t __b)
{
return (uint16x4_t)__builtin_neon_vcltv4hf(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcltq_f16(float16x8_t __a, float16x8_t __b)
{
return (uint16x8_t)__builtin_neon_vcltv8hf(__a, __b);
}
/* __a < 0 per lane.  */
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcltz_f16(float16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vcltzv4hf(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcltzq_f16(float16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vcltzv8hf(__a);
}
/* Per-lane conversions between 16-bit integers and half-precision
   floats.  The builtins are typed on signed integer vectors, so the
   unsigned variants cast their argument to the signed vector type and
   call the "u" (unsigned-semantics) builtin.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f16_s16(int16x4_t __a)
{
return (float16x4_t)__builtin_neon_vcvtsv4hi(__a);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_f16_u16(uint16x4_t __a)
{
return (float16x4_t)__builtin_neon_vcvtuv4hi((int16x4_t)__a);
}
/* Float-to-integer conversions (truncating toward zero, per the
   default NEON VCVT behaviour — the vcvt{a,m,n,p} variants below give
   explicit rounding modes).  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_s16_f16(float16x4_t __a)
{
return (int16x4_t)__builtin_neon_vcvtsv4hf(__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_u16_f16(float16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vcvtuv4hf(__a);
}
/* 128-bit "q" forms of the four conversions above.  */
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f16_s16(int16x8_t __a)
{
return (float16x8_t)__builtin_neon_vcvtsv8hi(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_f16_u16(uint16x8_t __a)
{
return (float16x8_t)__builtin_neon_vcvtuv8hi((int16x8_t)__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_s16_f16(float16x8_t __a)
{
return (int16x8_t)__builtin_neon_vcvtsv8hf(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_u16_f16(float16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vcvtuv8hf(__a);
}
/* Half-precision to 16-bit integer conversions with an explicit
   rounding mode encoded in the instruction (not the FPSCR mode):
     a - to nearest, ties away from zero
     m - toward minus infinity (floor)
     n - to nearest, ties to even
     p - toward plus infinity (ceiling)
   Each comes in signed/unsigned and 64-bit/128-bit variants.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvta_s16_f16(float16x4_t __a)
{
return __builtin_neon_vcvtasv4hf(__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvta_u16_f16(float16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vcvtauv4hf(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtaq_s16_f16(float16x8_t __a)
{
return __builtin_neon_vcvtasv8hf(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtaq_u16_f16(float16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vcvtauv8hf(__a);
}
/* Round toward minus infinity.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtm_s16_f16(float16x4_t __a)
{
return __builtin_neon_vcvtmsv4hf(__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtm_u16_f16(float16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vcvtmuv4hf(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtmq_s16_f16(float16x8_t __a)
{
return __builtin_neon_vcvtmsv8hf(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtmq_u16_f16(float16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vcvtmuv8hf(__a);
}
/* Round to nearest, ties to even.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtn_s16_f16(float16x4_t __a)
{
return __builtin_neon_vcvtnsv4hf(__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtn_u16_f16(float16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vcvtnuv4hf(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtnq_s16_f16(float16x8_t __a)
{
return __builtin_neon_vcvtnsv8hf(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtnq_u16_f16(float16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vcvtnuv8hf(__a);
}
/* Round toward plus infinity.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtp_s16_f16(float16x4_t __a)
{
return __builtin_neon_vcvtpsv4hf(__a);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtp_u16_f16(float16x4_t __a)
{
return (uint16x4_t)__builtin_neon_vcvtpuv4hf(__a);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtpq_s16_f16(float16x8_t __a)
{
return __builtin_neon_vcvtpsv8hf(__a);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtpq_u16_f16(float16x8_t __a)
{
return (uint16x8_t)__builtin_neon_vcvtpuv8hf(__a);
}
/* Fixed-point <-> half-precision conversions.  __b is the number of
   fractional bits and must be a compile-time constant (the builtin
   range-checks it); the value is scaled by 2^__b during conversion.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_f16_s16(int16x4_t __a, const int __b)
{
return __builtin_neon_vcvts_nv4hi(__a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_f16_u16(uint16x4_t __a, const int __b)
{
return __builtin_neon_vcvtu_nv4hi((int16x4_t)__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_f16_s16(int16x8_t __a, const int __b)
{
return __builtin_neon_vcvts_nv8hi(__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_f16_u16(uint16x8_t __a, const int __b)
{
return __builtin_neon_vcvtu_nv8hi((int16x8_t)__a, __b);
}
/* Float to fixed-point with __b fractional bits.  */
__extension__ extern __inline int16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_s16_f16(float16x4_t __a, const int __b)
{
return __builtin_neon_vcvts_nv4hf(__a, __b);
}
__extension__ extern __inline uint16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvt_n_u16_f16(float16x4_t __a, const int __b)
{
return (uint16x4_t)__builtin_neon_vcvtu_nv4hf(__a, __b);
}
__extension__ extern __inline int16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_s16_f16(float16x8_t __a, const int __b)
{
return __builtin_neon_vcvts_nv8hf(__a, __b);
}
__extension__ extern __inline uint16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcvtq_n_u16_f16(float16x8_t __a, const int __b)
{
return (uint16x8_t)__builtin_neon_vcvtu_nv8hf(__a, __b);
}
/* Fused multiply-accumulate: __a + __b * __c per lane, with a single
   rounding step (VFMA).  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfma_f16(float16x4_t __a, float16x4_t __b, float16x4_t __c)
{
return __builtin_neon_vfmav4hf(__a, __b, __c);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmaq_f16(float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
return __builtin_neon_vfmav8hf(__a, __b, __c);
}
/* Fused multiply-subtract: __a - __b * __c per lane (VFMS).  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfms_f16(float16x4_t __a, float16x4_t __b, float16x4_t __c)
{
return __builtin_neon_vfmsv4hf(__a, __b, __c);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmsq_f16(float16x8_t __a, float16x8_t __b, float16x8_t __c)
{
return __builtin_neon_vfmsv8hf(__a, __b, __c);
}
/* Per-lane maximum/minimum.  The plain vmax/vmin forms use the classic
   NEON semantics; the "nm" forms use the IEEE 754-2008 maxNum/minNum
   behaviour (a numeric operand is preferred over a quiet NaN).  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmax_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vmaxfv4hf(__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxq_f16(float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vmaxfv8hf(__a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxnm_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vmaxnmv4hf(__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmaxnmq_f16(float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vmaxnmv8hf(__a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmin_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vminfv4hf(__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminq_f16(float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vminfv8hf(__a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminnm_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vminnmv4hf(__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vminnmq_f16(float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vminnmv8hf(__a, __b);
}
/* Per-lane half-precision multiply.  Under -ffast-math the plain C
   operator is used (enables generic vector optimizations); otherwise
   the builtin keeps exact NEON semantics.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_f16(float16x4_t __a, float16x4_t __b)
{
#ifdef __FAST_MATH__
return __a * __b;
#else
return __builtin_neon_vmulfv4hf(__a, __b);
#endif
}
/* Multiply every lane of __a by lane __c of __b (__c must be a
   compile-time constant lane index).  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_lane_f16(float16x4_t __a, float16x4_t __b, const int __c)
{
return __builtin_neon_vmul_lanev4hf(__a, __b, __c);
}
/* Multiply every lane of __a by the scalar __b.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmul_n_f16(float16x4_t __a, float16_t __b)
{
return __builtin_neon_vmul_nv4hf(__a, __b);
}
/* 128-bit "q" forms of the three multiplies above.  */
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_f16(float16x8_t __a, float16x8_t __b)
{
#ifdef __FAST_MATH__
return __a * __b;
#else
return __builtin_neon_vmulfv8hf(__a, __b);
#endif
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_lane_f16(float16x8_t __a, float16x4_t __b, const int __c)
{
return __builtin_neon_vmul_lanev8hf(__a, __b, __c);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmulq_n_f16(float16x8_t __a, float16_t __b)
{
return __builtin_neon_vmul_nv8hf(__a, __b);
}
/* Per-lane negation.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vneg_f16(float16x4_t __a)
{
return __builtin_neon_vnegv4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vnegq_f16(float16x8_t __a)
{
return __builtin_neon_vnegv8hf(__a);
}
/* Pairwise operations over the concatenation of __a and __b: each
   result lane combines one adjacent pair of input lanes.  64-bit
   forms only.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpadd_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vpaddv4hf(__a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmax_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vpmaxfv4hf(__a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vpmin_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vpminfv4hf(__a, __b);
}
/* Per-lane reciprocal estimate (VRECPE) — a low-precision seed meant
   to be refined with vrecps Newton-Raphson steps.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrecpe_f16(float16x4_t __a)
{
return __builtin_neon_vrecpev4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrecpeq_f16(float16x8_t __a)
{
return __builtin_neon_vrecpev8hf(__a);
}
/* Per-lane round-to-integral, one function per rounding mode:
     vrnd  - toward zero (truncate)
     vrnda - to nearest, ties away from zero
     vrndm - toward minus infinity (floor)
     vrndn - to nearest, ties to even
     vrndp - toward plus infinity (ceiling)
     vrndx - current FPSCR mode, raising Inexact where applicable  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrnd_f16(float16x4_t __a)
{
return __builtin_neon_vrndv4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndq_f16(float16x8_t __a)
{
return __builtin_neon_vrndv8hf(__a);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrnda_f16(float16x4_t __a)
{
return __builtin_neon_vrndav4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndaq_f16(float16x8_t __a)
{
return __builtin_neon_vrndav8hf(__a);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndm_f16(float16x4_t __a)
{
return __builtin_neon_vrndmv4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndmq_f16(float16x8_t __a)
{
return __builtin_neon_vrndmv8hf(__a);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndn_f16(float16x4_t __a)
{
return __builtin_neon_vrndnv4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndnq_f16(float16x8_t __a)
{
return __builtin_neon_vrndnv8hf(__a);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndp_f16(float16x4_t __a)
{
return __builtin_neon_vrndpv4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndpq_f16(float16x8_t __a)
{
return __builtin_neon_vrndpv8hf(__a);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndx_f16(float16x4_t __a)
{
return __builtin_neon_vrndxv4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrndxq_f16(float16x8_t __a)
{
return __builtin_neon_vrndxv8hf(__a);
}
/* Reciprocal square-root estimate (VRSQRTE) — low-precision seed for
   iterative refinement.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsqrte_f16(float16x4_t __a)
{
return __builtin_neon_vrsqrtev4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsqrteq_f16(float16x8_t __a)
{
return __builtin_neon_vrsqrtev8hf(__a);
}
/* Newton-Raphson step for reciprocal refinement (VRECPS), used
   together with vrecpe.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrecps_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vrecpsv4hf(__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrecpsq_f16(float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vrecpsv8hf(__a, __b);
}
/* Newton-Raphson step for reciprocal square-root refinement (VRSQRTS),
   used together with vrsqrte.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsqrts_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vrsqrtsv4hf(__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrsqrtsq_f16(float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vrsqrtsv8hf(__a, __b);
}
/* Per-lane half-precision subtraction; same -ffast-math split as
   vadd_f16/vmul_f16 above.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsub_f16(float16x4_t __a, float16x4_t __b)
{
#ifdef __FAST_MATH__
return __a - __b;
#else
return __builtin_neon_vsubv4hf(__a, __b);
#endif
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vsubq_f16(float16x8_t __a, float16x8_t __b)
{
#ifdef __FAST_MATH__
return __a - __b;
#else
return __builtin_neon_vsubv8hf(__a, __b);
#endif
}
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC.  */
#pragma GCC pop_options
/* Half-precision data processing intrinsics. */
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
/* Bitwise select: for each bit, take __b where the mask __a is 1 and
   __c where it is 0.  The mask is cast to the signed vector type the
   builtin expects; the operation itself is purely bitwise.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbsl_f16(uint16x4_t __a, float16x4_t __b, float16x4_t __c)
{
return __builtin_neon_vbslv4hf((int16x4_t)__a, __b, __c);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vbslq_f16(uint16x8_t __a, float16x8_t __b, float16x8_t __c)
{
return __builtin_neon_vbslv8hf((int16x8_t)__a, __b, __c);
}
/* Broadcast the scalar __a into every lane.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_n_f16(float16_t __a)
{
return __builtin_neon_vdup_nv4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_n_f16(float16_t __a)
{
return __builtin_neon_vdup_nv8hf(__a);
}
/* Broadcast lane __b of __a into every lane of the result (__b must be
   a compile-time constant lane index).  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdup_lane_f16(float16x4_t __a, const int __b)
{
return __builtin_neon_vdup_lanev4hf(__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdupq_lane_f16(float16x4_t __a, const int __b)
{
return __builtin_neon_vdup_lanev8hf(__a, __b);
}
/* Extract: result is the concatenation of __a and __b shifted down by
   __c lanes, i.e. lanes __c.. of __a followed by lanes of __b (__c is
   a compile-time constant).  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vext_f16(float16x4_t __a, float16x4_t __b, const int __c)
{
return __builtin_neon_vextv4hf(__a, __b, __c);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vextq_f16(float16x8_t __a, float16x8_t __b, const int __c)
{
return __builtin_neon_vextv8hf(__a, __b, __c);
}
/* vmov_n is an ACLE-mandated alias for vdup_n: broadcast __a into
   every lane (same builtin).  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmov_n_f16(float16_t __a)
{
return __builtin_neon_vdup_nv4hf(__a);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vmovq_n_f16(float16_t __a)
{
return __builtin_neon_vdup_nv8hf(__a);
}
/* Reverse the lanes within each 64-bit half (implemented as a generic
   shuffle so GCC can fold it).  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64_f16(float16x4_t __a)
{
return (float16x4_t)__builtin_shuffle(__a, (uint16x4_t){3, 2, 1, 0});
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vrev64q_f16(float16x8_t __a)
{
return (float16x8_t)__builtin_shuffle(__a,
(uint16x8_t){3, 2, 1, 0, 7, 6, 5, 4});
}
/* Permutation intrinsics implemented with GCC's generic
   __builtin_shuffle (indices 0..N-1 pick from __a, N..2N-1 from __b).
   Separate index tables are needed for big-endian because GCC's
   vector lane numbering is reversed there.  */
/* Transpose: interleave even lanes into val[0], odd lanes into val[1].  */
__extension__ extern __inline float16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrn_f16(float16x4_t __a, float16x4_t __b)
{
float16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){5, 1, 7, 3});
__rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 0, 6, 2});
#else
__rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 4, 2, 6});
__rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){1, 5, 3, 7});
#endif
return __rv;
}
__extension__ extern __inline float16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vtrnq_f16(float16x8_t __a, float16x8_t __b)
{
float16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] =
__builtin_shuffle(__a, __b, (uint16x8_t){9, 1, 11, 3, 13, 5, 15, 7});
__rv.val[1] =
__builtin_shuffle(__a, __b, (uint16x8_t){8, 0, 10, 2, 12, 4, 14, 6});
#else
__rv.val[0] =
__builtin_shuffle(__a, __b, (uint16x8_t){0, 8, 2, 10, 4, 12, 6, 14});
__rv.val[1] =
__builtin_shuffle(__a, __b, (uint16x8_t){1, 9, 3, 11, 5, 13, 7, 15});
#endif
return __rv;
}
/* Unzip: de-interleave — even-indexed lanes into val[0], odd-indexed
   lanes into val[1].  */
__extension__ extern __inline float16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzp_f16(float16x4_t __a, float16x4_t __b)
{
float16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){5, 7, 1, 3});
__rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 6, 0, 2});
#else
__rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 2, 4, 6});
__rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){1, 3, 5, 7});
#endif
return __rv;
}
__extension__ extern __inline float16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vuzpq_f16(float16x8_t __a, float16x8_t __b)
{
float16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] =
__builtin_shuffle(__a, __b, (uint16x8_t){5, 7, 1, 3, 13, 15, 9, 11});
__rv.val[1] =
__builtin_shuffle(__a, __b, (uint16x8_t){4, 6, 0, 2, 12, 14, 8, 10});
#else
__rv.val[0] =
__builtin_shuffle(__a, __b, (uint16x8_t){0, 2, 4, 6, 8, 10, 12, 14});
__rv.val[1] =
__builtin_shuffle(__a, __b, (uint16x8_t){1, 3, 5, 7, 9, 11, 13, 15});
#endif
return __rv;
}
/* Zip: interleave — low halves of __a/__b into val[0], high halves
   into val[1].  */
__extension__ extern __inline float16x4x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzip_f16(float16x4_t __a, float16x4_t __b)
{
float16x4x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){6, 2, 7, 3});
__rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){4, 0, 5, 1});
#else
__rv.val[0] = __builtin_shuffle(__a, __b, (uint16x4_t){0, 4, 1, 5});
__rv.val[1] = __builtin_shuffle(__a, __b, (uint16x4_t){2, 6, 3, 7});
#endif
return __rv;
}
__extension__ extern __inline float16x8x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vzipq_f16(float16x8_t __a, float16x8_t __b)
{
float16x8x2_t __rv;
#ifdef __ARM_BIG_ENDIAN
__rv.val[0] =
__builtin_shuffle(__a, __b, (uint16x8_t){10, 2, 11, 3, 8, 0, 9, 1});
__rv.val[1] =
__builtin_shuffle(__a, __b, (uint16x8_t){14, 6, 15, 7, 12, 4, 13, 5});
#else
__rv.val[0] =
__builtin_shuffle(__a, __b, (uint16x8_t){0, 8, 1, 9, 2, 10, 3, 11});
__rv.val[1] =
__builtin_shuffle(__a, __b, (uint16x8_t){4, 12, 5, 13, 6, 14, 7, 15});
#endif
return __rv;
}
#endif
/* AdvSIMD Dot Product intrinsics. */
#if __ARM_ARCH >= 8
#pragma GCC push_options
#pragma GCC target("arch=armv8.2-a+dotprod")
/* Armv8.2-A dot-product intrinsics (UDOT/SDOT): each 32-bit lane of
   __r is accumulated with the dot product of the corresponding group
   of four 8-bit elements from __a and __b.  */
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdot_u32(uint32x2_t __r, uint8x8_t __a, uint8x8_t __b)
{
return __builtin_neon_udotv8qi_uuuu(__r, __a, __b);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdotq_u32(uint32x4_t __r, uint8x16_t __a, uint8x16_t __b)
{
return __builtin_neon_udotv16qi_uuuu(__r, __a, __b);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdot_s32(int32x2_t __r, int8x8_t __a, int8x8_t __b)
{
return __builtin_neon_sdotv8qi(__r, __a, __b);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdotq_s32(int32x4_t __r, int8x16_t __a, int8x16_t __b)
{
return __builtin_neon_sdotv16qi(__r, __a, __b);
}
/* Lane variants: every group of four elements of __a is dotted with
   the single 4-element group selected from __b by the compile-time
   constant __index.  */
__extension__ extern __inline uint32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdot_lane_u32(uint32x2_t __r, uint8x8_t __a, uint8x8_t __b,
const int __index)
{
return __builtin_neon_udot_lanev8qi_uuuus(__r, __a, __b, __index);
}
__extension__ extern __inline uint32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdotq_lane_u32(uint32x4_t __r, uint8x16_t __a, uint8x8_t __b,
const int __index)
{
return __builtin_neon_udot_lanev16qi_uuuus(__r, __a, __b, __index);
}
__extension__ extern __inline int32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdot_lane_s32(int32x2_t __r, int8x8_t __a, int8x8_t __b, const int __index)
{
return __builtin_neon_sdot_lanev8qi(__r, __a, __b, __index);
}
__extension__ extern __inline int32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vdotq_lane_s32(int32x4_t __r, int8x16_t __a, int8x8_t __b,
const int __index)
{
return __builtin_neon_sdot_lanev16qi(__r, __a, __b, __index);
}
#pragma GCC pop_options
#endif
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
#pragma GCC push_options
#pragma GCC target("arch=armv8.2-a+fp16fml")
/* Armv8.2-A FP16 FML intrinsics (FMLAL/FMLSL): widening fused
   multiply-accumulate.  "lal" accumulates __a * __b into the
   single-precision accumulator __r; "lsl" subtracts the product.
   "low"/"high" select which half of the f16 inputs feeds the widened
   multiply.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlal_low_f16(float32x2_t __r, float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vfmal_lowv2sf(__r, __a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlsl_low_f16(float32x2_t __r, float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vfmsl_lowv2sf(__r, __a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlal_high_f16(float32x2_t __r, float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vfmal_highv2sf(__r, __a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlsl_high_f16(float32x2_t __r, float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vfmsl_highv2sf(__r, __a, __b);
}
/* 128-bit "q" forms: float32x4_t accumulator, float16x8_t operands.  */
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlalq_low_f16(float32x4_t __r, float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vfmal_lowv4sf(__r, __a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlslq_low_f16(float32x4_t __r, float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vfmsl_lowv4sf(__r, __a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlalq_high_f16(float32x4_t __r, float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vfmal_highv4sf(__r, __a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlslq_high_f16(float32x4_t __r, float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vfmsl_highv4sf(__r, __a, __b);
}
/* Lane variants of the widening FP16 multiply-accumulate: __b's lane
   __index supplies the multiplier.  __builtin_arm_lane_check emits a
   compile-time diagnostic if __index is out of range for the vector
   width given as its first argument (4 for float16x4_t sources, 8 for
   float16x8_t "laneq" sources).  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlal_lane_low_f16(float32x2_t __r, float16x4_t __a, float16x4_t __b,
const int __index)
{
__builtin_arm_lane_check(4, __index);
return __builtin_neon_vfmal_lane_lowv2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlal_lane_high_f16(float32x2_t __r, float16x4_t __a, float16x4_t __b,
const int __index)
{
__builtin_arm_lane_check(4, __index);
return __builtin_neon_vfmal_lane_highv2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlalq_laneq_low_f16(float32x4_t __r, float16x8_t __a, float16x8_t __b,
const int __index)
{
__builtin_arm_lane_check(8, __index);
return __builtin_neon_vfmal_lane_lowv4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlalq_lane_low_f16(float32x4_t __r, float16x8_t __a, float16x4_t __b,
const int __index)
{
__builtin_arm_lane_check(4, __index);
return __builtin_neon_vfmal_lane_lowv4hfv4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlal_laneq_low_f16(float32x2_t __r, float16x4_t __a, float16x8_t __b,
const int __index)
{
__builtin_arm_lane_check(8, __index);
return __builtin_neon_vfmal_lane_lowv8hfv2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlalq_laneq_high_f16(float32x4_t __r, float16x8_t __a, float16x8_t __b,
const int __index)
{
__builtin_arm_lane_check(8, __index);
return __builtin_neon_vfmal_lane_highv4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlalq_lane_high_f16(float32x4_t __r, float16x8_t __a, float16x4_t __b,
const int __index)
{
__builtin_arm_lane_check(4, __index);
return __builtin_neon_vfmal_lane_highv4hfv4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlal_laneq_high_f16(float32x2_t __r, float16x4_t __a, float16x8_t __b,
const int __index)
{
__builtin_arm_lane_check(8, __index);
return __builtin_neon_vfmal_lane_highv8hfv2sf(__r, __a, __b, __index);
}
/* Multiply-subtract (FMLSL) lane variants.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlsl_lane_low_f16(float32x2_t __r, float16x4_t __a, float16x4_t __b,
const int __index)
{
__builtin_arm_lane_check(4, __index);
return __builtin_neon_vfmsl_lane_lowv2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlsl_lane_high_f16(float32x2_t __r, float16x4_t __a, float16x4_t __b,
const int __index)
{
__builtin_arm_lane_check(4, __index);
return __builtin_neon_vfmsl_lane_highv2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlslq_laneq_low_f16(float32x4_t __r, float16x8_t __a, float16x8_t __b,
const int __index)
{
__builtin_arm_lane_check(8, __index);
return __builtin_neon_vfmsl_lane_lowv4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlslq_lane_low_f16(float32x4_t __r, float16x8_t __a, float16x4_t __b,
const int __index)
{
__builtin_arm_lane_check(4, __index);
return __builtin_neon_vfmsl_lane_lowv4hfv4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlsl_laneq_low_f16(float32x2_t __r, float16x4_t __a, float16x8_t __b,
const int __index)
{
__builtin_arm_lane_check(8, __index);
return __builtin_neon_vfmsl_lane_lowv8hfv2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlslq_laneq_high_f16(float32x4_t __r, float16x8_t __a, float16x8_t __b,
const int __index)
{
__builtin_arm_lane_check(8, __index);
return __builtin_neon_vfmsl_lane_highv4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlslq_lane_high_f16(float32x4_t __r, float16x8_t __a, float16x4_t __b,
const int __index)
{
__builtin_arm_lane_check(4, __index);
return __builtin_neon_vfmsl_lane_highv4hfv4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vfmlsl_laneq_high_f16(float32x2_t __r, float16x4_t __a, float16x8_t __b,
const int __index)
{
__builtin_arm_lane_check(8, __index);
return __builtin_neon_vfmsl_lane_highv8hfv2sf(__r, __a, __b, __index);
}
#pragma GCC pop_options
#endif
/* AdvSIMD Complex numbers intrinsics. */
#if __ARM_ARCH >= 8
#pragma GCC push_options
#pragma GCC target("arch=armv8.3-a")
#if defined(__ARM_FP16_FORMAT_IEEE) || defined(__ARM_FP16_FORMAT_ALTERNATIVE)
#pragma GCC push_options
#pragma GCC target("+fp16")
/* Complex addition with 90-degree rotation of the second operand
   (Armv8.3-A FCADD #90).  Vector elements are (real, imag) pairs.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcadd_rot90_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vcadd90v4hf(__a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcaddq_rot90_f16(float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vcadd90v8hf(__a, __b);
}
/* Complex addition with 270-degree rotation of the second operand
   (Armv8.3-A FCADD #270).
   Bug fix: this wrapper previously invoked the 90-degree builtin
   (__builtin_neon_vcadd90v4hf), so vcadd_rot270_f16 emitted
   "FCADD #90" instead of "FCADD #270".  It must dispatch to the
   270-degree builtin, mirroring the vcadd_rot90_f16 pattern.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcadd_rot270_f16(float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vcadd270v4hf(__a, __b);
}
/* 128-bit form of the 270-degree complex addition (FCADD #270).
   Bug fix: previously called the 90-degree builtin
   (__builtin_neon_vcadd90v8hf); the 270-degree builtin is required
   so the intrinsic matches its documented rotation.  */
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcaddq_rot270_f16(float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vcadd270v8hf(__a, __b);
}
/* Complex multiply-accumulate on float16 vectors (Armv8.3-A FCMLA).
   Elements are interpreted as (real, imag) pairs; the product of __a
   and __b, rotated by 0/90/180/270 degrees, is accumulated into __r.
   _lane/_laneq forms broadcast one complex element of __b selected by
   the constant __index.  Note the q-form "_laneq" wrappers dispatch to
   the plain "_lane" builtin, because __b there is already a full-width
   float16x8_t.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_f16(float16x4_t __r, float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vcmla0v4hf(__r, __a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_f16(float16x8_t __r, float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vcmla0v8hf(__r, __a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_lane_f16(float16x4_t __r, float16x4_t __a, float16x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane0v4hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_laneq_f16(float16x4_t __r, float16x4_t __a, float16x8_t __b,
const int __index)
{
return __builtin_neon_vcmla_laneq0v4hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_lane_f16(float16x8_t __r, float16x8_t __a, float16x4_t __b,
const int __index)
{
return __builtin_neon_vcmlaq_lane0v8hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_laneq_f16(float16x8_t __r, float16x8_t __a, float16x8_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane0v8hf(__r, __a, __b, __index);
}
/* 90-degree rotation variants.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot90_f16(float16x4_t __r, float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vcmla90v4hf(__r, __a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot90_f16(float16x8_t __r, float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vcmla90v8hf(__r, __a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot90_lane_f16(float16x4_t __r, float16x4_t __a, float16x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane90v4hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot90_laneq_f16(float16x4_t __r, float16x4_t __a, float16x8_t __b,
const int __index)
{
return __builtin_neon_vcmla_laneq90v4hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot90_lane_f16(float16x8_t __r, float16x8_t __a, float16x4_t __b,
const int __index)
{
return __builtin_neon_vcmlaq_lane90v8hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot90_laneq_f16(float16x8_t __r, float16x8_t __a, float16x8_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane90v8hf(__r, __a, __b, __index);
}
/* 180-degree rotation variants.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot180_f16(float16x4_t __r, float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vcmla180v4hf(__r, __a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot180_f16(float16x8_t __r, float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vcmla180v8hf(__r, __a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot180_lane_f16(float16x4_t __r, float16x4_t __a, float16x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane180v4hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot180_laneq_f16(float16x4_t __r, float16x4_t __a, float16x8_t __b,
const int __index)
{
return __builtin_neon_vcmla_laneq180v4hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot180_lane_f16(float16x8_t __r, float16x8_t __a, float16x4_t __b,
const int __index)
{
return __builtin_neon_vcmlaq_lane180v8hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot180_laneq_f16(float16x8_t __r, float16x8_t __a, float16x8_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane180v8hf(__r, __a, __b, __index);
}
/* 270-degree rotation variants.  */
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot270_f16(float16x4_t __r, float16x4_t __a, float16x4_t __b)
{
return __builtin_neon_vcmla270v4hf(__r, __a, __b);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot270_f16(float16x8_t __r, float16x8_t __a, float16x8_t __b)
{
return __builtin_neon_vcmla270v8hf(__r, __a, __b);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot270_lane_f16(float16x4_t __r, float16x4_t __a, float16x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane270v4hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot270_laneq_f16(float16x4_t __r, float16x4_t __a, float16x8_t __b,
const int __index)
{
return __builtin_neon_vcmla_laneq270v4hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot270_lane_f16(float16x8_t __r, float16x8_t __a, float16x4_t __b,
const int __index)
{
return __builtin_neon_vcmlaq_lane270v8hf(__r, __a, __b, __index);
}
__extension__ extern __inline float16x8_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot270_laneq_f16(float16x8_t __r, float16x8_t __a, float16x8_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane270v8hf(__r, __a, __b, __index);
}
#pragma GCC pop_options
#endif
/* Single-precision complex addition with 90-degree rotation of the
   second operand (FCADD #90); float32 analogue of vcadd_rot90_f16.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcadd_rot90_f32(float32x2_t __a, float32x2_t __b)
{
return __builtin_neon_vcadd90v2sf(__a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcaddq_rot90_f32(float32x4_t __a, float32x4_t __b)
{
return __builtin_neon_vcadd90v4sf(__a, __b);
}
/* Single-precision complex addition with 270-degree rotation
   (FCADD #270).
   Bug fix: previously dispatched to the 90-degree builtin
   (__builtin_neon_vcadd90v2sf), producing "FCADD #90"; the
   270-degree builtin is the correct mapping.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcadd_rot270_f32(float32x2_t __a, float32x2_t __b)
{
return __builtin_neon_vcadd270v2sf(__a, __b);
}
/* 128-bit single-precision form of the 270-degree complex addition
   (FCADD #270).
   Bug fix: previously called the 90-degree builtin
   (__builtin_neon_vcadd90v4sf); must use the 270-degree builtin.  */
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcaddq_rot270_f32(float32x4_t __a, float32x4_t __b)
{
return __builtin_neon_vcadd270v4sf(__a, __b);
}
/* Complex multiply-accumulate on float32 vectors (FCMLA); single-
   precision analogues of the f16 family above, with the same
   0/90/180/270-degree rotation and _lane/_laneq selection scheme.
   As with f16, the q-form "_laneq" wrappers reuse the "_lane" builtin
   because __b there already spans all selectable lanes.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_f32(float32x2_t __r, float32x2_t __a, float32x2_t __b)
{
return __builtin_neon_vcmla0v2sf(__r, __a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_f32(float32x4_t __r, float32x4_t __a, float32x4_t __b)
{
return __builtin_neon_vcmla0v4sf(__r, __a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_lane_f32(float32x2_t __r, float32x2_t __a, float32x2_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane0v2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_laneq_f32(float32x2_t __r, float32x2_t __a, float32x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_laneq0v2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_lane_f32(float32x4_t __r, float32x4_t __a, float32x2_t __b,
const int __index)
{
return __builtin_neon_vcmlaq_lane0v4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_laneq_f32(float32x4_t __r, float32x4_t __a, float32x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane0v4sf(__r, __a, __b, __index);
}
/* 90-degree rotation variants.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot90_f32(float32x2_t __r, float32x2_t __a, float32x2_t __b)
{
return __builtin_neon_vcmla90v2sf(__r, __a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot90_f32(float32x4_t __r, float32x4_t __a, float32x4_t __b)
{
return __builtin_neon_vcmla90v4sf(__r, __a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot90_lane_f32(float32x2_t __r, float32x2_t __a, float32x2_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane90v2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot90_laneq_f32(float32x2_t __r, float32x2_t __a, float32x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_laneq90v2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot90_lane_f32(float32x4_t __r, float32x4_t __a, float32x2_t __b,
const int __index)
{
return __builtin_neon_vcmlaq_lane90v4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot90_laneq_f32(float32x4_t __r, float32x4_t __a, float32x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane90v4sf(__r, __a, __b, __index);
}
/* 180-degree rotation variants.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot180_f32(float32x2_t __r, float32x2_t __a, float32x2_t __b)
{
return __builtin_neon_vcmla180v2sf(__r, __a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot180_f32(float32x4_t __r, float32x4_t __a, float32x4_t __b)
{
return __builtin_neon_vcmla180v4sf(__r, __a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot180_lane_f32(float32x2_t __r, float32x2_t __a, float32x2_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane180v2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot180_laneq_f32(float32x2_t __r, float32x2_t __a, float32x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_laneq180v2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot180_lane_f32(float32x4_t __r, float32x4_t __a, float32x2_t __b,
const int __index)
{
return __builtin_neon_vcmlaq_lane180v4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot180_laneq_f32(float32x4_t __r, float32x4_t __a, float32x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane180v4sf(__r, __a, __b, __index);
}
/* 270-degree rotation variants.  */
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot270_f32(float32x2_t __r, float32x2_t __a, float32x2_t __b)
{
return __builtin_neon_vcmla270v2sf(__r, __a, __b);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot270_f32(float32x4_t __r, float32x4_t __a, float32x4_t __b)
{
return __builtin_neon_vcmla270v4sf(__r, __a, __b);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot270_lane_f32(float32x2_t __r, float32x2_t __a, float32x2_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane270v2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x2_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmla_rot270_laneq_f32(float32x2_t __r, float32x2_t __a, float32x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_laneq270v2sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot270_lane_f32(float32x4_t __r, float32x4_t __a, float32x2_t __b,
const int __index)
{
return __builtin_neon_vcmlaq_lane270v4sf(__r, __a, __b, __index);
}
__extension__ extern __inline float32x4_t
__attribute__((__always_inline__, __gnu_inline__, __artificial__))
vcmlaq_rot270_laneq_f32(float32x4_t __r, float32x4_t __a, float32x4_t __b,
const int __index)
{
return __builtin_neon_vcmla_lane270v4sf(__r, __a, __b, __index);
}
#pragma GCC pop_options
#endif
#ifdef __cplusplus
}
#endif
#pragma GCC pop_options
#endif
#endif
| 30.943732 | 111 | 0.741155 |
180c98311cf489158f94a28306250c431aaf4504 | 7,041 | h | C | resources/home/dnanexus/root/include/Math/GenVector/RotationY.h | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/home/dnanexus/root/include/Math/GenVector/RotationY.h | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/home/dnanexus/root/include/Math/GenVector/RotationY.h | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | 1 | 2020-05-28T23:01:44.000Z | 2020-05-28T23:01:44.000Z | // @(#)root/mathcore:$Id$
// Authors: W. Brown, M. Fischler, L. Moneta 2005
/**********************************************************************
* *
* Copyright (c) 2005 , LCG ROOT FNAL MathLib Team *
* *
* *
**********************************************************************/
// Header file for class RotationY representing a rotation about the Y axis
//
// Created by: Mark Fischler Mon July 18 2005
//
// Last update: $Id$
//
#ifndef ROOT_Math_GenVector_RotationY
#define ROOT_Math_GenVector_RotationY 1
#include "Math/GenVector/Cartesian3D.h"
#include "Math/GenVector/DisplacementVector3D.h"
#include "Math/GenVector/PositionVector3D.h"
#include "Math/GenVector/LorentzVector.h"
#include "Math/GenVector/3DDistances.h"
#include "Math/GenVector/RotationYfwd.h"
#include <cmath>
namespace ROOT {
namespace Math {
//__________________________________________________________________________________________
/**
Rotation class representing a 3D rotation about the Y axis by the angle of rotation.
   For efficiency reasons, in addition to the angle itself, the sine and cosine of the angle are also stored.
@ingroup GenVector
*/
class RotationY {
public:
typedef double Scalar;
// ========== Constructors and Assignment =====================
/**
Default constructor (identity rotation)
*/
RotationY() : fAngle(0), fSin(0), fCos(1) { }
/**
Construct from an angle
*/
explicit RotationY( Scalar angle ) : fAngle(angle),
fSin(std::sin(angle)),
fCos(std::cos(angle))
{
Rectify();
}
// The compiler-generated copy ctor, copy assignment, and dtor are OK.
/**
Rectify makes sure the angle is in (-pi,pi]
*/
void Rectify() {
// Reduce the angle modulo 2*pi using round-to-nearest, then refresh
// the cached sine/cosine so they stay consistent with fAngle.
if ( std::fabs(fAngle) >= M_PI ) {
double x = fAngle / (2.0 * M_PI);
fAngle = (2.0 * M_PI) * ( x + std::floor(.5-x) );
fSin = std::sin(fAngle);
fCos = std::cos(fAngle);
}
}
// ======== Components ==============
/**
Set given the angle.
*/
void SetAngle (Scalar angle) {
fSin=std::sin(angle);
fCos=std::cos(angle);
fAngle= angle;
Rectify();
}
void SetComponents (Scalar angle) { SetAngle(angle); }
/**
Get the angle
*/
// NOTE(review): the angle is recomputed from the cached sin/cos rather
// than read back from fAngle; atan2 always yields a value in (-pi,pi].
void GetAngle(Scalar &angle) const { angle = atan2(fSin, fCos); }
void GetComponents ( Scalar & angle ) const { GetAngle(angle); }
/**
Angle of rotation
*/
Scalar Angle() const { return atan2(fSin, fCos); }
/**
Sine or Cosine of the rotation angle
*/
Scalar SinAngle () const { return fSin; }
Scalar CosAngle () const { return fCos; }
// =========== operations ==============
// /**
// Rotation operation on a cartesian vector
// */
// typedef DisplacementVector3D< Cartesian3D<double> > XYZVector;
// XYZVector operator() (const XYZVector & v) const {
// return XYZVector
// ( fCos*v.x()+fSin*v.z(), v.y(), fCos*v.z()-fSin*v.x() );
// }
/**
Rotation operation on a displacement vector in any coordinate system
*/
// A rotation about Y leaves y untouched and maps
// (x, z) -> (cos*x + sin*z, cos*z - sin*x).
template <class CoordSystem, class U>
DisplacementVector3D<CoordSystem,U>
operator() (const DisplacementVector3D<CoordSystem,U> & v) const {
DisplacementVector3D< Cartesian3D<double>,U > xyz;
xyz.SetXYZ( fCos*v.x()+fSin*v.z(), v.y(), fCos*v.z()-fSin*v.x() );
return DisplacementVector3D<CoordSystem,U>(xyz);
}
/**
Rotation operation on a position vector in any coordinate system
*/
template <class CoordSystem, class U>
PositionVector3D<CoordSystem, U>
operator() (const PositionVector3D<CoordSystem,U> & v) const {
DisplacementVector3D< Cartesian3D<double>,U > xyz(v);
DisplacementVector3D< Cartesian3D<double>,U > rxyz = operator()(xyz);
return PositionVector3D<CoordSystem,U> ( rxyz );
}
/**
Rotation operation on a Lorentz vector in any 4D coordinate system
*/
// Only the spatial part is rotated; the energy component is preserved.
template <class CoordSystem>
LorentzVector<CoordSystem>
operator() (const LorentzVector<CoordSystem> & v) const {
DisplacementVector3D< Cartesian3D<double> > xyz(v.Vect());
xyz = operator()(xyz);
LorentzVector< PxPyPzE4D<double> > xyzt (xyz.X(), xyz.Y(), xyz.Z(), v.E());
return LorentzVector<CoordSystem> ( xyzt );
}
/**
Rotation operation on an arbitrary vector v.
Preconditions: v must implement methods x(), y(), and z()
and the arbitrary vector type must have a constructor taking (x,y,z)
*/
template <class ForeignVector>
ForeignVector
operator() (const ForeignVector & v) const {
DisplacementVector3D< Cartesian3D<double> > xyz(v);
DisplacementVector3D< Cartesian3D<double> > rxyz = operator()(xyz);
return ForeignVector ( rxyz.X(), rxyz.Y(), rxyz.Z() );
}
/**
Overload operator * for rotation on a vector
*/
template <class AVector>
inline
AVector operator* (const AVector & v) const
{
return operator()(v);
}
/**
Invert a rotation in place
*/
// fCos is intentionally untouched: cos(-angle) == cos(angle).
void Invert() { fAngle = -fAngle; fSin = -fSin; }
/**
Return inverse of a rotation
*/
RotationY Inverse() const { RotationY t(*this); t.Invert(); return t; }
// ========= Multi-Rotation Operations ===============
/**
Multiply (combine) two rotations
*/
// Adds the two angles (reduced into (-pi,pi] as in Rectify) and uses
// the angle-sum identities to combine the cached sines/cosines.
RotationY operator * (const RotationY & r) const {
RotationY ans;
double x = (fAngle + r.fAngle) / (2.0 * M_PI);
ans.fAngle = (2.0 * M_PI) * ( x + std::floor(.5-x) );
ans.fSin = fSin*r.fCos + fCos*r.fSin;
ans.fCos = fCos*r.fCos - fSin*r.fSin;
return ans;
}
/**
Post-Multiply (on right) by another rotation : T = T*R
*/
RotationY & operator *= (const RotationY & r) { return *this = (*this)*r; }
/**
Equality/inequality operators
*/
// Exact floating-point comparison of the stored angle; no tolerance.
bool operator == (const RotationY & rhs) const {
if( fAngle != rhs.fAngle ) return false;
return true;
}
bool operator != (const RotationY & rhs) const {
return ! operator==(rhs);
}
private:
Scalar fAngle; // rotation angle
Scalar fSin; // sine of the rotation angle
Scalar fCos; // cosine of the rotation angle
}; // RotationY
// ============ Class RotationY ends here ============
/**
Distance between two rotations
*/
// Distance between this RotationY and any other rotation type R;
// delegates to the generic gv_detail::dist helper (3DDistances.h).
template <class R>
inline
typename RotationY::Scalar
Distance ( const RotationY& r1, const R & r2) {return gv_detail::dist(r1,r2);}
/**
Stream Output and Input
*/
// TODO - I/O should be put in the manipulator form
// Stream the rotation in the human-readable form " RotationY(<angle>) ".
// Note the angle is the normalized value from Angle(), i.e. atan2-based.
inline
std::ostream & operator<< (std::ostream & os, const RotationY & r) {
os << " RotationY(" << r.Angle() << ") ";
return os;
}
} // namespace Math
} // namespace ROOT
#endif // ROOT_Math_GenVector_RotationY
| 27.83004 | 100 | 0.574918 |
afac8d6de7aa0ba6c92e9b28e5d7542d1b93a686 | 2,790 | h | C | System/Library/PrivateFrameworks/HealthUI.framework/HKEmergencyCardNameAndPictureTableItem.h | lechium/iOS1351Headers | 6bed3dada5ffc20366b27f7f2300a24a48a6284e | [
"MIT"
] | 2 | 2021-11-02T09:23:27.000Z | 2022-03-28T08:21:57.000Z | System/Library/PrivateFrameworks/HealthUI.framework/HKEmergencyCardNameAndPictureTableItem.h | lechium/iOS1351Headers | 6bed3dada5ffc20366b27f7f2300a24a48a6284e | [
"MIT"
] | null | null | null | System/Library/PrivateFrameworks/HealthUI.framework/HKEmergencyCardNameAndPictureTableItem.h | lechium/iOS1351Headers | 6bed3dada5ffc20366b27f7f2300a24a48a6284e | [
"MIT"
] | 1 | 2022-03-28T08:21:59.000Z | 2022-03-28T08:21:59.000Z | /*
* This header is generated by classdump-dyld 1.5
* on Wednesday, October 27, 2021 at 3:22:26 PM Mountain Standard Time
* Operating System: Version 13.5.1 (Build 17F80)
* Image Source: /System/Library/PrivateFrameworks/HealthUI.framework/HealthUI
* classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos. Updated by Kevin Bradley.
*/
#import <HealthUI/HealthUI-Structs.h>
#import <HealthUI/HKEmergencyCardTableItem.h>
#import <libobjc.A.dylib/HKMedicalIDEditorCellEditDelegate.h>
#import <libobjc.A.dylib/HKMedicalIDEditorCellHeightChangeDelegate.h>
#import <UIKit/UINavigationControllerDelegate.h>
#import <UIKit/UIImagePickerControllerDelegate.h>
@protocol HKEmergencyCardRowHeightChangeDelegate;
@class HKMedicalIDEditorNameAndPhotoCell, NSString;
// Emergency-card (Medical ID) table item for the combined name-and-photo row.
// Class-dump generated declaration: method behavior noted below is inferred
// from selector names and should be confirmed against the implementation.
@interface HKEmergencyCardNameAndPictureTableItem : HKEmergencyCardTableItem <HKMedicalIDEditorCellEditDelegate, HKMedicalIDEditorCellHeightChangeDelegate, UINavigationControllerDelegate, UIImagePickerControllerDelegate> {
	HKMedicalIDEditorNameAndPhotoCell* _cell;          // backing cell, see -_cell
	id<HKEmergencyCardRowHeightChangeDelegate> _rowHeightChangeDelegate; // weak backing ivar for the property below
}
// Delegate notified when the row's height changes (weak to avoid a cycle).
@property (assign,nonatomic,__weak) id<HKEmergencyCardRowHeightChangeDelegate> rowHeightChangeDelegate;              //@synthesize rowHeightChangeDelegate=_rowHeightChangeDelegate - In the implementation block
@property (readonly) unsigned long long hash; 
@property (readonly) Class superclass; 
@property (copy,readonly) NSString * description; 
@property (copy,readonly) NSString * debugDescription; 
-(void)setData:(id)arg1 ;
-(id)title;
-(id)_cell;
// UIImagePickerControllerDelegate / UINavigationControllerDelegate
-(void)imagePickerControllerDidCancel:(id)arg1 ;
-(void)imagePickerController:(id)arg1 didFinishPickingMediaWithInfo:(id)arg2 ;
-(unsigned long long)navigationControllerSupportedInterfaceOrientations:(id)arg1 ;
-(void)commitEditing;
-(id)initInEditMode:(BOOL)arg1 ;
// Table-row plumbing (cell creation and height calculation)
-(id)tableView:(id)arg1 cellForRowAtIndex:(long long)arg2 ;
-(double)tableView:(id)arg1 estimatedHeightForRowAtIndex:(long long)arg2 ;
-(double)tableView:(id)arg1 heightForRowAtIndex:(long long)arg2 ;
// HKMedicalIDEditorCell edit / height-change delegate callbacks
-(void)medicalIDEditorCellDidChangeSelection:(id)arg1 keepRectVisible:(CGRect)arg2 inView:(id)arg3 ;
-(void)medicalIDEditorCellDidBeginEditing:(id)arg1 keepRectVisible:(CGRect)arg2 inView:(id)arg3 ;
-(void)medicalIDEditorCell:(id)arg1 didChangeHeight:(double)arg2 keepRectVisible:(CGRect)arg3 inView:(id)arg4 ;
-(void)setRowHeightChangeDelegate:(id<HKEmergencyCardRowHeightChangeDelegate>)arg1 ;
-(void)medicalIDEditorCellDidChangeValue:(id)arg1 ;
-(void)_editPhotoTapped:(id)arg1 ;
-(double)_cellFittedHeightWithWidth:(double)arg1 ;
-(id<HKEmergencyCardRowHeightChangeDelegate>)rowHeightChangeDelegate;
@end
| 53.653846 | 222 | 0.783154 |
bbde923872396f0b68371e70ae97b499b867ae8e | 26,513 | c | C | components/chip_riscv_dummy/drivers/uart.c | 1847123212/YoC-open | f4e20c67256472d863ea6d118e3ecbaa1e879d4a | [
"Apache-2.0"
] | 9 | 2020-05-12T03:01:55.000Z | 2021-08-12T10:22:31.000Z | components/chip_riscv_dummy/drivers/uart.c | 1847123212/YoC-open | f4e20c67256472d863ea6d118e3ecbaa1e879d4a | [
"Apache-2.0"
] | null | null | null | components/chip_riscv_dummy/drivers/uart.c | 1847123212/YoC-open | f4e20c67256472d863ea6d118e3ecbaa1e879d4a | [
"Apache-2.0"
] | 12 | 2020-04-15T11:37:33.000Z | 2021-09-13T13:19:04.000Z | /*
* Copyright (C) 2017-2020 Alibaba Group Holding Limited
*/
/******************************************************************************
* @file uart.c
* @brief CSI Source File for uart Driver
* @version V2.01
* @date 2020-04-09
******************************************************************************/
#include <drv/uart.h>
#include <drv/dma.h>
#include <drv/irq.h>
#include <drv/gpio.h>
#include <drv/pin.h>
#include <drv/porting.h>
#include <soc.h>
#include <dw_uart_ll.h>
#include <drv/tick.h>
#define UART_TIMEOUT 0x10000000U
#define UART_MAX_FIFO 0x10U
extern uint16_t uart_tx_hs_num[];
extern uint16_t uart_rx_hs_num[];
extern const csi_pinmap_t uart_pinmap[];
static void rx_pin_uart_to_gpio(uint8_t dev_idx, pin_name_t *rx_pin);
static void tx_pin_uart_to_gpio(uint8_t dev_idx, pin_name_t *tx_pin);
static void rx_pin_gpio_to_uart(uint8_t dev_idx, pin_name_t *rx_pin);
static void tx_pin_gpio_to_uart(uint8_t dev_idx, pin_name_t *tx_pin);
/**
 * \brief Drain the transmit path by pushing 15 dummy 0xFF bytes and
 *        waiting 80 ms for them to leave the FIFO.
 *
 * Only compiled in for the DANICA chip (hardware workaround -- presumably
 * its TX FIFO cannot be reset directly; confirm against the chip errata).
 * On all other chips this function is a no-op.
 *
 * \param uart_base  Base address of the DesignWare UART register block.
 */
static void dw_uart_clear_send_fifo(dw_uart_regs_t *uart_base)
{
#ifdef CONFIG_CHIP_DANICA
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
dw_uart_putchar(uart_base, 0xFF);
/* Give the bytes time to shift out on the wire before returning. */
mdelay(80);
#endif
}
/**
 * \brief Return the largest of {8, 4, 1} that evenly divides \p num.
 *
 * Despite the name, 8 and 4 are not primes; the function simply picks
 * the widest power-of-two burst factor that divides the transfer size.
 * Note num == 0 divides everything, so it yields 8.
 *
 * \param num  Transfer length in bytes.
 * \return     8, 4 or 1.
 */
static uint8_t find_max_prime_num(uint32_t num)
{
    if ((num % 8U) == 0U) {
        return 8U;
    }

    if ((num % 4U) == 0U) {
        return 4U;
    }

    return 1U;
}
/**
 * \brief RX-interrupt worker: move bytes from the hardware FIFO into the
 *        user buffer, or hand the event to the application.
 *
 * Behavior:
 *  - If no async receive is pending (no buffer or zero size), either
 *    notify the application that the FIFO is readable, or -- with no
 *    callback registered -- discard every waiting byte so the interrupt
 *    is deasserted.
 *  - Otherwise copy min(FIFO level, remaining request) bytes into the
 *    buffer; when the request completes, mark the channel readable and
 *    fire UART_EVENT_RECEIVE_COMPLETE.
 *
 * \param uart  UART instance (must have dev.reg_base set).
 */
static void dw_uart_intr_recv_data(csi_uart_t *uart)
{
dw_uart_regs_t *uart_base = (dw_uart_regs_t *)uart->dev.reg_base;
uint32_t rxfifo_num = dw_uart_get_receive_fifo_waiting_data(uart_base);
/* Read no more than the caller still wants. */
uint32_t rxdata_num = (rxfifo_num > uart->rx_size) ? uart->rx_size : rxfifo_num;
if ((uart->rx_data == NULL) || (uart->rx_size == 0U)) {
if (uart->callback) {
uart->callback(uart, UART_EVENT_RECEIVE_FIFO_READABLE, uart->arg);
} else {
/* No consumer: drain the FIFO to clear the interrupt source. */
do {
dw_uart_getchar(uart_base);
} while (--rxfifo_num);
}
} else {
do {
*uart->rx_data = dw_uart_getchar(uart_base);
uart->rx_size--;
uart->rx_data++;
} while (--rxdata_num);
if (uart->rx_size == 0U) {
uart->state.readable = 1U;
if (uart->callback) {
uart->callback(uart, UART_EVENT_RECEIVE_COMPLETE, uart->arg);
}
}
}
}
/**
 * \brief TX-interrupt worker: refill the transmit FIFO from the pending
 *        async send buffer.
 *
 * Writes up to UART_MAX_FIFO (16) bytes per THR-empty interrupt.  When
 * the request is exhausted, the TX interrupt is disabled, the channel is
 * marked writeable and UART_EVENT_SEND_COMPLETE is reported.
 *
 * \param uart  UART instance with an active async send.
 */
static void uart_intr_send_data(csi_uart_t *uart)
{
uint32_t i = 0U, trans_num = 0U;
dw_uart_regs_t *uart_base = (dw_uart_regs_t *)uart->dev.reg_base;
/* Cap this burst at the hardware FIFO depth. */
if (uart->tx_size > UART_MAX_FIFO) {
trans_num = UART_MAX_FIFO;
} else {
trans_num = uart->tx_size;
}
for (i = 0U; i < trans_num; i++) {
dw_uart_putchar(uart_base, *uart->tx_data);
uart->tx_size--;
uart->tx_data++;
}
if (uart->tx_size == 0U) {
/* Nothing left to send: stop THR-empty interrupts. */
dw_uart_disable_trans_irq(uart_base);
uart->state.writeable = 1U;
if (uart->callback) {
uart->callback(uart, UART_EVENT_SEND_COMPLETE, uart->arg);
}
}
}
/* Line-status interrupt service: a line error aborts any transfer in
 * flight, so both state flags are released.  Each error bit present in
 * LSR is reported to the user callback individually, in the fixed order
 * overrun, parity, framing, break. */
static void uart_intr_line_error(csi_uart_t *uart)
{
    dw_uart_regs_t *regs = (dw_uart_regs_t *)uart->dev.reg_base;
    uint32_t status;

    uart->state.readable = 1U;
    uart->state.writeable = 1U;
    status = dw_uart_get_line_status(regs);

    if (uart->callback == NULL) {
        return;
    }

    if (status & DW_UART_LSR_OE_ERROR) {
        uart->callback(uart, UART_EVENT_ERROR_OVERFLOW, uart->arg);
    }

    if (status & DW_UART_LSR_PE_ERROR) {
        uart->callback(uart, UART_EVENT_ERROR_PARITY, uart->arg);
    }

    if (status & DW_UART_LSR_FE_ERROR) {
        uart->callback(uart, UART_EVENT_ERROR_FRAMING, uart->arg);
    }

    if (status & DW_UART_LSR_BI_SET) {
        uart->callback(uart, UART_ENENT_BREAK_INTR, uart->arg);
    }
}
/* Top-level UART interrupt handler.  arg is the csi_dev_t embedded in a
 * csi_uart_t (attached via csi_irq_attach in csi_uart_attach_callback).
 * Dispatches on the interrupt ID field of IIR (low 4 bits). */
void dw_uart_irq_handler(void *arg)
{
    csi_uart_t *uart = (csi_uart_t *)arg;
    dw_uart_regs_t *regs = (dw_uart_regs_t *)uart->dev.reg_base;
    uint8_t iid = (uint8_t)(regs->IIR & 0xfU);

    if (iid == DW_UART_IIR_IID_RECV_LINE_STATUS) {
        /* overrun/parity/framing error or break condition */
        uart_intr_line_error(uart);
    } else if (iid == DW_UART_IIR_IID_THR_EMPTY) {
        /* transmit holding register empty */
        uart_intr_send_data(uart);
    } else if ((iid == DW_UART_IIR_IID_RECV_DATA_AVAIL) ||
               (iid == DW_UART_IIR_IID_CHARACTER_TIMEOUT)) {
        /* RX data available / FIFO trigger level reached, or RX timeout */
        dw_uart_intr_recv_data(uart);
    }
    /* any other interrupt ID is ignored */
}
/* Initialize UART instance idx: resolve the device handle, reset the
 * FIFOs, clear all pending transfer bookkeeping and mask both TX and RX
 * interrupts.  Returns the target_get() result. */
csi_error_t csi_uart_init(csi_uart_t *uart, uint32_t idx)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    csi_error_t ret = target_get(DEV_DW_UART_TAG, idx, &uart->dev);

    if (ret == CSI_OK) {
        dw_uart_regs_t *regs = (dw_uart_regs_t *)HANDLE_REG_BASE(uart);
        dw_uart_fifo_init(regs);
        /* no transfer pending yet */
        uart->rx_size = 0U;
        uart->tx_size = 0U;
        uart->rx_data = NULL;
        uart->tx_data = NULL;
        uart->tx_dma  = NULL;
        uart->rx_dma  = NULL;
        /* interrupts stay masked until a callback is attached */
        dw_uart_disable_trans_irq(regs);
        dw_uart_disable_recv_irq(regs);
    }

    return ret;
}
/* Tear down the UART: drop any pending transfer state, mask TX/RX
 * interrupts and release the interrupt line. */
void csi_uart_uninit(csi_uart_t *uart)
{
    CSI_PARAM_CHK_NORETVAL(uart);
    dw_uart_regs_t *regs = (dw_uart_regs_t *)HANDLE_REG_BASE(uart);

    uart->rx_data = NULL;
    uart->tx_data = NULL;
    uart->rx_size = 0U;
    uart->tx_size = 0U;

    dw_uart_disable_trans_irq(regs);
    dw_uart_disable_recv_irq(regs);
    csi_irq_disable((uint32_t)(uart->dev.irq_num));
    csi_irq_detach((uint32_t)(uart->dev.irq_num));
}
/* Change the baud rate.  The TX/RX pins are temporarily switched to
 * GPIO so glitches during divisor reprogramming do not reach the wire,
 * the divisor is written with interrupts saved/restored around it, and
 * finally up to 16 junk bytes captured during the switch are drained. */
ATTRIBUTE_DATA csi_error_t csi_uart_baud(csi_uart_t *uart, uint32_t baud)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    pin_name_t rx_pin;
    pin_name_t tx_pin;
    uint8_t scratch[16];
    uint32_t irq_flag;
    int32_t err;
    dw_uart_regs_t *regs = (dw_uart_regs_t *)HANDLE_REG_BASE(uart);

    /* park pins as GPIO while the divisor changes */
    tx_pin_uart_to_gpio(uart->dev.idx, &tx_pin);
    rx_pin_uart_to_gpio(uart->dev.idx, &rx_pin);

    irq_flag = csi_irq_save();
    err = dw_uart_config_baudrate(regs, baud, soc_get_uart_freq((uint32_t)(uart->dev.idx)));
    csi_irq_restore(irq_flag);

    dw_uart_clear_send_fifo(regs);
    rx_pin_gpio_to_uart(uart->dev.idx, &rx_pin);
    tx_pin_gpio_to_uart(uart->dev.idx, &tx_pin);
    /* drain any noise received while switching */
    csi_uart_receive(uart, scratch, sizeof(scratch), 0U);

    return (err == 0) ? CSI_OK : CSI_ERROR;
}
/* Configure the character frame: data bits, parity and stop bits.
 * The TX/RX pins are parked as GPIO while the line control settings are
 * rewritten, and junk bytes captured during the switch are drained at
 * the end.  Returns CSI_ERROR on any unsupported combination.
 *
 * Bug fix / cleanup: the UART_STOP_BITS_1_5 case used an un-annotated
 * implicit fall-through into default when data_bits != 5 — correct, but
 * fragile and a compiler-warning magnet.  The rejection is now explicit,
 * and the three nested configuration stages are flattened into
 * sequential, ret-guarded switches with identical behavior. */
csi_error_t csi_uart_format(csi_uart_t *uart, csi_uart_data_bits_t data_bits,
                            csi_uart_parity_t parity, csi_uart_stop_bits_t stop_bits)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    int32_t ret = 0;
    pin_name_t rx_pin;
    pin_name_t tx_pin;
    dw_uart_regs_t *uart_base;
    uint8_t temp[16];
    uart_base = (dw_uart_regs_t *)HANDLE_REG_BASE(uart);

    /* park pins as GPIO while the line control register is rewritten */
    tx_pin_uart_to_gpio(uart->dev.idx, &tx_pin);
    rx_pin_uart_to_gpio(uart->dev.idx, &rx_pin);

    switch (data_bits) {
    case UART_DATA_BITS_5:
        ret = dw_uart_config_data_bits(uart_base, 5U);
        break;
    case UART_DATA_BITS_6:
        ret = dw_uart_config_data_bits(uart_base, 6U);
        break;
    case UART_DATA_BITS_7:
        ret = dw_uart_config_data_bits(uart_base, 7U);
        break;
    case UART_DATA_BITS_8:
        ret = dw_uart_config_data_bits(uart_base, 8U);
        break;
    default:
        ret = -1;
        break;
    }

    if (ret == 0) {
        switch (parity) {
        case UART_PARITY_NONE:
            ret = dw_uart_config_parity_none(uart_base);
            break;
        case UART_PARITY_ODD:
            ret = dw_uart_config_parity_odd(uart_base);
            break;
        case UART_PARITY_EVEN:
            ret = dw_uart_config_parity_even(uart_base);
            break;
        default:
            ret = -1;
            break;
        }
    }

    if (ret == 0) {
        switch (stop_bits) {
        case UART_STOP_BITS_1:
            ret = dw_uart_config_stop_bits(uart_base, 1U);
            break;
        case UART_STOP_BITS_2:
            ret = dw_uart_config_stop_bits(uart_base, 2U);
            break;
        case UART_STOP_BITS_1_5:
            /* 1.5 stop bits only exist with 5-bit characters (the "2
             * stop bits" setting then means 1.5 per the DW_apb_uart
             * LCR definition); reject every other combination. */
            if (data_bits == UART_DATA_BITS_5) {
                ret = dw_uart_config_stop_bits(uart_base, 2U);
            } else {
                ret = -1;
            }
            break;
        default:
            ret = -1;
            break;
        }
    }

    dw_uart_clear_send_fifo(uart_base);
    rx_pin_gpio_to_uart(uart->dev.idx, &rx_pin);
    tx_pin_gpio_to_uart(uart->dev.idx, &tx_pin);
    /* drain any noise captured while the pins were switched */
    csi_uart_receive(uart, temp, sizeof(temp), 0U);

    return (ret == 0) ? CSI_OK : CSI_ERROR;
}
/* Configure hardware flow control.  The controller is drained to idle
 * before the mode changes.  RTS-only mode is not supported and returns
 * CSI_UNSUPPORTED. */
csi_error_t csi_uart_flowctrl(csi_uart_t *uart, csi_uart_flowctrl_t flowctrl)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    csi_error_t status = CSI_OK;
    dw_uart_regs_t *regs = (dw_uart_regs_t *)HANDLE_REG_BASE(uart);

    switch (flowctrl) {
    case UART_FLOWCTRL_NONE:
        dw_uart_wait_idle(regs);
        dw_uart_disable_auto_flow_control(regs);
        break;

    case UART_FLOWCTRL_CTS:
        dw_uart_wait_idle(regs);
        dw_uart_enable_auto_flow_control(regs);
        break;

    case UART_FLOWCTRL_RTS_CTS:
        dw_uart_wait_idle(regs);
        dw_uart_enable_rts(regs);
        dw_uart_enable_auto_flow_control(regs);
        break;

    case UART_FLOWCTRL_RTS: /* RTS-only is not supported by this driver */
    default:
        status = CSI_UNSUPPORTED;
        break;
    }

    return status;
}
/* Blocking single-character transmit with a bounded busy-wait of
 * UART_TIMEOUT polls; the character is silently dropped on timeout.
 *
 * Bug fix: the original condition `while (!ready && timeout--)` wrapped
 * the unsigned counter from 0 to 0xFFFFFFFF when the wait actually
 * timed out, so the following `if (timeout)` pushed the character into
 * a FIFO that was never reported ready.  The decrement is now pre-tested
 * so timeout == 0 reliably means "give up". */
void csi_uart_putc(csi_uart_t *uart, uint8_t ch)
{
    CSI_PARAM_CHK_NORETVAL(uart);
    uint32_t timeout = UART_TIMEOUT;
    dw_uart_regs_t *uart_base;
    uart_base = (dw_uart_regs_t *)uart->dev.reg_base;

    while ((timeout > 0U) && !dw_uart_putready(uart_base)) {
        timeout--;
    }

    if (timeout > 0U) {
        dw_uart_putchar(uart_base, ch);
    }
}
/* Blocking single-character read: busy-waits (with no timeout) until
 * the RX FIFO has data, then returns one byte.  Callers needing a
 * bounded wait should use csi_uart_receive() instead. */
ATTRIBUTE_DATA uint8_t csi_uart_getc(csi_uart_t *uart)
{
    CSI_PARAM_CHK(uart, 0U);
    dw_uart_regs_t *uart_base;
    uart_base = (dw_uart_regs_t *)uart->dev.reg_base;

    /* spin until at least one byte is available */
    while (!dw_uart_getready(uart_base));

    return dw_uart_getchar(uart_base);
}
/* Polled receive of up to size bytes with an inter-byte timeout in
 * milliseconds (the timeout restarts after every byte received).
 * RX interrupts are masked for the duration and the previous interrupt
 * enable state is restored afterwards.  Returns the number of bytes
 * actually read. */
int32_t csi_uart_receive(csi_uart_t *uart, void *data, uint32_t size, uint32_t timeout)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    CSI_PARAM_CHK(data, CSI_ERROR);
    uint8_t *dst = (uint8_t *)data;
    int32_t received = 0;
    uint32_t start = csi_tick_get_ms();
    dw_uart_regs_t *regs = (dw_uart_regs_t *)uart->dev.reg_base;
    uint32_t saved_irq = dw_uart_get_intr_en_status(regs);

    dw_uart_disable_recv_irq(regs);

    while (received < (int32_t)size) {
        uint32_t timed_out = 0U;

        while (!dw_uart_getready(regs)) {
            if ((csi_tick_get_ms() - start) >= timeout) {
                timed_out = 1U;
                break;
            }
        }

        if (timed_out != 0U) {
            break;
        }

        *dst = dw_uart_getchar(regs);
        dst++;
        received++;
        start = csi_tick_get_ms();   /* per-byte timeout */
    }

    dw_uart_set_intr_en_status(regs, saved_irq);
    return received;
}
/* Arm an interrupt-driven receive of num bytes into data; completion is
 * reported through the user callback from the RX interrupt service. */
csi_error_t dw_uart_receive_intr(csi_uart_t *uart, void *data, uint32_t num)
{
    dw_uart_regs_t *regs = (dw_uart_regs_t *)uart->dev.reg_base;

    uart->rx_size = num;
    uart->rx_data = (uint8_t *)data;
    dw_uart_enable_recv_irq(regs);

    return CSI_OK;
}
/* Start an asynchronous receive through the currently linked backend
 * (interrupt- or DMA-driven).  Requires an attached callback.  On
 * success the port is marked busy until RECEIVE_COMPLETE fires. */
csi_error_t csi_uart_receive_async(csi_uart_t *uart, void *data, uint32_t size)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    CSI_PARAM_CHK(data, CSI_ERROR);
    CSI_PARAM_CHK(uart->callback, CSI_ERROR);
    CSI_PARAM_CHK(uart->receive, CSI_ERROR);

    csi_error_t status = uart->receive(uart, data, size);

    if (status == CSI_OK) {
        uart->state.readable = 0U;
    }

    return status;
}
/* Polled transmit of up to size bytes with an inter-byte timeout in
 * milliseconds (the timeout restarts after every byte pushed).
 * TX interrupts are masked for the duration and the previous interrupt
 * enable state is restored afterwards.  Returns the number of bytes
 * actually written to the TX FIFO. */
int32_t csi_uart_send(csi_uart_t *uart, const void *data, uint32_t size, uint32_t timeout)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    CSI_PARAM_CHK(data, CSI_ERROR);
    CSI_PARAM_CHK(size, CSI_ERROR);
    const uint8_t *src = (const uint8_t *)data;
    int32_t sent = 0;
    uint32_t start;
    dw_uart_regs_t *regs = (dw_uart_regs_t *)uart->dev.reg_base;
    /* remember the interrupt mask, run polled, restore on exit */
    uint32_t saved_irq = dw_uart_get_intr_en_status(regs);

    dw_uart_disable_trans_irq(regs);
    start = csi_tick_get_ms();

    while (sent < (int32_t)size) {
        uint32_t timed_out = 0U;

        while (!dw_uart_putready(regs)) {
            if ((csi_tick_get_ms() - start) >= timeout) {
                timed_out = 1U;
                break;
            }
        }

        if (timed_out != 0U) {
            break;
        }

        dw_uart_putchar(regs, *src);
        src++;
        sent++;
        start = csi_tick_get_ms();   /* per-byte timeout */
    }

    dw_uart_set_intr_en_status(regs, saved_irq);
    return sent;
}
/* Arm an interrupt-driven transmit of size bytes from data; the TX ISR
 * drains the buffer and reports completion through the user callback. */
csi_error_t dw_uart_send_intr(csi_uart_t *uart, const void *data, uint32_t size)
{
    dw_uart_regs_t *regs = (dw_uart_regs_t *)uart->dev.reg_base;

    uart->tx_size = size;
    uart->tx_data = (uint8_t *)data;   /* cast drops const; ISR only reads */
    dw_uart_enable_trans_irq(regs);

    return CSI_OK;
}
/* Start an asynchronous transmit through the currently linked backend
 * (interrupt- or DMA-driven).  Requires an attached callback.  On
 * success the port is marked busy until SEND_COMPLETE fires. */
csi_error_t csi_uart_send_async(csi_uart_t *uart, const void *data, uint32_t size)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    CSI_PARAM_CHK(data, CSI_ERROR);
    CSI_PARAM_CHK(uart->callback, CSI_ERROR);
    CSI_PARAM_CHK(uart->send, CSI_ERROR);

    csi_error_t status = uart->send(uart, data, size);

    if (status == CSI_OK) {
        uart->state.writeable = 0U;
    }

    return status;
}
/* Install the user event callback, hook up the interrupt line and
 * enable RX interrupts.  Transfer hooks default to the interrupt-driven
 * variants; csi_uart_link_dma() may replace them later. */
csi_error_t csi_uart_attach_callback(csi_uart_t *uart, void *callback, void *arg)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    dw_uart_regs_t *regs = (dw_uart_regs_t *)HANDLE_REG_BASE(uart);

    uart->callback = callback;
    uart->arg      = arg;
    uart->send     = dw_uart_send_intr;
    uart->receive  = dw_uart_receive_intr;

    csi_irq_attach((uint32_t)(uart->dev.irq_num), &dw_uart_irq_handler, &uart->dev);
    csi_irq_enable((uint32_t)(uart->dev.irq_num));
    dw_uart_enable_recv_irq(regs);

    return CSI_OK;
}
/* Remove the user callback and return to a quiescent state: transfer
 * hooks cleared, RX interrupt masked, interrupt line released. */
void csi_uart_detach_callback(csi_uart_t *uart)
{
    CSI_PARAM_CHK_NORETVAL(uart);
    dw_uart_regs_t *regs = (dw_uart_regs_t *)HANDLE_REG_BASE(uart);

    uart->callback = NULL;
    uart->arg      = NULL;
    uart->send     = NULL;
    uart->receive  = NULL;

    dw_uart_disable_recv_irq(regs);
    csi_irq_disable((uint32_t)(uart->dev.irq_num));
    csi_irq_detach((uint32_t)(uart->dev.irq_num));
}
/* Copy the current readable/writeable transfer state into *state. */
csi_error_t csi_uart_get_state(csi_uart_t *uart, csi_state_t *state)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    CSI_PARAM_CHK(state, CSI_ERROR);

    state[0] = uart->state;

    return CSI_OK;
}
/* DMA channel event callback, shared by the TX and RX channels (the
 * owning uart is recovered from dma->parent; whether this is the TX or
 * RX half is decided by comparing channel IDs against uart->tx_dma).
 *
 * On error: stop the channel, detach the ETB trigger, reset the FIFOs
 * and release the corresponding state flag; TX errors are reported as
 * OVERFLOW and RX errors as FRAMING to the user callback.
 * On completion: for TX, first spin until the transmit FIFO has fully
 * drained so SEND_COMPLETE is not raised while bits are still on the
 * wire; for RX, re-enable the RX interrupt so the interrupt path keeps
 * working after the DMA transfer.  The exact ordering of the
 * stop/ETB/FIFO-reset steps differs between branches and is preserved
 * as-is. */
static void dw_uart_dma_event_cb(csi_dma_ch_t *dma, csi_dma_event_t event, void *arg)
{
    csi_uart_t *uart = (csi_uart_t *)dma->parent;
    dw_uart_regs_t *uart_base = (dw_uart_regs_t *)uart->dev.reg_base;

    if (event == DMA_EVENT_TRANSFER_ERROR) {/* DMA transfer ERROR */
        if ((uart->tx_dma != NULL) && (uart->tx_dma->ch_id == dma->ch_id)) {
            /* TX channel failed */
            csi_dma_ch_stop(dma);
            dw_uart_set_tx_etb_func(uart_base, 0U);
            dw_uart_fifo_init(uart_base);
            uart->state.writeable = 1U;

            if (uart->callback) {
                uart->callback(uart, UART_EVENT_ERROR_OVERFLOW, uart->arg);
            }
        } else {
            /* RX channel failed */
            csi_dma_ch_stop(dma);
            dw_uart_set_rx_etb_func(uart_base, 0U);
            dw_uart_fifo_init(uart_base);
            /* enable received data available */
            dw_uart_enable_recv_irq(uart_base);
            uart->state.readable = 1U;

            if (uart->callback) {
                uart->callback(uart, UART_EVENT_ERROR_FRAMING, uart->arg);
            }
        }
    } else if (event == DMA_EVENT_TRANSFER_DONE) {/* DMA transfer complete */
        if ((uart->tx_dma != NULL) && (uart->tx_dma->ch_id == dma->ch_id)) {
            /* busy-wait until the TX FIFO has drained onto the wire */
            while(1) {
                if (dw_uart_get_trans_fifo_waiting_data(uart_base) == 0U) {
                    break;
                }
            }

            dw_uart_set_tx_etb_func(uart_base, 0U);
            csi_dma_ch_stop(dma);
            dw_uart_fifo_init(uart_base);
            uart->state.writeable = 1U;

            if (uart->callback) {
                uart->callback(uart, UART_EVENT_SEND_COMPLETE, uart->arg);
            }
        } else {
            dw_uart_set_rx_etb_func(uart_base, 0U);
            csi_dma_ch_stop(dma);
            dw_uart_fifo_init(uart_base);
            /* enable received data available */
            dw_uart_enable_recv_irq(uart_base);
            uart->state.readable = 1U;

            if (uart->callback) {
                uart->callback(uart, UART_EVENT_RECEIVE_COMPLETE, uart->arg);
            }
        }
    }
}
/* Start a DMA-driven transmit of num bytes from data.
 * TX/RX interrupts are masked, the channel is configured
 * memory-to-peripheral with byte-wide accesses, and the TX FIFO
 * empty-trigger threshold is matched to the chosen DMA group length
 * before the channel is started and the ETB TX trigger is enabled.
 * Completion and errors arrive asynchronously in dw_uart_dma_event_cb().
 * Always returns CSI_OK — the csi_dma_ch_config() result is not checked
 * here (unlike dw_uart_receive_dma()); NOTE(review): consider checking. */
csi_error_t dw_uart_send_dma(csi_uart_t *uart, const void *data, uint32_t num)
{
    csi_dma_ch_config_t config;
    memset(&config, 0, sizeof(csi_dma_ch_config_t));
    uint32_t fcr_reg = UART_FIFO_INIT_CONFIG;
    dw_uart_regs_t *uart_base = (dw_uart_regs_t *)uart->dev.reg_base;
    csi_dma_ch_t *dma_ch = (csi_dma_ch_t *)uart->tx_dma;

    uart->tx_data = (uint8_t *)data;   /* cast drops const; DMA only reads */
    uart->tx_size = num;

    dw_uart_disable_recv_irq(uart_base);
    dw_uart_disable_trans_irq(uart_base);

    /* memory side increments, THR register address stays fixed */
    config.src_inc = DMA_ADDR_INC;
    config.dst_inc = DMA_ADDR_CONSTANT;
    config.src_tw = DMA_DATA_WIDTH_8_BITS;
    config.dst_tw = DMA_DATA_WIDTH_8_BITS;
    /* config for wj_dma */
    config.group_len = find_max_prime_num(num);
    config.trans_dir = DMA_MEM2PERH;
    /* config for etb */
    config.handshake = uart_tx_hs_num[uart->dev.idx];
    csi_dma_ch_config(dma_ch, &config);

    /* select the TX trigger threshold closest to the DMA burst size */
    fcr_reg &= ~(DW_UART_FCR_TET_Msk);

    if (config.group_len >= (UART_MAX_FIFO / 2U)) {
        fcr_reg |= DW_UART_FCR_TET_FIFO_1_2_FULL;
    } else if (config.group_len >= (UART_MAX_FIFO / 4U)) {
        fcr_reg |= DW_UART_FCR_TET_FIFO_1_4_FULL;
    } else if (config.group_len >= (UART_MAX_FIFO / 8U)) {
        fcr_reg |= DW_UART_FCR_TET_FIFO_2_CHAR;
    } else {
        fcr_reg |= DW_UART_FCR_TET_FIFO_EMTPY;
    }

    /* flush the source buffer out of the data cache before DMA reads it */
    soc_dcache_clean_invalid_range((unsigned long)uart->tx_data, uart->tx_size);
    dw_uart_set_fcr_reg(uart_base, fcr_reg);
    csi_dma_ch_start(uart->tx_dma, (void *)uart->tx_data, (uint8_t *) & (uart_base->THR), uart->tx_size);
    dw_uart_set_tx_etb_func(uart_base, DW_UART_HTX_TX_ETB_FUNC_EN);
    return CSI_OK;
}
/* Start a DMA-driven receive of num bytes into data.
 * TX/RX interrupts are masked, the channel is configured
 * peripheral-to-memory with byte-wide accesses, and the RX FIFO trigger
 * threshold is matched to the DMA group length before the channel is
 * started (with the RX FIFO reset) and the ETB RX trigger is enabled.
 * Returns the csi_dma_ch_config() result; on failure nothing is armed. */
csi_error_t dw_uart_receive_dma(csi_uart_t *uart, void *data, uint32_t num)
{
    csi_dma_ch_config_t config;
    memset(&config, 0, sizeof(csi_dma_ch_config_t));
    csi_error_t ret = CSI_OK;
    uint32_t fcr_reg = UART_FIFO_INIT_CONFIG;
    dw_uart_regs_t *uart_base = (dw_uart_regs_t *)uart->dev.reg_base;
    csi_dma_ch_t *dma = (csi_dma_ch_t *)uart->rx_dma;

    dw_uart_disable_trans_irq(uart_base);
    dw_uart_disable_recv_irq(uart_base);

    uart->rx_data = (uint8_t *)data;
    uart->rx_size = num;

    /* RBR register address stays fixed, memory side increments */
    config.src_inc = DMA_ADDR_CONSTANT;
    config.dst_inc = DMA_ADDR_INC;
    config.src_tw = DMA_DATA_WIDTH_8_BITS;
    config.dst_tw = DMA_DATA_WIDTH_8_BITS;
    config.group_len = find_max_prime_num(num);
    config.trans_dir = DMA_PERH2MEM;
    config.handshake = uart_rx_hs_num[uart->dev.idx];
    ret = csi_dma_ch_config(dma, &config);

    if (ret == CSI_OK) {
        /* select the RX trigger threshold closest to the DMA burst size */
        fcr_reg &= ~(DW_UART_FCR_RT_Msk);

        if (config.group_len >= (UART_MAX_FIFO / 2U)) {
            fcr_reg |= DW_UART_FCR_RT_FIFO_1_2_FULL;
        } else if (config.group_len >= (UART_MAX_FIFO / 4U)) {
            fcr_reg |= DW_UART_FCR_RT_FIFO_1_4_FULL;
        } else {
            fcr_reg |= DW_UART_FCR_RT_FIFO_1_CHAR;
        }

        /* invalidate the destination range so stale cache lines do not
           mask the DMA-written data */
        soc_dcache_clean_invalid_range((unsigned long)uart->rx_data, uart->rx_size);
        dw_uart_set_fcr_reg(uart_base, fcr_reg | DW_UART_FCR_RFIFOR_RESET);
        csi_dma_ch_start(uart->rx_dma, (uint8_t *) & (uart_base->RBR), (void *)uart->rx_data, uart->rx_size);
        dw_uart_set_rx_etb_func(uart_base, DW_UART_HTX_RX_ETB_FUNC_EN);
    }

    return ret;
}
/* Link or unlink DMA channels for the TX and RX directions.
 * A non-NULL channel is allocated, given the shared event callback and
 * installed so async sends/receives go through the DMA path; a NULL
 * argument frees any previously linked channel and falls back to the
 * interrupt-driven path.  Requires a callback to be attached first.
 * NOTE(review): the RX half runs whenever ret != CSI_ERROR, so if
 * csi_dma_ch_alloc() ever returned a third error code the RX link would
 * still be attempted — confirm it only returns CSI_OK/CSI_ERROR. */
csi_error_t csi_uart_link_dma(csi_uart_t *uart, csi_dma_ch_t *tx_dma, csi_dma_ch_t *rx_dma)
{
    CSI_PARAM_CHK(uart, CSI_ERROR);
    CSI_PARAM_CHK(uart->callback, CSI_ERROR);
    csi_error_t ret = CSI_OK;

    if (tx_dma != NULL) {
        tx_dma->parent = uart;
        ret = csi_dma_ch_alloc(tx_dma, -1, -1);

        if (ret == CSI_OK) {
            csi_dma_ch_attach_callback(tx_dma, dw_uart_dma_event_cb, NULL);
            uart->tx_dma = tx_dma;
            uart->send = dw_uart_send_dma;
        } else {
            /* roll back the parent link on allocation failure */
            tx_dma->parent = NULL;
        }
    } else {
        if (uart->tx_dma) {
            csi_dma_ch_detach_callback(uart->tx_dma);
            csi_dma_ch_free(uart->tx_dma);
            uart->tx_dma = NULL;
        }

        uart->send = dw_uart_send_intr;
    }

    if (ret != CSI_ERROR) {
        if (rx_dma != NULL) {
            rx_dma->parent = uart;
            ret = csi_dma_ch_alloc(rx_dma, -1, -1);

            if (ret == CSI_OK) {
                csi_dma_ch_attach_callback(rx_dma, dw_uart_dma_event_cb, NULL);
                uart->rx_dma = rx_dma;
                uart->receive = dw_uart_receive_dma;
            } else {
                rx_dma->parent = NULL;
            }
        } else {
            if (uart->rx_dma) {
                csi_dma_ch_detach_callback(uart->rx_dma);
                csi_dma_ch_free(uart->rx_dma);
                uart->rx_dma = NULL;
            }

            uart->receive = dw_uart_receive_intr;
        }
    }

    return ret;
}
/* Temporarily switch the RX pin of UART dev_idx to GPIO mode (to avoid
 * line glitches while the controller is reconfigured).  The pin that
 * was changed is written to *rx_pin so it can be restored later. */
ATTRIBUTE_DATA static void rx_pin_uart_to_gpio(uint8_t dev_idx, pin_name_t *rx_pin)
{
    CSI_ASSERT(rx_pin);
    int32_t i = 0;

    while (uart_pinmap[i].idx != 0xFFU) {   /* 0xFF terminates the map */
        if ((uart_pinmap[i].idx == dev_idx) &&
            (uart_pinmap[i].channel == (uint8_t)PIN_UART_RX) &&
            (csi_pin_get_mux(uart_pinmap[i].pin_name) == uart_pinmap[i].pin_func)) {
            csi_pin_set_mux(uart_pinmap[i].pin_name, PIN_FUNC_GPIO);
            *rx_pin = uart_pinmap[i].pin_name;
            break;
        }

        i++;
    }
}
/* Temporarily switch the TX pin of UART dev_idx to GPIO mode; the pin
 * that was changed is written to *tx_pin so it can be restored later. */
ATTRIBUTE_DATA static void tx_pin_uart_to_gpio(uint8_t dev_idx, pin_name_t *tx_pin)
{
    CSI_ASSERT(tx_pin);
    int32_t i = 0;

    while (uart_pinmap[i].idx != 0xFFU) {   /* 0xFF terminates the map */
        if ((uart_pinmap[i].idx == dev_idx) &&
            (uart_pinmap[i].channel == (uint8_t)PIN_UART_TX) &&
            (csi_pin_get_mux(uart_pinmap[i].pin_name) == uart_pinmap[i].pin_func)) {
            csi_pin_set_mux(uart_pinmap[i].pin_name, PIN_FUNC_GPIO);
            *tx_pin = uart_pinmap[i].pin_name;
            break;
        }

        i++;
    }
}
/* Restore the RX pin previously parked by rx_pin_uart_to_gpio() back to
 * its UART function. */
ATTRIBUTE_DATA static void rx_pin_gpio_to_uart(uint8_t dev_idx, pin_name_t *rx_pin)
{
    int32_t i = 0;

    while (uart_pinmap[i].idx != 0xFFU) {   /* 0xFF terminates the map */
        if ((uart_pinmap[i].pin_name == *rx_pin) &&
            (uart_pinmap[i].idx == dev_idx)) {
            csi_pin_set_mux(uart_pinmap[i].pin_name, uart_pinmap[i].pin_func);
            break;
        }

        i++;
    }
}
/* Restore the TX pin previously parked by tx_pin_uart_to_gpio() back to
 * its UART function. */
ATTRIBUTE_DATA static void tx_pin_gpio_to_uart(uint8_t dev_idx, pin_name_t *tx_pin)
{
    int32_t i = 0;

    while (uart_pinmap[i].idx != 0xFFU) {   /* 0xFF terminates the map */
        if ((uart_pinmap[i].pin_name == *tx_pin) &&
            (uart_pinmap[i].idx == dev_idx)) {
            csi_pin_set_mux(uart_pinmap[i].pin_name, uart_pinmap[i].pin_func);
            break;
        }

        i++;
    }
}
#ifdef CONFIG_PM
/* PM framework callback: save/restore the UART register file across a
 * suspend/resume cycle into/from the device's retention memory.
 * Before each save/restore the FIFOs are reset and the controller is
 * drained to idle.  Registers are saved in three chunks: two words at
 * offset 0x0 with LCR.DLAB set (with DLAB these offsets presumably
 * address the baud divisor latch per the DW_apb_uart register map —
 * confirm against the databook), one word at offset 0x4 with DLAB
 * cleared, and two words starting at offset 0xc. */
csi_error_t dw_uart_pm_action(csi_dev_t *dev, csi_pm_dev_action_t action)
{
    CSI_PARAM_CHK(dev, CSI_ERROR);
    csi_error_t ret = CSI_OK;
    csi_pm_dev_t *pm_dev = &dev->pm_dev;
    dw_uart_regs_t *uart_base = (dw_uart_regs_t *)dev->reg_base;

    switch (action) {
    case PM_DEV_SUSPEND:
        dw_uart_fifo_disable(uart_base);
        dw_uart_fifo_enable(uart_base);
        dw_uart_wait_idle(uart_base);
        /* expose the divisor latch, save it, then hide it again */
        uart_base->LCR |= DW_UART_LCR_DLAB_EN;
        csi_pm_dev_save_regs(pm_dev->reten_mem, (uint32_t *)dev->reg_base, 2U);
        uart_base->LCR &= (~DW_UART_LCR_DLAB_EN);
        csi_pm_dev_save_regs(pm_dev->reten_mem + 2, (uint32_t *)(dev->reg_base + 4U), 1U);
        csi_pm_dev_save_regs(pm_dev->reten_mem + 2 + 1, (uint32_t *)(dev->reg_base + 12U), 2U);
        break;

    case PM_DEV_RESUME:
        dw_uart_fifo_disable(uart_base);
        dw_uart_fifo_enable(uart_base);
        dw_uart_wait_idle(uart_base);
        /* restore in the same chunk order used when saving */
        uart_base->LCR |= DW_UART_LCR_DLAB_EN;
        csi_pm_dev_restore_regs(pm_dev->reten_mem, (uint32_t *)dev->reg_base, 2U);
        uart_base->LCR &= (~DW_UART_LCR_DLAB_EN);
        csi_pm_dev_restore_regs(pm_dev->reten_mem + 2, (uint32_t *)(dev->reg_base + 4U), 1U);
        csi_pm_dev_restore_regs(pm_dev->reten_mem + 2 + 1, (uint32_t *)(dev->reg_base + 12U), 2U);
        break;

    default:
        ret = CSI_ERROR;
        break;
    }

    return ret;
}
/* Register this UART with the PM framework using dw_uart_pm_action as
 * the suspend/resume hook.  The 20U/0U arguments are passed straight to
 * csi_pm_dev_register — presumably the retention-memory size and a PM
 * mode mask; confirm against the csi_pm API. */
csi_error_t csi_uart_enable_pm(csi_uart_t *uart)
{
    return csi_pm_dev_register(&uart->dev, dw_uart_pm_action, 20U, 0U);
}
/* Unregister this UART from the PM framework (inverse of
 * csi_uart_enable_pm). */
void csi_uart_disable_pm(csi_uart_t *uart)
{
    csi_pm_dev_unregister(&uart->dev);
}
#endif
| 28.912759 | 133 | 0.621808 |
cbe7757402649c51e596e4fcb76e5012ece8f0a4 | 265 | h | C | A_Category/Classes/CTMediator+A.h | TouchFriend/A_Category | 9780118e3e07be15c0d4cb06b29a2f83901447f6 | [
"Apache-2.0"
] | null | null | null | A_Category/Classes/CTMediator+A.h | TouchFriend/A_Category | 9780118e3e07be15c0d4cb06b29a2f83901447f6 | [
"Apache-2.0"
] | null | null | null | A_Category/Classes/CTMediator+A.h | TouchFriend/A_Category | 9780118e3e07be15c0d4cb06b29a2f83901447f6 | [
"Apache-2.0"
] | null | null | null | //
// CTMediator+A.h
// A_Category
//
// Created by TouchWorld on 2020/8/26.
//
#import <UIKit/UIKit.h>
#import <CTMediator/CTMediator.h>
NS_ASSUME_NONNULL_BEGIN
/// Category exposing the A component's entry points through CTMediator.
@interface CTMediator (A)

/// Returns a view controller provided by the A component.
/// NOTE(review): presumably resolved through the mediator's
/// target-action mechanism — confirm in the implementation.
- (UIViewController *)A_aViewController;

@end
NS_ASSUME_NONNULL_END
| 13.25 | 40 | 0.732075 |
c4d77fc6a46fccacacf4f37134b665144d5ddf3c | 212 | h | C | game/data/KeyMap.h | samuelgrigolato/miniplat | 485ae950c21f8b4202b1a7461b928ba64931e12e | [
"MIT"
] | null | null | null | game/data/KeyMap.h | samuelgrigolato/miniplat | 485ae950c21f8b4202b1a7461b928ba64931e12e | [
"MIT"
] | null | null | null | game/data/KeyMap.h | samuelgrigolato/miniplat | 485ae950c21f8b4202b1a7461b928ba64931e12e | [
"MIT"
] | null | null | null | #ifndef KEYMAP_H
#define KEYMAP_H
#include <stdint.h>
namespace game {
namespace data {
// Per-player keyboard bindings: each field holds the key code assigned
// to the corresponding action.  Concrete code values depend on the
// input backend — TODO confirm which key-code space is used.
struct KeyMap {
int32_t up;    // move up
int32_t left;  // move left
int32_t down;  // move down
int32_t right; // move right
int32_t fire;  // fire/action
};
}}
#endif
| 10.095238 | 19 | 0.65566 |
4fbd0eb59009562711a4f6083e5f3648ad6b78f7 | 4,117 | c | C | src/pipe/modules/nprof/main.c | CarVac/vkdt | deabfdfc43bcc6a5f1aaea720695bbd597a87561 | [
"BSD-2-Clause"
] | null | null | null | src/pipe/modules/nprof/main.c | CarVac/vkdt | deabfdfc43bcc6a5f1aaea720695bbd597a87561 | [
"BSD-2-Clause"
] | null | null | null | src/pipe/modules/nprof/main.c | CarVac/vkdt | deabfdfc43bcc6a5f1aaea720695bbd597a87561 | [
"BSD-2-Clause"
] | null | null | null | #include "modules/api.h"
#include "../core/core.h" // for MIN
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
/* Request the full-resolution input: reset connector 0's region of
 * interest to the complete image at scale 1. */
void modify_roi_in(
    dt_graph_t *graph,
    dt_module_t *module)
{
  dt_roi_t *roi = &module->connector[0].roi;
  roi->x     = 0;
  roi->y     = 0;
  roi->wd    = roi->full_wd;
  roi->ht    = roi->full_ht;
  roi->scale = 1.0f;
}
// called after the pipeline finished up to here; the input buffer comes
// in memory mapped.
//
// Fits the linear noise model sigma^2 = a + b*x to per-bin raw moments
// and writes the two parameters to "<maker>-<model>-<iso>.nprof".
// Buffer layout (inferred from the indexing below — confirm against the
// producing shader): wd bins, row 0 = sample count, row 1 = first
// moment, row 2 = second moment.  x is the mean raw value minus black;
// y is the variance.
void write_sink(
  dt_module_t *module,
  void *buf)
{
  // read back uint32_t buffer, fit line to noise, output param a and param b to file with our maker model iso:
  uint32_t *p32 = buf;
  const int wd = module->connector[0].roi.wd;
  // const int ht = module->connector[0].roi.ht; == 4
  // examine pairs of samples and find out whether one position likely leads to
  // an outlier or not.
  int valid_cnt = 0;
  int *valid = malloc(wd * sizeof(int));
  // progressively relax the acceptance thresholds (fk, minimum counts)
  // until at least one bin survives the pairwise plausibility test
  for(int f=0;f<5;f++)
  {
    double fk = 1.0 / (f+1.0);
    for(int i=0;i<wd;i++)
    {
      int score = 0;
      for(int j=i+1;j<MIN(i+fk*50, wd);j++)
      {
        // fit: sigma^2 = y = a + b*x
        // to the data (x,y) by looking at pairs of (x,y) from the input. x is
        // the first moment of the observed raw histogram - black level. in
        // particular this means that the noise model is to be applied to
        // uint16_t range x that have the black level subtracted, but have not
        // been rescaled to white in any way.
        // make sure a and b are positive, reject sample otherwise
        double c = p32[i + 0*wd];
        if(c < fk*50) continue;  // too few samples in bin i
        double m1 = p32[i + 1*wd];
        double m2 = p32[i + 2*wd];
        double x1 = m1/c - module->img_param.black[1];
        double y1 = m2/c - m1/c*m1/c;  // variance from raw moments
        c = p32[j + 0*wd];
        if(c < fk*50) continue;  // too few samples in bin j
        m1 = p32[j + 1*wd];
        m2 = p32[j + 2*wd];
        double x2 = m1/c - module->img_param.black[1];
        double y2 = m2/c - m1/c*m1/c;
        if(y1 <= 0 || y2 <= 0) continue;
        double b = (y2-y1)/(x2-x1);  // two-point slope estimate
        if(!(b > 0.0)) continue;
        double a = y1 - x1 * b;      // two-point intercept estimate
        if(!(a > 0.0)) continue;
        if(!(a < 35000.0)) continue; // half the range noise? that would be extraordinary
        if(++score > fk*25) // count a valid sample for this i
        {
          valid[valid_cnt++] = i;
          break; // no more j loop needed
        }
      }
    }
    if(valid_cnt) break; // yay, no need to lower quality standards
    fprintf(stderr, "[nprof] WARN: reducing expectations %dx because we collected very few valid samples!\n", f+1);
  }
  if(valid_cnt <= 0)
    fprintf(stderr, "[nprof] ERR: could not find a single valid sample!\n");
  // incredibly simplistic linear regression from stack overflow.
  // compute covariance matrix and from that the parameters a, b:
  // FILE *d = fopen("test.dat", "wb");
  double sx = 0.0, sx2 = 0.0, sy = 0.0, sy2 = 0.0, sxy = 0.0;
  double cnt = 0.0;
  // bin index -> raw value mapping is log2-encoded over 16 stops
  double white = log2(module->img_param.white[1])/16.0f;
  double black = log2(module->img_param.black[1])/16.0f;
  // weighted least squares over the surviving bins, weight = bin count
  for(int ii=0;ii<valid_cnt;ii++)
  {
    int i = valid[ii];
    double c = p32[i + 0*wd];
    double x = exp2((i / (double)wd * (white - black) + black) * 16.0) - module->img_param.black[1];
    double m1 = p32[i + 1*wd];
    // double x = m1/c - module->img_param.black[1]; // agrees with x as above
    double m2 = p32[i + 2*wd];
    double y = m2/c - m1/c*m1/c;
    cnt += c;
    sx += x * c;
    sx2 += x*x * c;
    sy += y * c;
    sy2 += y*y * c;
    sxy += x*y * c;
    // fprintf(d, "%g %g\n", x, y);
  }
  // fclose(d);
  // closed-form weighted least-squares solution for y = a + b*x
  float denom = cnt * sx2 - sx*sx;
  // TODO: catch / 0 or overflows
  float b = (cnt * sxy - sx * sy) / denom;
  float a = (sy * sx2 - sx * sxy) / denom;
  char filename[512];
  snprintf(filename, sizeof(filename), "%s-%s-%d.nprof",
      module->img_param.maker,
      module->img_param.model,
      (int)module->img_param.iso);
  fprintf(stdout, "[nprof] writing '%s'\n", filename);
  FILE* f = fopen(filename, "wb");
  if(f)
  {
    fprintf(f, "%g %g\n", a, b);
    fclose(f);
  }
  free(valid);
}
| 30.954887 | 115 | 0.565217 |
ae11ac266fcb5e974bf3c60007a94533549660ff | 1,344 | h | C | src/telemetry/telemetry.h | dianasaur323/timescaledb | eecd845781fcc4163d1f9e81d18f116b77a561f8 | [
"Apache-2.0"
] | null | null | null | src/telemetry/telemetry.h | dianasaur323/timescaledb | eecd845781fcc4163d1f9e81d18f116b77a561f8 | [
"Apache-2.0"
] | null | null | null | src/telemetry/telemetry.h | dianasaur323/timescaledb | eecd845781fcc4163d1f9e81d18f116b77a561f8 | [
"Apache-2.0"
] | 1 | 2019-03-26T17:34:11.000Z | 2019-03-26T17:34:11.000Z | /*
* Copyright (c) 2016-2018 Timescale, Inc. All Rights Reserved.
*
* This file is licensed under the Apache License,
* see LICENSE-APACHE at the top level directory.
*/
#ifndef TIMESCALEDB_TELEMETRY_TELEMETRY_H
#define TIMESCALEDB_TELEMETRY_TELEMETRY_H
#include <postgres.h>
#include <fmgr.h>
#include <pg_config.h> // To get USE_OPENSSL from postgres build
#include <utils/builtins.h>
#include "compat.h"
#include "version.h"
#include "net/conn.h"
#include "net/http.h"
#include "utils.h"
#define TELEMETRY_SCHEME "https"
#define TELEMETRY_HOST "telemetry.timescale.com"
#define TELEMETRY_PATH "/v1/metrics"
#define MAX_VERSION_STR_LEN 128
/* Outcome of parsing/validating a server version response (filled in by
 * ts_validate_server_version). */
typedef struct VersionResult
{
	const char *versionstr; /* version string extracted from the response */
	const char *errhint;    /* presumably a human-readable hint on failure — confirm in ts_validate_server_version */
} VersionResult;
extern HttpRequest *ts_build_version_request(const char *host, const char *path);
extern Connection *ts_telemetry_connect(const char *host, const char *service);
extern bool ts_validate_server_version(const char *json, VersionResult *result);
/*
* This function is intended as the main function for a BGW.
* Its job is to send metrics and fetch the most up-to-date version of
* Timescale via HTTPS.
*/
extern bool ts_telemetry_main(const char *host, const char *path, const char *service);
extern bool ts_telemetry_main_wrapper(void);
#endif /* TIMESCALEDB_TELEMETRY_TELEMETRY_H */
| 29.866667 | 87 | 0.769345 |
3ec16d0b57952bfa056454d0d51112c9711539cf | 59 | c | C | build/CMakeFiles/FortranCInterface/symbols/MYSUB-UPPER.c | ralic/lapack_highSierra | 23cb5b8e70df129f7c249af6859ec94a161ef77e | [
"BSD-3-Clause-Open-MPI"
] | 2 | 2020-04-30T19:13:08.000Z | 2021-04-14T19:40:47.000Z | build/CMakeFiles/FortranCInterface/symbols/MYSUB-UPPER.c | ralic/lapack_highSierra | 23cb5b8e70df129f7c249af6859ec94a161ef77e | [
"BSD-3-Clause-Open-MPI"
] | 1 | 2020-04-30T19:19:09.000Z | 2020-05-02T14:22:36.000Z | build/CMakeFiles/FortranCInterface/symbols/MYSUB-UPPER.c | ralic/lapack_highSierra | 23cb5b8e70df129f7c249af6859ec94a161ef77e | [
"BSD-3-Clause-Open-MPI"
] | 1 | 2020-06-12T08:51:24.000Z | 2020-06-12T08:51:24.000Z | const char* MYSUB(void)
{
return "INFO:symbol[MYSUB]";
}
| 11.8 | 30 | 0.661017 |
48ccacfd78bbaa815f408730da29f79aadc7880d | 831 | h | C | src/editor_json.h | paladin-t/bitty | a09d45ba5b0f038d19d80ef7b98d8342d328bccc | [
"BSD-3-Clause"
] | 63 | 2020-10-22T10:31:00.000Z | 2022-03-25T15:54:14.000Z | src/editor_json.h | paladin-t/bitty | a09d45ba5b0f038d19d80ef7b98d8342d328bccc | [
"BSD-3-Clause"
] | 1 | 2021-05-08T18:14:06.000Z | 2021-05-08T18:14:06.000Z | src/editor_json.h | paladin-t/bitty | a09d45ba5b0f038d19d80ef7b98d8342d328bccc | [
"BSD-3-Clause"
] | 6 | 2021-03-09T07:20:53.000Z | 2022-02-13T05:23:52.000Z | /*
** Bitty
**
** An itty bitty game engine.
**
** Copyright (C) 2020 - 2021 Tony Wang, all rights reserved
**
** For the latest info, see https://github.com/paladin-t/bitty/
*/
#ifndef __EDITOR_JSON_H__
#define __EDITOR_JSON_H__
#include "editable.h"
/*
** {===========================================================================
** JSON editor
*/
/* Abstract editable for the built-in JSON editor.  Concrete instances
 * are obtained/released exclusively through the create()/destroy()
 * factory pair. */
class EditorJson : public Editable, public virtual Object {
public:
	BITTY_CLASS_TYPE('J', 'S', 'N', 'E')

	/**
	 * Gets the editor's current text.
	 *
	 * @param[out] len Receives the text length — presumably may be null;
	 *   confirm with the implementation.
	 */
	virtual const char* text(size_t* len) const = 0;
	/* Replaces the editor's text; len == 0 presumably means txt is
	 * NUL-terminated — confirm with the implementation. */
	virtual void text(const char* txt, size_t len = 0) = 0;

	/* Factory pair: allocates / releases a concrete editor instance. */
	static EditorJson* create(void);
	static void destroy(EditorJson* ptr);
};
#endif /* __EDITOR_JSON_H__ */
| 21.868421 | 83 | 0.509025 |
85e2b38c65196116c4a0051bb0fe28e3eebbd625 | 777 | h | C | DataCollector/mozilla/xulrunner-sdk/include/mozilla/dom/AtomList.h | andrasigneczi/TravelOptimiser | b08805f97f0823fd28975a36db67193386aceb22 | [
"Apache-2.0"
] | 1 | 2016-04-20T08:35:44.000Z | 2016-04-20T08:35:44.000Z | DataCollector/mozilla/xulrunner-sdk/include/mozilla/dom/AtomList.h | andrasigneczi/TravelOptimiser | b08805f97f0823fd28975a36db67193386aceb22 | [
"Apache-2.0"
] | null | null | null | DataCollector/mozilla/xulrunner-sdk/include/mozilla/dom/AtomList.h | andrasigneczi/TravelOptimiser | b08805f97f0823fd28975a36db67193386aceb22 | [
"Apache-2.0"
] | null | null | null | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_dom_AtomList_h
#define mozilla_dom_AtomList_h
#include "jsapi.h"
#include "mozilla/dom/GeneratedAtomList.h"
namespace mozilla {
namespace dom {
// Returns the runtime-private PerThreadAtomCache downcast to the
// requested cache type T.  T must be (derived from) PerThreadAtomCache;
// the static_cast is unchecked.
template<class T>
T* GetAtomCache(JSContext* aCx)
{
  JSRuntime* rt = JS_GetRuntime(aCx);
  auto atomCache = static_cast<PerThreadAtomCache*>(JS_GetRuntimePrivate(rt));
  return static_cast<T*>(atomCache);
}
} // namespace dom
} // namespace mozilla
#endif // mozilla_dom_AtomList_h
| 25.9 | 79 | 0.714286 |
e3084f434475b39b3bc6020920c911e899f65afa | 3,386 | h | C | Source/Samples/81_Constraint6DoF/HoverBike.h | elix22/Urho3D-Constraint6DoF | 31422fa6f9558b837ca2b819927896aee5ee2ce5 | [
"MIT"
] | 2 | 2019-10-20T10:04:20.000Z | 2021-09-07T16:56:04.000Z | Source/Samples/81_Constraint6DoF/HoverBike.h | elix22/Urho3D-Constraint6DoF | 31422fa6f9558b837ca2b819927896aee5ee2ce5 | [
"MIT"
] | null | null | null | Source/Samples/81_Constraint6DoF/HoverBike.h | elix22/Urho3D-Constraint6DoF | 31422fa6f9558b837ca2b819927896aee5ee2ce5 | [
"MIT"
] | 2 | 2019-10-20T04:42:34.000Z | 2019-10-28T13:26:06.000Z | //
// Copyright (c) 2008-2019 the Urho3D project.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#pragma once
#include <Urho3D/Scene/LogicComponent.h>
#include <Urho3D/Input/Controls.h>
#include <Urho3D/Physics/RigidBody.h>
#include <Bullet/BulletDynamics/Vehicle/btRaycastVehicle.h>
namespace Urho3D
{
class Node;
class Scene;
class PhysicsWorld;
}
using namespace Urho3D;
//=============================================================================
//=============================================================================
const int CTRL_FORWARD = 1;
const int CTRL_BACK = 2;
const int CTRL_LEFT = 4;
const int CTRL_RIGHT = 8;
const int CTRL_SPACE = 16;
//=============================================================================
//=============================================================================
/// Hover bike vehicle built on Bullet's btRaycastVehicle, driven through
/// Urho3D's LogicComponent update hooks.  Reads CTRL_* bits from controls_
/// each physics tick (FixedUpdate) to steer and accelerate the bike.
class HoverBike : public LogicComponent
{
    URHO3D_OBJECT(HoverBike, LogicComponent);
public:
    HoverBike(Context* context);
    virtual ~HoverBike();
    /// Registers the component factory and its attributes with the engine.
    static void RegisterObject(Context* context);
    /// Attribute-set hook; presumably re-applies tuning values at runtime — confirm in the .cpp.
    virtual void OnSetAttribute(const AttributeInfo& attr, const Variant& src);
    virtual void DelayedStart();
    /// Per-physics-step update; consumes controls_.
    virtual void FixedUpdate(float timeStep);
    /// Builds the raycast vehicle; returns false on failure — TODO confirm failure modes.
    bool Create();
    /// Movement controls.
    Controls controls_;
protected:
    void CreateRaycastVehicle();
    void AddWheel(const String &hubNodeName, bool isFrontWheel);
    // NOTE(review): name typo — should read "GetConstraintNode"; renaming
    // would require a matching change in the implementation file.
    void GetContraintNode();
    void UpdateConstraint(float engineForce);
protected:
    // --- drive / brake tuning ---
    float engineForce_;
    float maxSpeed_;
    float minBrakeForce_;
    float maxBrakeForce_;
    float minBrakeSpeed_;
    // --- wheel / steering tuning ---
    float wheelRadius_;
    float wheelFriction_;
    float steeringIncrement_;
    float steeringClamp_;
    float maxRollAngle_;
    float maxSpeedToMaxRollAngle_;
    // --- suspension tuning (Bullet btVehicleTuning inputs) ---
    float suspensionRestLength_;
    float suspensionStiffness_;
    float suspensionRelaxation_;
    float suspensionCompression_;
    float rollInfluence_;
    // --- 6DoF constraint hookup ---
    String constraintName_;        // name of the constraint node to look up in the scene — TODO confirm
    float softPitchLimit_;
    WeakPtr<RigidBody> rigidBody_;
    WeakPtr<PhysicsWorld> physicsWorld_;
    WeakPtr<Node> nodeConstraint6DoF_;
    // --- Bullet raycast-vehicle internals (raw pointers; ownership handled in the .cpp — confirm) ---
    btRaycastVehicle::btVehicleTuning vehicleTuning_;
    btVehicleRaycaster *vehicleRaycaster_;
    btRaycastVehicle *raycastVehicle_;
    float currentSteering_;
};
| 30.781818 | 80 | 0.660957 |
8bbb8d963213b1797b3b7db1a6017dc06915b96e | 1,023 | c | C | gempak/source/driver/active/vg/vsspln.c | oxelson/gempak | e7c477814d7084c87d3313c94e192d13d8341fa1 | [
"BSD-3-Clause"
] | 42 | 2015-06-03T15:26:21.000Z | 2022-02-28T22:36:03.000Z | gempak/source/driver/active/vg/vsspln.c | oxelson/gempak | e7c477814d7084c87d3313c94e192d13d8341fa1 | [
"BSD-3-Clause"
] | 60 | 2015-05-11T21:36:08.000Z | 2022-03-29T16:22:42.000Z | gempak/source/driver/active/vg/vsspln.c | oxelson/gempak | e7c477814d7084c87d3313c94e192d13d8341fa1 | [
"BSD-3-Clause"
] | 27 | 2016-06-06T21:55:14.000Z | 2022-03-18T18:23:28.000Z | #include "vgcmn.h"
void vsspln ( int *msltyp, int *mslstr, int *msldir, float *tslsiz,
              int *mslwid, int *iret )
/************************************************************************
 * vsspln                                                               *
 *                                                                      *
 * This subroutine sets the special line attributes.                    *
 *                                                                      *
 * vsspln ( msltyp, mslstr, msldir, tslsiz, mslwid, iret )              *
 *                                                                      *
 * Input parameters:                                                    *
 *      *msltyp         int             Special line type               *
 *      *mslstr         int             Special line stroke multiplier  *
 *      *msldir         int             Special line direction indicator*
 *      *tslsiz         float           Special line size               *
 *      *mslwid         int             Special line width              *
 *                                                                      *
 * Output parameters:                                                   *
 *      *iret           int             Return code                     *
 **                                                                     *
 * Log:                                                                 *
 * D. Keiser/GSC         4/97                                           *
 ***********************************************************************/
{
/*---------------------------------------------------------------------*/
    /* This routine cannot fail: always report success. */
    *iret = G_NORMAL;

    /*
     * Copy the requested attributes into the driver's global state.
     * ksltyp/kslstr/ksldir/rslsiz/kslwid are declared in vgcmn.h —
     * presumably the VG driver's shared state; confirm against vgcmn.h.
     * All parameters are pointers because this follows the GEMPAK
     * Fortran-callable convention (pass-by-reference).
     */
    ksltyp = *msltyp;
    kslstr = *mslstr;
    ksldir = *msldir;
    rslsiz = *tslsiz;
    kslwid = *mslwid;
}
| 27.648649 | 73 | 0.439883 |
2725683d26baee2bf68aa7646e79035d5592efef | 813 | h | C | XmilsSDKDo/Classes/SceneAdSdk.framework/Headers/SceneAdSdkExtRewardListModel.h | Ben21hao/XmilsSDKDo | d72090fd8e5f9991388d23d87c18572bd7f3bbae | [
"MIT"
] | null | null | null | XmilsSDKDo/Classes/SceneAdSdk.framework/Headers/SceneAdSdkExtRewardListModel.h | Ben21hao/XmilsSDKDo | d72090fd8e5f9991388d23d87c18572bd7f3bbae | [
"MIT"
] | null | null | null | XmilsSDKDo/Classes/SceneAdSdk.framework/Headers/SceneAdSdkExtRewardListModel.h | Ben21hao/XmilsSDKDo | d72090fd8e5f9991388d23d87c18572bd7f3bbae | [
"MIT"
] | null | null | null | //
// SceneAdSdkExtRewardListModel.h
// SceneAdSdk
//
// Created by caizifeng on 2019/6/27.
// Copyright © 2019 gaven. All rights reserved.
//
#import <Foundation/Foundation.h>
/// One tier of the extra-reward ladder in the quiz feature.
@interface UserExtReward : NSObject
@property (nonatomic , assign) NSInteger answerTimesLimit; // limit on the number of answer attempts
@property (nonatomic , assign) NSInteger lessAnswerTimes; // answers still needed before the reward can be claimed
@property (nonatomic , assign) NSInteger level; // extra-reward tier level
@property (nonatomic , assign) NSInteger status; // 0: not completed, 1: completed, 2: claimed
@property (nonatomic , assign) NSInteger awardCoin; // coins awarded for this tier
@end
/// Response model: the user's quiz progress plus the full extra-reward ladder.
@interface SceneAdSdkExtRewardListModel : NSObject
@property (nonatomic , assign) NSInteger userAnswerRightTimes; // number of idiom-quiz questions the user answered correctly
@property (nonatomic , strong)NSMutableArray<UserExtReward *> *userExtRewardList; // reward tiers; NOTE(review): `copy` is conventional for collection properties — left as-is to avoid behavior change
@end
| 33.875 | 84 | 0.728167 |
6a4f98fdabfaf71ad3ce1a6279fc186ad8485b49 | 1,857 | c | C | factorial/factorial.c | ianfoo/c-examples | 7c7adfa982665b830b985618f01b41e2d8b80109 | [
"MIT"
] | null | null | null | factorial/factorial.c | ianfoo/c-examples | 7c7adfa982665b830b985618f01b41e2d8b80109 | [
"MIT"
] | null | null | null | factorial/factorial.c | ianfoo/c-examples | 7c7adfa982665b830b985618f01b41e2d8b80109 | [
"MIT"
] | null | null | null | #include <stdio.h>
#include <stdlib.h>
#include <errno.h>
// Compute f! by accumulating the product upward from 2 to f.
// Inputs <= 1 (including negatives) yield 1, matching the recursive
// base case of the original implementation.  For f > 20 the product
// wraps modulo 2^64 on LP64 platforms, exactly as the recursive form
// did (unsigned multiplication is commutative, so the ascending order
// produces bit-identical results).
unsigned long factorial(int f)
{
    unsigned long product = 1;
    for (int i = 2; i <= f; i++)
    {
        product *= (unsigned long)i;
    }
    return product;
}
// Memoization table for factorial results.
//
// NOTE(review): the original comment claimed 65! is the largest value an
// unsigned long can hold, but on LP64 platforms only 20! fits; inputs
// above 20 wrap modulo 2^64, exactly like the plain factorial().
// max_input stays at 65 so main()'s caller-visible bounds check and
// message are unchanged.
//
// The array is sized with an enum constant: in C, a file-scope array
// cannot be sized by a `const int` variable (it is not an integer
// constant expression), so the original `long memos[max_input+1]`
// declaration did not compile.
enum { MAX_INPUT_TABLE = 65 };
const int max_input = MAX_INPUT_TABLE;     // kept with external linkage for main()
unsigned long memos[MAX_INPUT_TABLE + 1];  // memos[f] == f! once computed; 0 means "not yet"

// factorial_memoed returns f! (modulo 2^64 for f > 20), caching results
// for 0 <= f <= max_input so repeated queries cost O(1).
// Inputs <= 1 (including negatives) yield 1 without touching the table,
// and inputs above max_input are computed recursively without writing to
// it — the original indexed memos[f] unguarded, reading out of bounds
// for negative f and writing out of bounds for f > max_input.
unsigned long factorial_memoed(int f)
{
    if (f <= 1)
    {
        return 1;
    }
    if (f > MAX_INPUT_TABLE)
    {
        // Out of table range: compute without memoizing.
        return f * factorial_memoed(f - 1);
    }
    if (memos[f] == 0)
    {
        memos[f] = f * factorial_memoed(f - 1);
    }
    return memos[f];
}
// Parse each command-line argument as a non-negative integer and print
// its factorial.  Returns 0 on success, 1 if any argument was invalid.
//
// Bug fix: the original used atoi() and then inspected errno, but atoi()
// never sets errno on failure — it just returns 0 — so garbage input such
// as "abc" was silently treated as 0 and printed "0! = 1".  strtol() with
// an end-pointer detects both malformed input and out-of-range values.
int main(int argc, char **argv)
{
    if (argc < 2)
    {
        fprintf(stderr, "%s: at least one numerical argument is required\n", argv[0]);
        return 1;
    }

    // Allow the user to set NOMEMO in the environment to force use of the
    // naive factorial function. We will invoke using a function pointer to
    // the desired implementation.
    unsigned long (*factorial_func)(int) = &factorial_memoed;
    char *no_memo = getenv("NOMEMO");
    if (no_memo != NULL)
    {
        factorial_func = &factorial;
    }

    int exit_code = 0;
    for (int i = 1; i < argc; i++)
    {
        char *end = NULL;
        errno = 0;
        long parsed = strtol(argv[i], &end, 10);
        // Reject overflow (errno == ERANGE), an empty parse, and any
        // trailing non-numeric characters.
        if (errno != 0 || end == argv[i] || *end != '\0')
        {
            fprintf(stderr, "%s: invalid numerical argument \"%s\"\n", argv[0], argv[i]);
            exit_code = 1;
            continue;
        }
        if (parsed < 0)
        {
            fprintf(stderr, "%s: numerical argument must be non-negative\n", argv[0]);
            exit_code = 1;
            continue;
        }
        if (parsed > max_input)
        {
            printf("Sorry, I can only compute up to %d!\n", max_input);
            continue;
        }
        // Safe narrowing: 0 <= parsed <= max_input (65) fits in an int.
        int f = (int)parsed;
        printf("%d! = %lu\n", f, factorial_func(f));
    }
    return exit_code;
}
6a60d2812a89f7688fc38c06278dee85e1480a31 | 9,247 | h | C | src/API_generated/oyCMMapi9_s_.h | Acidburn0zzz/oyranos | b87dcf0e7a88e9b2f5f6c2e3314ddbd3af045575 | [
"BSD-3-Clause"
] | null | null | null | src/API_generated/oyCMMapi9_s_.h | Acidburn0zzz/oyranos | b87dcf0e7a88e9b2f5f6c2e3314ddbd3af045575 | [
"BSD-3-Clause"
] | null | null | null | src/API_generated/oyCMMapi9_s_.h | Acidburn0zzz/oyranos | b87dcf0e7a88e9b2f5f6c2e3314ddbd3af045575 | [
"BSD-3-Clause"
] | null | null | null | /** @file oyCMMapi9_s_.h
[Template file inheritance graph]
+-> oyCMMapi9_s_.template.h
|
+-> oyCMMapiFilter_s_.template.h
|
+-> oyCMMapi_s_.template.h
|
+-- Base_s_.h
* Oyranos is an open source Color Management System
*
* @par Copyright:
* 2004-2018 (C) Kai-Uwe Behrmann
*
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* new BSD - see: http://www.opensource.org/licenses/BSD-3-Clause
*/
#ifndef OY_CMM_API9_S__H
#define OY_CMM_API9_S__H
#ifdef __cplusplus
/*namespace oyranos {*/
extern "C" {
#endif /* __cplusplus */
#define oyCMMapi9Priv_m( var ) ((oyCMMapi9_s_*) (var))
typedef struct oyCMMapi9_s_ oyCMMapi9_s_;
#include <oyranos_object.h>
#include "oyranos_module_internal.h"
#include "oyCMMinfo_s_.h"
#include "oyCMMapi5_s_.h"
#include "oyCMMobjectType_s.h"
#include "oyConversion_s.h"
#include "oyCMMui_s_.h"
#include "oyCMMapi9_s.h"
/* Include "CMMapi9.private.h" { */
/** typedef oyConversion_Correct_f
* @brief Check for correctly adhering to policies
* @ingroup module_api
* @memberof oyCMMapi9_s
*
* Without any options the module shall
* perform graph analysis and correct the graph.
*
* @see oyConversion_Correct()
*
* @par Typical Options:
* - "command"-"help" - a string option issuing a help text as message
* - "verbose" - reporting changes as message
*
* @param conversion the to be checked configuration
* @param[in] flags for inbuild defaults |
* oyOPTIONSOURCE_FILTER;
* for options marked as advanced |
* oyOPTIONATTRIBUTE_ADVANCED |
* OY_SELECT_FILTER |
* OY_SELECT_COMMON
* @param options options to the policy module
* @return 0 - indifferent, >= 1 - error
* + a message should be sent
*
* @version Oyranos: 0.1.13
* @since 2009/07/23 (Oyranos: 0.1.10)
* @date 2010/11/27
*/
typedef int (*oyConversion_Correct_f) (
oyConversion_s * conversion,
uint32_t flags,
oyOptions_s * options );
/** typedef oyCMMGetFallback_f
* @brief get pattern specific module fallback
* @ingroup module_api
* @memberof oyCMMapi9_s
*
* @param[in] node the node context
* @param[in] flags unused
* @param[in] select_core apply to api4 core else to api7
* @param[in] allocate_func user supplied memory allocator
* @return the fallback string
*
* @version Oyranos: 0.9.6
* @date 2014/06/25
* @since 2014/06/25 (Oyranos: 0.9.6)
*/
typedef char * (*oyCMMGetFallback_f) ( oyFilterNode_s * node,
uint32_t flags,
int select_core,
oyAlloc_f allocate_func );
/** typedef oyCMMRegistrationToName_f
* @brief get names from a module registration
* @ingroup module_api
* @memberof oyCMMapi9_s
*
* @param[in] node the node context
* @param[in] name_type oyNAME_e, oyNAME_PATTERN
* @param[in] flags unused
* @param[in] select_core apply to api4 core else to api7
* @param[in] allocate_func user supplied memory allocator
* @return the fallback string
*
* @version Oyranos: 0.9.6
* @date 2014/06/29
* @since 2014/06/29 (Oyranos: 0.9.6)
*/
typedef char * (*oyCMMRegistrationToName_f) (
const char * registration,
int name_type,
uint32_t flags,
int select_core,
oyAlloc_f allocate_func );
/** typedef oyCMMGetDefaultPattern_f
* @brief get module default
* @ingroup module_api
* @memberof oyCMMapi9_s
*
* @param[in] pattern the basic pattern
* @param[in] flags unused
* @param[in] select_core apply to api4 core else to api7
* @param[in] allocate_func user supplied memory allocator
* @return the default pattern string
*
* @version Oyranos: 0.9.6
* @date 2014/07/21
* @since 2014/07/21 (Oyranos: 0.9.6)
*/
typedef char * (*oyCMMGetDefaultPattern_f) (
const char * base_pattern,
uint32_t flags,
int select_core,
oyAlloc_f allocate_func );
/* } Include "CMMapi9.private.h" */
/** @internal
* @struct oyCMMapi9_s_
* @brief Graph policies and various pattern handlers
* @ingroup module_api
* @extends oyCMMapiFilter_s
*/
struct oyCMMapi9_s_ {
/* Include "Struct.members.h" { */
const oyOBJECT_e type_; /**< The struct type tells Oyranos how to interprete hidden fields. @brief Type of object */
oyStruct_Copy_f copy; /**< @brief Copy function */
oyStruct_Release_f release; /**< @brief Release function */
oyObject_s oy_; /**< Features name and hash. Do not change during object life time. @brief Oyranos internal object */
/* } Include "Struct.members.h" */
/* Include "CMMapi.members.h" { */
oyCMMapi_s * next; /**< the next CMM api */
oyCMMInit_f oyCMMInit; /**< */
oyCMMMessageFuncSet_f oyCMMMessageFuncSet; /**< */
/** e.g. "sw/oyranos.org/color.tonemap.imaging/hydra.shiva.CPU.GPU" or "sw/oyranos.org/color/icc.lcms.CPU",
see as well @ref registration */
const char * registration;
/** 0: major - should be stable for the live time of a filter, \n
1: minor - mark new features, \n
2: patch version - correct errors */
int32_t version[3];
/** 0: last major Oyranos version during development time, e.g. 0
* 1: last minor Oyranos version during development time, e.g. 0
* 2: last Oyranos patch version during development time, e.g. 10
*/
int32_t module_api[3];
char * id_; /**< @private Oyranos id; keep to zero */
/* } Include "CMMapi.members.h" */
/* Include "CMMapiFilter.members.h" { */
oyCMMapi5_s_ * api5_; /**< @private meta module; keep to zero */
oyPointer_s * runtime_context; /**< data needed to run the filter */
/* } Include "CMMapiFilter.members.h" */
/* Include "CMMapi9.members.h" { */
/** check options for validy and correct */
oyCMMFilter_ValidateOptions_f oyCMMFilter_ValidateOptions;
oyWidgetEvent_f oyWidget_Event; /**< handle widget events */
const char * options; /**< default options */
oyCMMuiGet_f oyCMMuiGet; /**< xml ui elements for filter options*/
/**< XML namespace to describe the used data_types
* e.g. 'oy="http://www.oyranos.org/2009/oyranos"'
*/
const char * xml_namespace;
oyCMMobjectType_s ** object_types; /**< zero terminated list of types */
oyCMMGetText_f getText; /**< describe selectors in UI */
const char ** texts; /**< zero terminated categories for getText, e.g. {"///GPU","///CPU","//color",0} */
oyConversion_Correct_f oyConversion_Correct; /**< check a graph */
/** registration pattern which are supported by oyConversion_Correct \n
*
* e.g. for imaging this could be "//imaging"
*/
const char * pattern;
/** basic key which is used in the Oyranos DB together with "context",
* "renderer", "context_fallback" and "renderer_fallback" \n
*
* e.g. for CMMs this could be OY_CMM_STD
*/
const char * key_base;
/** get registration of fallback module for this pattern */
oyCMMGetFallback_f oyCMMGetFallback;
/** get name from module registration */
oyCMMRegistrationToName_f oyCMMRegistrationToName;
/** get the default pattern for a module group */
oyCMMGetDefaultPattern_f oyCMMGetDefaultPattern;
/* } Include "CMMapi9.members.h" */
};
oyCMMapi9_s_*
oyCMMapi9_New_( oyObject_s object );
oyCMMapi9_s_*
oyCMMapi9_Copy_( oyCMMapi9_s_ *cmmapi9, oyObject_s object);
oyCMMapi9_s_*
oyCMMapi9_Copy__( oyCMMapi9_s_ *cmmapi9, oyObject_s object);
int
oyCMMapi9_Release_( oyCMMapi9_s_ **cmmapi9 );
/* Include "CMMapi9.private_methods_declarations.h" { */
/* } Include "CMMapi9.private_methods_declarations.h" */
void oyCMMapi9_Release__Members( oyCMMapi9_s_ * cmmapi9 );
int oyCMMapi9_Init__Members( oyCMMapi9_s_ * cmmapi9 );
int oyCMMapi9_Copy__Members( oyCMMapi9_s_ * dst, oyCMMapi9_s_ * src);
#ifdef __cplusplus
} /* extern "C" */
/*}*/ /* namespace oyranos */
#endif /* __cplusplus */
#endif /* OY_CMM_API9_S__H */
| 33.025 | 138 | 0.578674 |
3a23b1cdcc7d7a815a9a0b9e12f897b2dc74819c | 289 | h | C | Example/NUKit/NLAppDelegate.h | liunina/NUKit | ebcb3a5ee1e0e606eefbb265583747d8c2cfc59e | [
"MIT"
] | null | null | null | Example/NUKit/NLAppDelegate.h | liunina/NUKit | ebcb3a5ee1e0e606eefbb265583747d8c2cfc59e | [
"MIT"
] | null | null | null | Example/NUKit/NLAppDelegate.h | liunina/NUKit | ebcb3a5ee1e0e606eefbb265583747d8c2cfc59e | [
"MIT"
] | null | null | null | //
// NLAppDelegate.h
// NUKit
//
// Created by i19850511@gmail.com on 12/05/2019.
// Copyright (c) 2019 i19850511@gmail.com. All rights reserved.
//
@import UIKit;
@interface NLAppDelegate : UIResponder <UIApplicationDelegate>
@property (strong, nonatomic) UIWindow *window;
@end
| 18.0625 | 64 | 0.719723 |
b17f9028679d54c080798c48afe2e26293fde0c0 | 5,624 | c | C | c2000/C2000Ware_1_00_06_00/device_support/f2806x/examples/cla/exp10/exp10_shared_data.c | ramok/Themis_ForHPSDR | d0f323a843ac0a488ef816ccb7c828032855a40a | [
"Unlicense"
] | null | null | null | c2000/C2000Ware_1_00_06_00/device_support/f2806x/examples/cla/exp10/exp10_shared_data.c | ramok/Themis_ForHPSDR | d0f323a843ac0a488ef816ccb7c828032855a40a | [
"Unlicense"
] | null | null | null | c2000/C2000Ware_1_00_06_00/device_support/f2806x/examples/cla/exp10/exp10_shared_data.c | ramok/Themis_ForHPSDR | d0f323a843ac0a488ef816ccb7c828032855a40a | [
"Unlicense"
] | 1 | 2021-07-21T08:10:37.000Z | 2021-07-21T08:10:37.000Z | //###########################################################################
//
// TITLE: exp10_shared_data.c
//
// Description: Declare shared memory variables and assign them to specific
// CLA-accessible memory locations
//
//! \addtogroup f2806x_example_cla_list
//! \b Memory \b Allocation \n
//! - CLA1 Math Tables (RAML2)
//! - CLAexpTable - Lookup table
//! - CLA1 to CPU Message RAM
//! - ExpRes - Result of the exponentiation operation
//! - CPU to CLA1 Message RAM
//! - Val - Test Input
//
//###########################################################################
// $TI Release: F2806x Support Library v2.04.00.00 $
// $Release Date: Thu Oct 18 15:47:20 CDT 2018 $
// $Copyright:
// Copyright (C) 2009-2018 Texas Instruments Incorporated - http://www.ti.com/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// Neither the name of Texas Instruments Incorporated nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// $
//###########################################################################
//
// Included Files
//
#include "DSP28x_Project.h"
//
// Include the test header file whose name is based on the test name
// which is defined by the macro TEST on the command line
//
#include XSTRINGIZE(XCONCAT(TEST_NAME,_shared.h))
//
// Globals
//
//
// Ensure that all data is placed in the data rams
//
//
// Task 1 (C) Variables
//
float y[BUFFER_SIZE];
#pragma DATA_SECTION(Val,"CpuToCla1MsgRAM");
float Val;
#pragma DATA_SECTION(ExpRes,"Cla1ToCpuMsgRAM")
float ExpRes;
//
// Task 2 (C) Variables
//
//
// Task 3 (C) Variables
//
//
// Task 4 (C) Variables
//
//
// Task 5 (C) Variables
//
//
// Task 6 (C) Variables
//
//
// Task 7 (C) Variables
//
//
// Task 8 (C) Variables
//
//
// Common (C) Variables
//
//
// The Exponential table
//
#pragma DATA_SECTION(CLAexpTable,"CLA1mathTables")
float CLAexpTable[] = {
1.0, 2.7182818285e+0, 7.3890560989e+0, 2.0085536923e+1,
5.4598150033e+1, 1.484131591e+2, 4.0342879349e+2, 1.0966331584e+3,
2.980957987e+3, 8.1030839276e+3, 2.2026465795e+4, 5.9874141715e+4,
1.6275479142e+5, 4.4241339201e+5, 1.2026042842e+6, 3.2690173725e+6,
8.8861105205e+6, 2.4154952754e+7, 6.5659969137e+7, 1.7848230096e+8,
4.8516519541e+8, 1.3188157345e+9, 3.5849128461e+9, 9.7448034462e+9,
2.648912213e+10, 7.2004899337e+10, 1.9572960943e+11, 5.320482406e+11,
1.4462570643e+12, 3.9313342971e+12, 1.0686474582e+13, 2.9048849665e+13,
7.8962960183e+13, 2.1464357979e+14, 5.8346174253e+14, 1.5860134523e+15,
4.3112315471e+15, 1.1719142373e+16, 3.1855931757e+16, 8.6593400424e+16,
2.3538526684e+17, 6.3984349353e+17, 1.7392749415e+18, 4.7278394682e+18,
1.2851600114e+19, 3.4934271057e+19, 9.4961194206e+19, 2.5813128862e+20,
7.0167359121e+20, 1.9073465725e+21, 5.1847055286e+21, 1.4093490824e+22,
3.8310080007e+22, 1.0413759433e+23, 2.8307533033e+23, 7.6947852651e+23,
2.091659496e+24, 5.6857199993e+24, 1.5455389356e+25, 4.2012104038e+25,
1.1420073898e+26, 3.1042979357e+26, 8.4383566687e+26, 2.2937831595e+27,
6.2351490808e+27, 1.6948892444e+28, 4.6071866343e+28, 1.2523631708e+29,
3.4042760499e+29, 9.2537817256e+29, 2.5154386709e+30, 6.8376712298e+30,
1.8586717453e+31, 5.0523936303e+31, 1.3733829795e+32, 3.7332419968e+32,
1.0148003881e+33, 2.7585134545e+33, 7.498416997e+33, 2.0382810665e+34,
5.5406223844e+34, 1.5060973146e+35, 4.0939969621e+35, 1.1128637548e+36,
3.0250773222e+36, 8.2230127146e+36, 2.2352466037e+37, 6.0760302251e+37,
1.651636255e+38
};
#pragma DATA_SECTION(CLALOG10_E,"CpuToCla1MsgRAM");
float CLALOG10_E = 2.30258509;
//
// Coefficients in the Taylor series expansion of exp(A/B)
//
#pragma DATA_SECTION(CLAINV1,"CpuToCla1MsgRAM")
float CLAINV1 = 1.0 ;
#pragma DATA_SECTION(CLAINV2,"CpuToCla1MsgRAM")
float CLAINV2 = 0.5 ; //1/2
#pragma DATA_SECTION(CLAINV3,"CpuToCla1MsgRAM")
float CLAINV3 = 0.333333333333333333; //1/3
#pragma DATA_SECTION(CLAINV4,"CpuToCla1MsgRAM")
float CLAINV4 = 0.25; //1/4
#pragma DATA_SECTION(CLAINV5,"CpuToCla1MsgRAM")
float CLAINV5 = 0.20; //1/5
#pragma DATA_SECTION(CLAINV6,"CpuToCla1MsgRAM")
float CLAINV6 = 0.166666666666666666; //1/6
#pragma DATA_SECTION(CLAINV7,"CpuToCla1MsgRAM")
float CLAINV7 = 0.142857142857142857; //1/7
//
// End of File
//
| 33.47619 | 78 | 0.696835 |
55d2f9bd8c4cb4f2d19a0d37fa756a3b58194ff8 | 501 | h | C | Tikeyc/Classes/MainPart/MainCenter/Baisibudejie/View/JYJTopicVideoView.h | tikeyc/Tikeyc | 98418c53100029439138af6904f2ba96805dd75d | [
"Apache-2.0"
] | 3 | 2016-08-17T09:44:56.000Z | 2018-03-28T06:31:04.000Z | Tikeyc/Classes/MainPart/MainCenter/Baisibudejie/View/JYJTopicVideoView.h | tikeyc/Tikeyc | 98418c53100029439138af6904f2ba96805dd75d | [
"Apache-2.0"
] | null | null | null | Tikeyc/Classes/MainPart/MainCenter/Baisibudejie/View/JYJTopicVideoView.h | tikeyc/Tikeyc | 98418c53100029439138af6904f2ba96805dd75d | [
"Apache-2.0"
] | null | null | null | //
// JYJTopicVideoView.h
// JYJ不得姐
//
// Created by JYJ on 16/5/24.
// Copyright © 2016年 baobeikeji. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "ZFPlayer.h"
@class JYJTopic;
/// View that hosts playback of one topic's video via ZFPlayer.
@interface JYJTopicVideoView : UIView
/** Video model data */
@property (nonatomic, strong) JYJTopic *topic;
@property (nonatomic, strong) UITableView *tableView; // table the video cell lives in — presumably used to track scrolling; confirm in .m
@property (nonatomic, strong) NSIndexPath *currentIndexPath; // index path of the row currently playing — TODO confirm
@property (nonatomic, strong) ZFPlayerView *playerView; // underlying ZFPlayer playback view
/// Factory method returning a new video view.
+ (instancetype)videoView;
@end
| 19.269231 | 60 | 0.728543 |
1d053918aa9f9fc3f2979783274849e70d212f9b | 3,234 | c | C | Laboratorio1.Parcial2_Pandemia/src/Read_From_Directory/ReadFiles.c | caidevOficial/C_Labo1_Progra1 | db37a077347160062e3db960a4ec095afa273acf | [
"MIT"
] | 1 | 2020-09-15T05:31:01.000Z | 2020-09-15T05:31:01.000Z | Laboratorio1.Parcial2_Pandemia/src/Read_From_Directory/ReadFiles.c | caidevOficial/Laboratorio1_Programacion1 | db37a077347160062e3db960a4ec095afa273acf | [
"MIT"
] | null | null | null | Laboratorio1.Parcial2_Pandemia/src/Read_From_Directory/ReadFiles.c | caidevOficial/Laboratorio1_Programacion1 | db37a077347160062e3db960a4ec095afa273acf | [
"MIT"
] | 1 | 2021-09-16T23:34:52.000Z | 2021-09-16T23:34:52.000Z | /*
* ============================================================================
* This program is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation, either version 3 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
* ============================================================================
* Type: Recuperatorio segundo parcial - Laboratorio 1.
* Version : Beta 1.1.2 [Beta v1.1.2] - FacuFalcone_ABM_Pandemia_[Linux]
* ============================================================================
*/
#include "../Read_From_Directory/ReadFiles.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <dirent.h>
#include <errno.h>
/*
 * Report a failure: perror() prints S followed by a textual
 * description of the current errno value on stderr.
 */
void error(const char *s){
    perror(s);
}
/*
 * Print one directory entry's name and size, scaled to the largest unit
 * (Bytes / KiloBytes / MegaBytes / GigaBytes) that keeps the value small.
 * Entries that cannot be opened or sized are listed with "(No info.)".
 *
 * Fixes over the original:
 *  - open in binary mode ("rb") so fseek/ftell report true byte counts;
 *  - check fseek() and ftell() results instead of assuming success;
 *  - a unit table + loop replaces the nested strcpy ladder, which also
 *    handled exact powers of 1024 inconsistently (`< 1024` on the first
 *    step vs `> 1024` afterwards).
 */
void processFile(char *archivo){
    FILE *fich;
    long tam;
    double ftam;
    const char *unidades[] = { "Bytes", "KiloBytes", "MegaBytes", "GigaBytes" };
    size_t u = 0;

    fich = fopen(archivo, "rb");
    if (fich && fseek(fich, 0L, SEEK_END) == 0 && (tam = ftell(fich)) >= 0){
        ftam = (double)tam;
        /* Scale by 1024 until the value fits the current unit (GB max). */
        while (ftam >= 1024 && u < 3){
            ftam /= 1024;
            u++;
        }
        printf (" [%-23s] [%06.2f] [%-10s]\n", archivo, ftam, unidades[u]);
    }else{
        /* Could not open or size the file: list the name only. */
        printf (" [%-23s] (No info.)\n", archivo);
    }
    if (fich){
        fclose(fich);
    }
}
/*
 * List the files in the current working directory, printing each one's
 * size via processFile().  The entries ".", "..", "src" and "Debug" are
 * skipped.  Returns 1 on success, 0 if the directory could not be opened.
 * (User-facing messages are in Spanish by design; left untouched.)
 */
int Read_Directory(){
    int success = 0;
    DIR *dir; // Handle for the directory being read.
    struct dirent *ent; // Holds the info of each entry as it is read.

    dir = opendir("."); // Open the current directory.

    if (dir == NULL){ /* Check that opening did not fail */
        error("No puedo abrir el directorio");
    }else{
        printf("\n __________________________________________________\n"
                "  [Message] Archivos actuales en el directorio ROOT:\n"
                " __________________________________________________\n");
        while ((ent = readdir (dir)) != NULL){
            /* ls-like listing: strcmp() returns nonzero when the names
             * differ, so the chain is true only for entries that match
             * none of the excluded names. */
            if((strcmp(ent->d_name, ".")) &&
                (strcmp(ent->d_name, "..")) &&
                (strcmp(ent->d_name, "src")) &&
                (strcmp(ent->d_name,"Debug"))){
                // Process files, excluding the src and Debug folders.
                processFile(ent->d_name); // Get the entry's name and process it.
            }
        }
        printf(" __________________________________________________\n"
                "\n");
        closedir (dir);
        success = 1;
    }
    return success;
}
| 32.34 | 90 | 0.600186 |
0e87d3dedf6d81852f239fe141c4c2b8b4112a49 | 2,535 | c | C | devices/devices.c | jpmens/tripp | 3a9391fe78ef37de136ed6a00ebfd2e4a35e4f86 | [
"BSD-1-Clause"
] | 18 | 2017-08-13T18:19:02.000Z | 2021-05-10T00:01:51.000Z | devices/devices.c | jpmens/tripp | 3a9391fe78ef37de136ed6a00ebfd2e4a35e4f86 | [
"BSD-1-Clause"
] | 18 | 2018-01-08T09:13:25.000Z | 2020-12-01T08:06:37.000Z | devices/devices.c | jpmens/tripp | 3a9391fe78ef37de136ed6a00ebfd2e4a35e4f86 | [
"BSD-1-Clause"
] | 8 | 2017-12-21T07:15:08.000Z | 2020-12-21T17:24:34.000Z | /*
* qtripp
* Copyright (C) 2017-2020 Jan-Piet Mens <jp@mens.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "devices.h"
#include "devices.i" /* generated from devices.j2 */
static struct _device *myhash = NULL;
/*
 * Populate the uthash table `myhash` from the statically generated
 * `devices` array (see devices.i, generated from devices.j2).  The
 * array is terminated by an entry whose id is NULL.  A duplicate id
 * indicates corrupt generated data and is fatal (exit code 7).
 */
void load_devices()
{
	struct _device *rp, *s;

	for (rp = devices; rp->id != NULL; rp++) {
		/* Guard against duplicate ids before inserting. */
		HASH_FIND_STR(myhash, rp->id, s);
		if (s != NULL) {
			fprintf(stderr, "Fatal: device hash for %s already in hash\n", rp->id);
			exit(7);
		}
		/* Key by the entry's own id string; entries live in static storage. */
		HASH_ADD_KEYPTR(hh, myhash, rp->id, strlen(rp->id), rp);
	}
}
/*
 * Empty the `myhash` table.  Only the hash links are torn down; the
 * entries themselves are static (from devices.i) and must not be freed.
 */
void free_devices()
{
	struct _device *s, *tmp;

	HASH_ITER(hh, myhash, s, tmp) {
		HASH_DEL(myhash, s);
		/* nothing to free as the structure itself is static */
		// free(s);
	}
}
/*
 * Resolve a device subtype key (e.g. GTFRI) against model/major/minor
 * ("MOMAMI"), trying progressively more generic ids in order:
 *   1. <key>-MOMAMI    exact model and version
 *   2. <key>-MO0000    model with wildcarded version
 *   3. <key>-00MAMI    wildcarded model with version
 *   4. <key>-000000    fully generic fallback
 * Returns the first matching entry, or NULL if none exists.
 */
struct _device *lookup_devices(char *key, char *momami)
{
	struct _device *entry = NULL;
	char id[64];
	int attempt;

	for (attempt = 0; attempt < 4; attempt++) {
		switch (attempt) {
		case 0:
			snprintf(id, sizeof(id), "%s-%s", key, momami);
			break;
		case 1:
			snprintf(id, sizeof(id), "%s-%.2s0000", key, momami);
			break;
		case 2:
			snprintf(id, sizeof(id), "%s-00%s", key, momami + 2);
			break;
		default:
			snprintf(id, sizeof(id), "%s-000000", key);
			break;
		}
		HASH_FIND_STR(myhash, id, entry);
		if (entry != NULL) {
			break;
		}
	}
	return entry;
}
#ifdef TESTING
/*
 * Smoke test (compiled only with -DTESTING): loads the device table,
 * looks up one id expected to resolve (exact or via fallback) and one
 * exercising the wildcard chain, printing whatever matches.
 */
int main()
{
	struct _device *rp;

	load_devices();

	rp = lookup_devices("GTFRI", "2C0600");
	if (rp) {
		printf("%s -> %d\n", rp->id, rp->num);
	}

	rp = lookup_devices("GTFRI", "308891");
	if (rp) {
		printf("%s -> %d\n", rp->id, rp->num);
	}

	free_devices();
}
| 23.691589 | 83 | 0.6643 |
d9d727ca68a338ffc20f85ec7e55a17d96c7e491 | 2,182 | h | C | src/npf/dpi/npf_appdb.h | srinivasknarayan/vyatta-dataplane | d66f721b04fc510c01e5cf0fd786e3ff42aec408 | [
"BSD-2-Clause"
] | 29 | 2019-11-18T20:02:45.000Z | 2021-11-19T11:59:45.000Z | src/npf/dpi/npf_appdb.h | srinivasknarayan/vyatta-dataplane | d66f721b04fc510c01e5cf0fd786e3ff42aec408 | [
"BSD-2-Clause"
] | 10 | 2019-12-09T15:32:17.000Z | 2021-04-28T14:34:34.000Z | src/npf/dpi/npf_appdb.h | srinivasknarayan/vyatta-dataplane | d66f721b04fc510c01e5cf0fd786e3ff42aec408 | [
"BSD-2-Clause"
] | 24 | 2019-11-16T07:13:22.000Z | 2022-03-29T15:56:42.000Z | /*
* Copyright (c) 2020 AT&T Intellectual Property. All rights reserved.
*
* SPDX-License-Identifier: LGPL-2.1-only
*/
#ifndef NPF_APPDB_H
#define NPF_APPDB_H
#include <stdbool.h>
#include <stdint.h>
#include "json_writer.h"
/**
* Walker prototype.
*/
struct adb_entry;
typedef int (app_walker_t)(json_writer_t *json, struct adb_entry *entry);
/**
* Initialise the application database.
* Returns 0 on success; errno on failure.
*/
int appdb_init(void);
/**
* Destroy the application database.
*/
void appdb_destroy(void);
/**
* Write the JSON representation of the application database name entry, given
* with the data pointer.
* Intended for use with appdb_name_walk. Therefore, returns 0 on success.
*/
int appdb_name_entry_to_json(json_writer_t *json, struct adb_entry *entry);
/**
* Walk the application database name entries.
*/
int appdb_name_walk(json_writer_t *json, app_walker_t *callback);
/*
* Lookup the given application name in the application DB.
* Return the application ID, or DPI_APP_NA if not found.
*/
uint32_t appdb_name_to_id(const char *name);
/**
* Write the JSON representation of the application database ID entry, given
* with the data pointer.
* Intended for use with appdb_id_walk. Therefore, returns 0 on success.
*/
int appdb_id_entry_to_json(json_writer_t *json, struct adb_entry *entry);
/**
* Walk the application database ID entries.
*/
int appdb_id_walk(json_writer_t *json, app_walker_t *callback);
/*
* Lookup the given application ID in the application DB.
* Return the application name, or NULL if not found.
*/
const char *appdb_id_to_name(uint32_t app_id);
/*
* Find an existing app DB entry with the given name and increment its
* refcount. If not found, then create a new entry.
*/
struct adb_entry *appdb_find_or_alloc(const char *name);
/*
* Decrement the given appDB entry's refcount.
* If zero then remove the entry from the appDB.
*/
bool appdb_dealloc(struct adb_entry *entry);
/**
* Return the application ID from the given adb_entry,
* or return DPI_APP_USER_NA if the given entry doesn't exist.
*/
uint32_t appdb_entry_get_id(struct adb_entry *e);
#endif /* NPF_APPDB_H */
| 25.372093 | 78 | 0.742438 |
ce1c0956d3730e503a979658677e7cabb7b11bc6 | 272 | h | C | Example/ThemeManager/zhAppDelegate.h | snail-z/ThemeManager | 866fb0297181ad15b943c7f81f6165b96b190cc0 | [
"MIT"
] | 172 | 2017-08-31T16:42:03.000Z | 2021-08-06T03:35:49.000Z | Example/ThemeManager/zhAppDelegate.h | snail-z/ThemeManager | 866fb0297181ad15b943c7f81f6165b96b190cc0 | [
"MIT"
] | 1 | 2019-08-15T10:04:05.000Z | 2019-08-15T10:04:05.000Z | Example/ThemeManager/zhAppDelegate.h | snail-z/ThemeManager | 866fb0297181ad15b943c7f81f6165b96b190cc0 | [
"MIT"
] | 30 | 2017-09-01T05:05:37.000Z | 2021-01-04T03:06:14.000Z | //
// zhAppDelegate.h
// ThemeManager
//
// Created by snail-z on 08/31/2017.
// Copyright (c) 2017 snail-z. All rights reserved.
//
@import UIKit;
@interface zhAppDelegate : UIResponder <UIApplicationDelegate>
@property (strong, nonatomic) UIWindow *window;
@end
| 17 | 62 | 0.709559 |
dbd66c217831a8292788b49b4187b8bd7a46d22f | 1,001 | h | C | Twitter-Dumped/7.51.5/t1/TFNTwitterGeoDataProvider-Protocol.h | ichitaso/TwitterListEnabler | d4d9ba973e59ff7f0d97ae74fc473bdd0aea54df | [
"MIT"
] | 1 | 2019-10-15T09:26:49.000Z | 2019-10-15T09:26:49.000Z | Twitter-Dumped/7.52/TFNTwitterGeoDataProvider-Protocol.h | ichitaso/TwitterListEnabler | d4d9ba973e59ff7f0d97ae74fc473bdd0aea54df | [
"MIT"
] | null | null | null | Twitter-Dumped/7.52/TFNTwitterGeoDataProvider-Protocol.h | ichitaso/TwitterListEnabler | d4d9ba973e59ff7f0d97ae74fc473bdd0aea54df | [
"MIT"
] | 1 | 2019-11-17T06:06:49.000Z | 2019-11-17T06:06:49.000Z | //
// Generated by class-dump 3.5 (64 bit) (Debug version compiled Sep 17 2017 16:24:48).
//
// class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard.
//
#import <T1Twitter/NSObject-Protocol.h>
@class NSString, TFSTwitterLocation;
@protocol TFNTwitterGeoDataProvider <NSObject>
@property(readonly, getter=isUsingRetainedGeoDataAllowed) _Bool usingRetainedGeoDataAllowed;
@property double locationUpdateDuration;
@property double locationUpdateInterval;
@property(readonly, getter=isEnabled) _Bool enabled;
@property(readonly, copy) NSString *timezoneName;
@property(readonly) _Bool locationServicesAvailable;
@property(readonly) TFSTwitterLocation *currentLocation;
@property(readonly, getter=isCachedLocationAvailable) _Bool cachedLocationAvailable;
- (void)getRealtimeLocationWithCallback:(void (^)(_Bool, TFSTwitterLocation *, NSError *))arg1;
- (void)getLocationServicesAvailableWithCallback:(void (^)(_Bool))arg1;
- (void)stopUpdates;
- (void)startUpdates;
@end
| 38.5 | 95 | 0.795205 |
e0400621956eaa10ac1abaae11ff7d7e582bcdc7 | 1,574 | h | C | multiplayer_game/src/lflare_handler.h | llGuy/gamedev | 16aa203934fd767926c58558e021630288556399 | [
"MIT"
] | null | null | null | multiplayer_game/src/lflare_handler.h | llGuy/gamedev | 16aa203934fd767926c58558e021630288556399 | [
"MIT"
] | 4 | 2018-12-24T11:16:53.000Z | 2018-12-24T11:20:29.000Z | multiplayer_game/src/lflare_handler.h | llGuy/gamedev | 16aa203934fd767926c58558e021630288556399 | [
"MIT"
] | null | null | null | #ifndef _LENS_FLARE_H_
#define _LENS_FLARE_H_
#include <glm/glm.hpp>
#if (defined cpp_no_optional)
#include "optional.hpp"
#else
#include <optional>
#endif
#include "texture.h"
#include "buffer.h"
#include "vao.h"
#include <array>
namespace mulgame {
class LFlareHandler
{
public:
static constexpr uint32_t NUM_FLARE_TEXTURES = 10;
LFlareHandler(void);
std::optional<glm::vec2> SunScreenCoord(const glm::vec3& lightSource,
const glm::mat4& view, const glm::mat4& projection);
void UpdatePositions(const glm::vec3& lightSource,
const glm::mat4& view, const glm::mat4& projection);
Texture& Tex(uint32_t index = 0)
{
return m_textures[index];
}
VAO& VertexArray(void)
{
return m_vao;
}
Buffer& OGLBuffer(void)
{
return m_buffer;
}
glm::vec2& Position(uint32_t index = 0)
{
return m_flarePositions[index];
}
float Scale(uint32_t index = 0)
{
return m_flareScales[index];
}
bool Visible(void)
{
return m_visible;
}
float Brightness(void)
{
return m_brightness;
}
private:
void CreateBuffer(Buffer& buffer, glm::vec2* vertices, uint32_t size);
void CreateVAO(VAO& vao, Buffer& buffer);
void CalculateFlarePositions(const glm::vec2& suntoCenter, const glm::vec2& sunCoords);
private:
VAO m_vao;
Buffer m_buffer;
std::array<Texture, NUM_FLARE_TEXTURES> m_textures;
std::array<glm::vec2, NUM_FLARE_TEXTURES> m_flarePositions;
std::array<float, NUM_FLARE_TEXTURES> m_flareScales;
glm::vec2 m_center;
float m_brightness;
float m_spacing;
bool m_visible;
};
}
#endif
| 20.179487 | 89 | 0.710292 |
0d5d0d257860e7838f87ef9c913cba3e27058aeb | 5,973 | h | C | variant/internal/variant_traits.h | ofats/variant | 4d7cd158a2286b28ca8db50c1111a223150a6807 | [
"MIT"
] | null | null | null | variant/internal/variant_traits.h | ofats/variant | 4d7cd158a2286b28ca8db50c1111a223150a6807 | [
"MIT"
] | null | null | null | variant/internal/variant_traits.h | ofats/variant | 4d7cd158a2286b28ca8db50c1111a223150a6807 | [
"MIT"
] | null | null | null | #pragma once
#include <exception>
#include "matrix_ops.h"
namespace base {
template <class... Ts>
class variant;
class bad_variant_access : public std::exception {};
namespace detail {
struct variant_accessor {
template <std::size_t I, class... Ts>
static base::type_pack_element_t<I, Ts...>& get(variant<Ts...>& v);
template <std::size_t I, class... Ts>
static const base::type_pack_element_t<I, Ts...>& get(
const variant<Ts...>& v);
template <std::size_t I, class... Ts>
static base::type_pack_element_t<I, Ts...>&& get(variant<Ts...>&& v);
template <std::size_t I, class... Ts>
static const base::type_pack_element_t<I, Ts...>&& get(
const variant<Ts...>&& v);
};
constexpr std::size_t variant_npos = -1;
template <class T, class... Ts>
constexpr std::size_t index_of_impl() {
bool bs[] = {std::is_same<T, Ts>::value...};
std::size_t result = variant_npos;
std::size_t count = 0;
for (std::size_t i = 0; i < sizeof...(Ts); ++i) {
if (bs[i]) {
++count;
result = i;
}
}
return count == 1 ? result : variant_npos;
}
// Given some time `T` and type pack `Ts`, returns relative position of `T` in
// `Ts`. If `T` not presented in `Ts` or `T` presented in `Ts` more then once,
// returns `variant_npos`.
template <class T, class... Ts>
constexpr std::size_t index_of = index_of_impl<T, Ts...>();
static_assert(index_of<int, int, double> == 0, "");
static_assert(index_of<int, double> == variant_npos, "");
static_assert(index_of<int, int, double, int> == variant_npos, "");
static_assert(index_of<int> == variant_npos, "");
template <class F, class Indexes, class... Vs>
struct visit_concrete_result;
template <class F, std::size_t... indexes, class... Vs>
struct visit_concrete_result<F, std::index_sequence<indexes...>, Vs...>
: base::invoke_result<F, decltype(variant_accessor::get<indexes>(
std::declval<Vs>()))...> {};
template <class F, class Indexes, class... Vs>
using visit_concrete_result_t =
base::subtype<visit_concrete_result<F, Indexes, Vs...>>;
template <class T, class... Ts>
constexpr bool types_are_same = base::conjunction_v<std::is_same<T, Ts>...>;
template <class F, class Indexes, class... Vs>
struct is_visitable_concrete;
template <class F, std::size_t... indexes, class... Vs>
struct is_visitable_concrete<F, std::index_sequence<indexes...>, Vs...>
: base::is_invocable<F, decltype(variant_accessor::get<indexes>(
std::declval<Vs>()))...> {};
template <class F, bool typesAreSame, class... Vs>
struct visit_result_if_same
: base::invoke_result<F, decltype(variant_accessor::get<0>(
std::declval<Vs>()))...> {};
template <class F, class... Vs>
struct visit_result_if_same<F, false, Vs...> {};
template <class F, bool invokable, class IndexPacks, class... Vs>
struct visit_result_if_visitable {};
template <class F, class... IndexPacks, class... Vs>
struct visit_result_if_visitable<F, true, base::type_pack<IndexPacks...>, Vs...>
: visit_result_if_same<
F, types_are_same<visit_concrete_result_t<F, IndexPacks, Vs...>...>,
Vs...> {};
template <class F, class IndexPacks, class... Vs>
struct visit_result_impl;
template <class F, class... IndexPacks, class... Vs>
struct visit_result_impl<F, base::type_pack<IndexPacks...>, Vs...>
: visit_result_if_visitable<
F,
base::conjunction_v<is_visitable_concrete<F, IndexPacks, Vs...>...>,
base::type_pack<IndexPacks...>, Vs...> {};
template <class F, class... Vs>
struct visit_result
: visit_result_impl<
F,
decltype(matops::build_all_matrix_indexes(
std::index_sequence<
base::template_parameters_count_v<std::decay_t<Vs>>...>{})),
Vs...> {};
template <class F, class... Vs>
using visit_result_t = base::subtype<visit_result<F, Vs...>>;
template <class R, class FRef, class... VRefs, std::size_t... ids>
R unwrap_indexes(FRef f, VRefs... vs, std::index_sequence<ids...>) {
return std::forward<FRef>(f)(
variant_accessor::get<ids - 1>(std::forward<VRefs>(vs))...);
}
// Normal case (when no one variant is valueless by exception).
template <class R, class Indexes, bool valueless, class FRef, class... VRefs>
constexpr std::enable_if_t<!valueless, R> visit_concrete(FRef f, VRefs... vs) {
return unwrap_indexes<R, FRef, VRefs...>(
std::forward<FRef>(f), std::forward<VRefs>(vs)..., Indexes{});
}
// Valueless case (when some of variants is valueless by exception).
template <class R, class Indexes, bool valueless, class FRef, class... VRefs>
constexpr std::enable_if_t<valueless, R> visit_concrete(FRef, VRefs...) {
throw bad_variant_access{};
}
// (size_t)(-1) + 1 == 0, so if one of indexes is 0, variant is valueless.
template <std::size_t... ids>
constexpr bool check_valueless(std::index_sequence<ids...>) {
const bool bs[] = {(ids == 0)...};
for (const bool b : bs) {
if (b) {
return true;
}
}
return false;
}
template <class R, class F, class... Vs, class... IndexPacks>
R visit(F&& f, base::type_pack<IndexPacks...>, Vs&&... vs) {
using handler_type = R (*)(F&&, Vs && ...);
using FakeSizes = std::index_sequence<
1 + base::template_parameters_count_v<std::decay_t<Vs>>...>;
static constexpr handler_type handlers[] = {
visit_concrete<R, IndexPacks, check_valueless(IndexPacks{}), F&&,
Vs&&...>...};
const std::size_t idx =
matops::normal_to_flat_index(FakeSizes{}, (vs.index() + 1)...);
return handlers[idx](std::forward<F>(f), std::forward<Vs>(vs)...);
}
template <class R, class F, class T>
R call_if_same(F&& f, T&& a, T&& b) {
return std::forward<F>(f)(std::forward<T>(a), std::forward<T>(b));
}
template <class R, class F, class T, class U>
R call_if_same(F&&, T&&, U&&) { // Will never be called
std::terminate();
}
} // namespace detail
} // namespace base
| 32.818681 | 80 | 0.641888 |
ef227d0f3919bcae94a424643756293c80226e6b | 7,917 | c | C | STM32F030/STM32F0xx_StdPeriph_Lib_V1.5.0/Projects/STM32F0xx_StdPeriph_Examples/COMP/COMP_PulseWidthMeasurement/main.c | EEWX/VFD28016-display | 94f799cbb890784818ba3e87457d68dfe88c0bbb | [
"CC-BY-4.0"
] | null | null | null | STM32F030/STM32F0xx_StdPeriph_Lib_V1.5.0/Projects/STM32F0xx_StdPeriph_Examples/COMP/COMP_PulseWidthMeasurement/main.c | EEWX/VFD28016-display | 94f799cbb890784818ba3e87457d68dfe88c0bbb | [
"CC-BY-4.0"
] | null | null | null | STM32F030/STM32F0xx_StdPeriph_Lib_V1.5.0/Projects/STM32F0xx_StdPeriph_Examples/COMP/COMP_PulseWidthMeasurement/main.c | EEWX/VFD28016-display | 94f799cbb890784818ba3e87457d68dfe88c0bbb | [
"CC-BY-4.0"
] | null | null | null | /**
******************************************************************************
* @file COMP/COMP_PulseWidthMeasurement/main.c
* @author MCD Application Team
* @version V1.4.0
* @date 24-July-2014
* @brief Main program body
******************************************************************************
* @attention
*
* <h2><center>© COPYRIGHT 2014 STMicroelectronics</center></h2>
*
* Licensed under MCD-ST Liberty SW License Agreement V2, (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.st.com/software_license_agreement_liberty_v2
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "main.h"
/** @addtogroup STM32F0xx_StdPeriph_Examples
* @{
*/
/** @addtogroup COMP_PulseWidthMeasurement
* @{
*/
/* Private typedef -----------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
__IO uint32_t Capture = 0;
__IO uint32_t MeasuredPulse = 0;
__IO uint32_t DisplayActive = 0;
/* Private function prototypes -----------------------------------------------*/
/* Private functions ---------------------------------------------------------*/
static void DAC_Config(void);
static void COMP_Config(void);
static void TIM_Config(void);
static void DisplayOnLCD(uint32_t data);
/**
* @brief Main program.
* @param None
* @retval None
*/
int main(void)
{
/*!< At this stage the microcontroller clock setting is already configured,
this is done through SystemInit() function which is called from startup
file (startup_stm32f0xx.s) before to branch to application main.
To reconfigure the default setting of SystemInit() function, refer to
system_stm32f0xx.c file
*/
/* Initialize the TFT-LCD */
#ifdef USE_STM320518_EVAL
STM320518_LCD_Init();
#else
STM32072B_LCD_Init();
#endif /* USE_STM320518_EVAL */
/* Clear the TFT-LCD */
LCD_Clear(LCD_COLOR_WHITE);
/* DAC Channel1 configuration */
DAC_Config();
/* COMP1 Configuration */
COMP_Config();
/* TIM2 Configuration in input capture mode */
TIM_Config();
/* Infinite loop */
while (1)
{
if (DisplayActive != 0)
{
/* Compute the pulse width in us */
MeasuredPulse = (uint32_t)(((uint64_t) Capture * 1000000) / ((uint32_t)SystemCoreClock));
/* Display measured pulse width on Glass LCD and color LCD */
DisplayOnLCD(MeasuredPulse);
DisplayActive = 0;
}
}
}
/**
* @brief Configures the DAC channel 1 with output buffer enabled.
* @param None
* @retval None
*/
static void DAC_Config(void)
{
/* Init Structure definition */
DAC_InitTypeDef DAC_InitStructure;
/* DAC clock enable */
RCC_APB1PeriphClockCmd(RCC_APB1Periph_DAC, ENABLE);
/* Fill DAC InitStructure */
DAC_StructInit(&DAC_InitStructure);
DAC_InitStructure.DAC_Trigger = DAC_Trigger_None;
DAC_InitStructure.DAC_OutputBuffer = DAC_OutputBuffer_Enable;
/* DAC Channel1 Init */
DAC_Init(DAC_Channel_1, &DAC_InitStructure);
/* Enable DAC Channel1 */
DAC_Cmd(DAC_Channel_1, ENABLE);
/* Set DAC Channel1 DHR register: DAC_OUT1 = (3.3 * 2000) / 4095 ~ 1.61 V */
DAC_SetChannel1Data(DAC_Align_12b_R, 2000);
}
/**
* @brief Configures COMP1: DAC channel 1 to COMP1 inverting input
* and COMP1 output to TIM2 IC4.
* @param None
* @retval None
*/
static void COMP_Config(void)
{
/* Init Structure definition */
COMP_InitTypeDef COMP_InitStructure;
GPIO_InitTypeDef GPIO_InitStructure;
/* GPIOA Peripheral clock enable */
RCC_AHBPeriphClockCmd(RCC_AHBPeriph_GPIOA, ENABLE);
/* Configure PA1: PA1 is used as COMP1 non inveting input */
GPIO_StructInit(&GPIO_InitStructure);
GPIO_InitStructure.GPIO_Pin = GPIO_Pin_1;
GPIO_InitStructure.GPIO_Mode = GPIO_Mode_AN;
GPIO_InitStructure.GPIO_PuPd = GPIO_PuPd_NOPULL;
GPIO_Init(GPIOA, &GPIO_InitStructure);
/* COMP Peripheral clock enable */
RCC_APB2PeriphClockCmd(RCC_APB2Periph_SYSCFG, ENABLE);
/* COMP1 Init: DAC1 output is used COMP1 inverting input */
COMP_StructInit(&COMP_InitStructure);
COMP_InitStructure.COMP_InvertingInput = COMP_InvertingInput_DAC1;
/* Redirect COMP1 output to TIM2 Input capture 4 */
COMP_InitStructure.COMP_Output = COMP_Output_TIM2IC4;
COMP_InitStructure.COMP_Mode = COMP_Mode_HighSpeed;
COMP_InitStructure.COMP_Hysteresis = COMP_Hysteresis_No;
COMP_Init(COMP_Selection_COMP1, &COMP_InitStructure);
/* Enable COMP1 */
COMP_Cmd(COMP_Selection_COMP1, ENABLE);
}
/**
* @brief Configures TIM2 channel 4 in input capture mode
* @param None
* @retval None
*/
static void TIM_Config(void)
{
/* Init Structure definition */
TIM_ICInitTypeDef TIM_ICInitStructure;
TIM_TimeBaseInitTypeDef TIM_TimeBaseStructure;
NVIC_InitTypeDef NVIC_InitStructure;
/* TIM2 clock enable */
RCC_APB1PeriphClockCmd(RCC_APB1Periph_TIM2, ENABLE);
/* TIM2 Time base configuration */
TIM_TimeBaseStructInit(&TIM_TimeBaseStructure);
TIM_TimeBaseStructure.TIM_Prescaler = 0;
TIM_TimeBaseStructure.TIM_CounterMode = TIM_CounterMode_Up;
TIM_TimeBaseStructure.TIM_Period = 65535;
TIM_TimeBaseStructure.TIM_ClockDivision = TIM_CKD_DIV1;
TIM_TimeBaseInit(TIM2, &TIM_TimeBaseStructure);
TIM_ClearFlag(TIM2, TIM_FLAG_Update);
/* TIM2 Channel4 Input capture Mode configuration */
TIM_ICStructInit(&TIM_ICInitStructure);
TIM_ICInitStructure.TIM_Channel = TIM_Channel_4;
/* TIM2 counter is captured at each transition detection: rising or falling edges (both edges) */
TIM_ICInitStructure.TIM_ICPolarity = TIM_ICPolarity_BothEdge;
TIM_ICInitStructure.TIM_ICSelection = TIM_ICSelection_DirectTI;
TIM_ICInitStructure.TIM_ICPrescaler = TIM_ICPSC_DIV1;
TIM_ICInitStructure.TIM_ICFilter = 0;
TIM_ICInit(TIM2, &TIM_ICInitStructure);
/* TIM2 IRQChannel enable */
NVIC_InitStructure.NVIC_IRQChannel = TIM2_IRQn;
NVIC_InitStructure.NVIC_IRQChannelPriority = 0;
NVIC_InitStructure.NVIC_IRQChannelCmd = ENABLE;
NVIC_Init(&NVIC_InitStructure);
/* Enable capture interrupt */
TIM_ITConfig(TIM2, TIM_IT_CC4, ENABLE);
/* Enable the TIM2 counter */
TIM_Cmd(TIM2, ENABLE);
/* Reset the flags */
TIM2->SR = 0;
}
/**
* @brief Display measured pulse width on color LCD
* @param None
* @retval None
*/
static void DisplayOnLCD(uint32_t value)
{
uint8_t text[50];
sprintf((char*)text,"PulseWidth = %d us ",value);
LCD_DisplayStringLine(LINE(5),text);
}
#ifdef USE_FULL_ASSERT
/**
* @brief Reports the name of the source file and the source line number
* where the assert_param error has occurred.
* @param file: pointer to the source file name
* @param line: assert_param error line source number
* @retval None
*/
void assert_failed(uint8_t* file, uint32_t line)
{
/* User can add his own implementation to report the file name and line number,
ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */
/* Infinite loop */
while (1)
{
}
}
#endif
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| 30.102662 | 99 | 0.651636 |
07181c8116cb746df41766c3ce7f71b97ae11caa | 20,508 | c | C | third-party/libfabric/libfabric-src/prov/tcp/src/tcpx_progress.c | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 1,602 | 2015-01-06T11:26:31.000Z | 2022-03-30T06:17:21.000Z | third-party/libfabric/libfabric-src/prov/tcp/src/tcpx_progress.c | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 11,789 | 2015-01-05T04:50:15.000Z | 2022-03-31T23:39:19.000Z | third-party/libfabric/libfabric-src/prov/tcp/src/tcpx_progress.c | jhh67/chapel | f041470e9b88b5fc4914c75aa5a37efcb46aa08f | [
"ECL-2.0",
"Apache-2.0"
] | 498 | 2015-01-08T18:58:18.000Z | 2022-03-20T15:37:45.000Z | /*
* Copyright (c) 2017 Intel Corporation, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/fi_errno.h>
#include <ofi_prov.h>
#include "tcpx.h"
#include <poll.h>
#include <sys/types.h>
#include <ifaddrs.h>
#include <net/if.h>
#include <ofi_util.h>
#include <ofi_iov.h>
/* Progress one queued transmit.  Attempts the socket send; on a would-block
 * result the entry stays at the head of the tx queue for the next progress
 * pass.  Otherwise the entry is dequeued and either failed, completed, or
 * parked to await the peer's delivery/commit acknowledgment.
 */
static void tcpx_process_tx_entry(struct tcpx_xfer_entry *tx_entry)
{
	struct tcpx_cq *tcpx_cq;
	int ret;

	ret = tcpx_send_msg(tx_entry);
	if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
		return;	/* partial send; retry on the next progress pass */

	/* Keep this path below as a single pass path.*/
	/* Swap the header back so the flags test below reads host-order
	 * values (it was byte-swapped to wire order when queued).
	 */
	tx_entry->ep->hdr_bswap(&tx_entry->hdr.base_hdr);
	slist_remove_head(&tx_entry->ep->tx_queue);

	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_DOMAIN, "msg send failed\n");
		tcpx_cq_report_error(tx_entry->ep->util_ep.tx_cq,
				     tx_entry, -ret);
	} else {
		if (tx_entry->hdr.base_hdr.flags &
		    (OFI_DELIVERY_COMPLETE | OFI_COMMIT_COMPLETE)) {
			/* Completion is deferred until the peer's response
			 * arrives; see the TCPX_OP_MSG_RESP path in
			 * tcpx_op_msg().  Do not free the entry yet.
			 */
			slist_insert_tail(&tx_entry->entry,
					  &tx_entry->ep->tx_rsp_pend_queue);
			return;
		}
		tcpx_cq_report_success(tx_entry->ep->util_ep.tx_cq, tx_entry);
	}

	tcpx_cq = container_of(tx_entry->ep->util_ep.tx_cq,
			       struct tcpx_cq, util_cq);
	tcpx_xfer_entry_free(tcpx_cq, tx_entry);
}
/* Build and queue a header-only ack (TCPX_OP_MSG_RESP) for a received
 * message that requested delivery-complete semantics, then report the
 * receive completion and release the rx entry.
 *
 * Returns -FI_EAGAIN when no tx entry can be allocated; the caller
 * re-invokes this function on a later progress pass.
 */
static int tcpx_prepare_rx_entry_resp(struct tcpx_xfer_entry *rx_entry)
{
	struct tcpx_cq *tcpx_tx_cq;
	struct tcpx_xfer_entry *resp_entry;

	tcpx_tx_cq = container_of(rx_entry->ep->util_ep.tx_cq,
				  struct tcpx_cq, util_cq);

	resp_entry = tcpx_xfer_entry_alloc(tcpx_tx_cq, TCPX_OP_MSG_RESP);
	if (!resp_entry)
		return -FI_EAGAIN;

	/* The response carries only the base header; no payload. */
	resp_entry->iov[0].iov_base = (void *) &resp_entry->hdr;
	resp_entry->iov[0].iov_len = sizeof(resp_entry->hdr.base_hdr);
	resp_entry->iov_cnt = 1;

	resp_entry->hdr.base_hdr.op = ofi_op_msg;
	resp_entry->hdr.base_hdr.size = sizeof(resp_entry->hdr.base_hdr);
	resp_entry->hdr.base_hdr.payload_off =
		(uint8_t)sizeof(resp_entry->hdr.base_hdr);

	/* Internal message: no app context and no local completion. */
	resp_entry->flags = 0;
	resp_entry->context = NULL;
	resp_entry->rem_len = sizeof(resp_entry->hdr.base_hdr);
	resp_entry->ep = rx_entry->ep;
	/* Convert the header to wire byte order before queuing the send. */
	resp_entry->ep->hdr_bswap(&resp_entry->hdr.base_hdr);
	tcpx_tx_queue_insert(resp_entry->ep, resp_entry);

	tcpx_cq_report_success(rx_entry->ep->util_ep.rx_cq, rx_entry);
	tcpx_rx_entry_free(rx_entry);
	return FI_SUCCESS;
}
/* Fetch landing buffer(s) for the rest of the current message from the
 * application's dynamic-rbuf callback and truncate the resulting iov to
 * the remaining message length.  Only called when dynamic rbufs are
 * enabled on the endpoint (asserted below).
 *
 * Returns 0 on success, or a negative error if the callback fails or
 * the provided buffers are smaller than the remaining data.
 */
static int tcpx_update_rx_iov(struct tcpx_xfer_entry *rx_entry)
{
	struct fi_cq_data_entry cq_entry;
	int ret;

	assert(tcpx_dynamic_rbuf(rx_entry->ep));

	/* Describe what has been received so far so the app can size and
	 * place the buffers: len is the number of payload bytes already
	 * consumed, buf is where the message started.
	 */
	cq_entry.op_context = rx_entry->context;
	cq_entry.flags = rx_entry->flags;
	cq_entry.len = (rx_entry->hdr.base_hdr.size -
			rx_entry->hdr.base_hdr.payload_off) -
		       rx_entry->rem_len;
	cq_entry.buf = rx_entry->mrecv_msg_start;
	cq_entry.data = 0;

	rx_entry->iov_cnt = TCPX_IOV_LIMIT;
	ret = (int) tcpx_dynamic_rbuf(rx_entry->ep)->
		    get_rbuf(&cq_entry, &rx_entry->iov[0], &rx_entry->iov_cnt);
	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_EP_DATA,
			"get_rbuf callback failed %s\n",
			fi_strerror(-ret));
		return ret;
	}

	assert(rx_entry->iov_cnt <= TCPX_IOV_LIMIT);
	ret = ofi_truncate_iov(rx_entry->iov, &rx_entry->iov_cnt,
			       rx_entry->rem_len);
	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_EP_DATA,
			"dynamically provided rbuf is too small\n");
		return ret;
	}

	return 0;
}
/* Progress receipt of the current message's payload.  Loops (via the
 * retry label) when a dynamic rbuf must be fetched mid-message.  Once
 * all data is in, either queues a delivery-complete ack or reports the
 * receive completion.  Any hard error disables the endpoint.
 */
static int tcpx_process_recv(struct tcpx_xfer_entry *rx_entry)
{
	int ret;

retry:
	ret = tcpx_recv_msg_data(rx_entry);
	if (ret) {
		if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
			return ret;	/* partial receive; resume later */

		FI_WARN(&tcpx_prov, FI_LOG_EP_DATA,
			"msg recv failed ret = %d (%s)\n", ret,
			fi_strerror(-ret));
		goto shutdown;
	}

	if (rx_entry->flags & TCPX_NEED_DYN_RBUF) {
		/* The posted iov covered only part of the message; get the
		 * app-provided buffers for the remainder and receive again.
		 */
		ret = tcpx_update_rx_iov(rx_entry);
		if (ret)
			goto shutdown;

		rx_entry->flags &= ~TCPX_NEED_DYN_RBUF;
		rx_entry->rem_len = 0;
		goto retry;
	}

	if (rx_entry->hdr.base_hdr.flags & OFI_DELIVERY_COMPLETE) {
		/* If the ack cannot be queued now (no tx entry available),
		 * arrange for the ep to retry it on the next progress pass.
		 */
		if (tcpx_prepare_rx_entry_resp(rx_entry))
			rx_entry->ep->cur_rx_proc_fn = tcpx_prepare_rx_entry_resp;
	} else {
		tcpx_cq_report_success(rx_entry->ep->util_ep.rx_cq, rx_entry);
		tcpx_rx_entry_free(rx_entry);
	}
	return 0;

shutdown:
	tcpx_ep_disable(rx_entry->ep, 0);
	tcpx_cq_report_error(rx_entry->ep->util_ep.rx_cq, rx_entry, -ret);
	tcpx_rx_entry_free(rx_entry);
	return ret;
}
/* Build and queue the header-only response to a remote write that
 * requested delivery- or commit-complete semantics, then report the
 * remote-write completion locally and release the rx entry.
 *
 * Returns -FI_EAGAIN when no tx entry can be allocated; the caller
 * re-invokes this function on a later progress pass.
 */
static int tcpx_prepare_rx_write_resp(struct tcpx_xfer_entry *rx_entry)
{
	struct tcpx_cq *tcpx_rx_cq, *tcpx_tx_cq;
	struct tcpx_xfer_entry *resp_entry;

	tcpx_tx_cq = container_of(rx_entry->ep->util_ep.tx_cq,
				  struct tcpx_cq, util_cq);

	resp_entry = tcpx_xfer_entry_alloc(tcpx_tx_cq, TCPX_OP_MSG_RESP);
	if (!resp_entry)
		return -FI_EAGAIN;

	/* Header-only message; no payload. */
	resp_entry->iov[0].iov_base = (void *) &resp_entry->hdr;
	resp_entry->iov[0].iov_len = sizeof(resp_entry->hdr.base_hdr);
	resp_entry->iov_cnt = 1;

	resp_entry->hdr.base_hdr.op = ofi_op_msg;
	resp_entry->hdr.base_hdr.size = sizeof(resp_entry->hdr.base_hdr);
	resp_entry->hdr.base_hdr.payload_off = (uint8_t)
						sizeof(resp_entry->hdr.base_hdr);

	/* NOTE(review): only FI_COMPLETION is cleared here, while
	 * tcpx_prepare_rx_entry_resp() zeroes flags outright on a freshly
	 * allocated entry.  Assumes tcpx_xfer_entry_alloc() initializes
	 * flags — TODO confirm against the allocator.
	 */
	resp_entry->flags &= ~FI_COMPLETION;
	resp_entry->context = NULL;
	resp_entry->rem_len = resp_entry->hdr.base_hdr.size;
	resp_entry->ep = rx_entry->ep;
	/* Convert the header to wire byte order before queuing the send. */
	resp_entry->ep->hdr_bswap(&resp_entry->hdr.base_hdr);
	tcpx_tx_queue_insert(resp_entry->ep, resp_entry);

	tcpx_cq_report_success(rx_entry->ep->util_ep.rx_cq, rx_entry);

	tcpx_rx_cq = container_of(rx_entry->ep->util_ep.rx_cq,
				  struct tcpx_cq, util_cq);
	tcpx_xfer_entry_free(tcpx_rx_cq, rx_entry);
	return FI_SUCCESS;
}
/* Flush the target regions of a received RMA write to persistent memory.
 * No-op unless a pmem commit hook (ofi_pmem_commit) has been registered.
 */
static void tcpx_pmem_commit(struct tcpx_xfer_entry *rx_entry)
{
	struct ofi_rma_iov *rma_iov;
	size_t offset;
	int i;

	if (!ofi_pmem_commit)
		return;

	/* The rma_iov array follows the base header, shifted past the
	 * optional CQ data word when remote CQ data is present.
	 */
	offset = sizeof(rx_entry->hdr.base_hdr);
	if (rx_entry->hdr.base_hdr.flags & OFI_REMOTE_CQ_DATA)
		offset += sizeof(uint64_t);

	rma_iov = (struct ofi_rma_iov *) ((uint8_t *) &rx_entry->hdr + offset);

	for (i = 0; i < rx_entry->hdr.base_hdr.rma_iov_cnt; i++) {
		(*ofi_pmem_commit)((const void *) (uintptr_t) rma_iov[i].addr,
				   rma_iov[i].len);
	}
}
/* Progress receipt of a remote write's payload directly into the target
 * memory regions.  On full receipt, commits to pmem and/or queues the
 * write response if the initiator asked for delivery/commit completion;
 * otherwise reports the remote-write completion immediately.
 */
static int tcpx_process_remote_write(struct tcpx_xfer_entry *rx_entry)
{
	struct tcpx_cq *tcpx_cq;
	int ret = FI_SUCCESS;

	ret = tcpx_recv_msg_data(rx_entry);
	if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
		return ret;	/* partial receive; resume later */

	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_DOMAIN,
			"remote write Failed ret = %d\n",
			ret);

		tcpx_ep_disable(rx_entry->ep, 0);
		tcpx_cq_report_error(rx_entry->ep->util_ep.rx_cq, rx_entry, -ret);
		tcpx_cq = container_of(rx_entry->ep->util_ep.rx_cq,
				       struct tcpx_cq, util_cq);
		tcpx_xfer_entry_free(tcpx_cq, rx_entry);

	} else if (rx_entry->hdr.base_hdr.flags &
		  (OFI_DELIVERY_COMPLETE | OFI_COMMIT_COMPLETE)) {

		if (rx_entry->hdr.base_hdr.flags & OFI_COMMIT_COMPLETE)
			tcpx_pmem_commit(rx_entry);

		/* If the response cannot be queued now (-FI_EAGAIN), retry
		 * it from the ep's current rx processing function.
		 */
		if (tcpx_prepare_rx_write_resp(rx_entry))
			rx_entry->ep->cur_rx_proc_fn = tcpx_prepare_rx_write_resp;
	} else {
		tcpx_cq_report_success(rx_entry->ep->util_ep.rx_cq, rx_entry);
		tcpx_cq = container_of(rx_entry->ep->util_ep.rx_cq,
				       struct tcpx_cq, util_cq);
		tcpx_xfer_entry_free(tcpx_cq, rx_entry);
	}
	return ret;
}
/* Progress receipt of read-response data for a locally initiated RMA
 * read.  Completions go to the tx CQ because the read was issued from
 * this side's transmit path; the entry is tracked on rma_read_queue.
 */
static int tcpx_process_remote_read(struct tcpx_xfer_entry *rx_entry)
{
	struct tcpx_cq *tcpx_cq;
	int ret = FI_SUCCESS;

	ret = tcpx_recv_msg_data(rx_entry);
	if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
		return ret;	/* partial receive; resume later */

	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_DOMAIN,
			"msg recv Failed ret = %d\n", ret);

		tcpx_ep_disable(rx_entry->ep, 0);
		tcpx_cq_report_error(rx_entry->ep->util_ep.tx_cq, rx_entry, -ret);
	} else {
		tcpx_cq_report_success(rx_entry->ep->util_ep.tx_cq, rx_entry);
	}

	/* The entry at the head of rma_read_queue is the one just finished. */
	slist_remove_head(&rx_entry->ep->rma_read_queue);
	tcpx_cq = container_of(rx_entry->ep->util_ep.tx_cq,
			       struct tcpx_cq, util_cq);
	tcpx_xfer_entry_free(tcpx_cq, rx_entry);
	return ret;
}
/* Turn the RMA iov carried in a received write header into the xfer
 * entry's message iov, so the incoming payload lands directly in the
 * registered target regions.
 */
static void tcpx_copy_rma_iov_to_msg_iov(struct tcpx_xfer_entry *xfer_entry)
{
	struct ofi_rma_iov *rma_iov;
	size_t offset;
	int i;

	/* The rma_iov array follows the base header, shifted past the
	 * optional CQ data word when remote CQ data is present.
	 */
	offset = sizeof(xfer_entry->hdr.base_hdr);
	if (xfer_entry->hdr.base_hdr.flags & OFI_REMOTE_CQ_DATA)
		offset += sizeof(uint64_t);

	rma_iov = (struct ofi_rma_iov *) ((uint8_t *) &xfer_entry->hdr + offset);

	xfer_entry->iov_cnt = xfer_entry->hdr.base_hdr.rma_iov_cnt;
	for (i = 0; i < xfer_entry->hdr.base_hdr.rma_iov_cnt; i++) {
		xfer_entry->iov[i].iov_base = (void *) rma_iov[i].addr;
		xfer_entry->iov[i].iov_len = rma_iov[i].len;
	}
}
/* Convert a validated read request entry into its response: iov[0]
 * carries the response header and the remaining iovs point at the local
 * registered regions named by the request's rma_iov, so the data is sent
 * straight from target memory.  The entry then moves to the tx queue.
 */
static int tcpx_prepare_rx_remote_read_resp(struct tcpx_xfer_entry *resp_entry)
{
	struct ofi_rma_iov *rma_iov;
	int i;

	resp_entry->iov[0].iov_base = (void *) &resp_entry->hdr;
	resp_entry->iov[0].iov_len = sizeof(resp_entry->hdr.base_hdr);
	rma_iov = (struct ofi_rma_iov *) ((uint8_t *)
		  &resp_entry->hdr + sizeof(resp_entry->hdr.base_hdr));

	resp_entry->iov_cnt = 1 + resp_entry->hdr.base_hdr.rma_iov_cnt;
	resp_entry->hdr.base_hdr.size = resp_entry->iov[0].iov_len;
	/* Accumulate the total wire size: header plus all data regions. */
	for ( i = 0 ; i < resp_entry->hdr.base_hdr.rma_iov_cnt ; i++ ) {
		resp_entry->iov[i+1].iov_base = (void *) (uintptr_t)rma_iov[i].addr;
		resp_entry->iov[i+1].iov_len = rma_iov[i].len;
		resp_entry->hdr.base_hdr.size += resp_entry->iov[i+1].iov_len;
	}

	resp_entry->hdr.base_hdr.op = ofi_op_read_rsp;
	resp_entry->hdr.base_hdr.payload_off = (uint8_t)
						sizeof(resp_entry->hdr.base_hdr);

	/* Internal response: no app context and no local completion. */
	resp_entry->flags &= ~FI_COMPLETION;
	resp_entry->context = NULL;
	resp_entry->rem_len = resp_entry->hdr.base_hdr.size;

	/* Convert the header to wire byte order before queuing the send. */
	resp_entry->ep->hdr_bswap(&resp_entry->hdr.base_hdr);
	tcpx_tx_queue_insert(resp_entry->ep, resp_entry);
	/* The rx side is done with this message; allow the next header in. */
	resp_entry->ep->cur_rx_entry = NULL;
	return FI_SUCCESS;
}
/* Verify every RMA iov in a received request against the domain's MR map
 * for the given access rights (FI_REMOTE_READ or FI_REMOTE_WRITE).
 * Returns FI_SUCCESS when all regions check out, -FI_EINVAL otherwise.
 */
static int tcpx_validate_rx_rma_data(struct tcpx_xfer_entry *rx_entry,
				     uint64_t access)
{
	struct ofi_mr_map *mr_map = &rx_entry->ep->util_ep.domain->mr_map;
	struct ofi_rma_iov *rma_iov;
	size_t offset;
	int i, ret;

	/* The rma_iov array follows the base header, shifted past the
	 * optional CQ data word when remote CQ data is present.
	 */
	offset = sizeof(rx_entry->hdr.base_hdr);
	if (rx_entry->hdr.base_hdr.flags & OFI_REMOTE_CQ_DATA)
		offset += sizeof(uint64_t);

	rma_iov = (struct ofi_rma_iov *) ((uint8_t *) &rx_entry->hdr + offset);

	for (i = 0; i < rx_entry->hdr.base_hdr.rma_iov_cnt; i++) {
		ret = ofi_mr_verify(mr_map, rma_iov[i].len,
				    (uintptr_t *) &rma_iov[i].addr,
				    rma_iov[i].key, access);
		if (ret) {
			FI_WARN(&tcpx_prov, FI_LOG_EP_DATA,
				"invalid rma iov received\n");
			return -FI_EINVAL;
		}
	}
	return FI_SUCCESS;
}
/* Handler for header opcodes this provider does not recognize; fails
 * the rx progress path so the connection error is surfaced.
 */
int tcpx_op_invalid(struct tcpx_ep *tcpx_ep)
{
	return -FI_EINVAL;
}
/* Must hold ep lock.
 * Dequeue the oldest posted receive from the ep's rx queue.
 * Returns NULL when no receives are posted.
 */
static struct tcpx_xfer_entry *tcpx_rx_entry_alloc(struct tcpx_ep *ep)
{
	struct tcpx_xfer_entry *xfer;

	if (slist_empty(&ep->rx_queue))
		return NULL;

	xfer = container_of(ep->rx_queue.head, struct tcpx_xfer_entry, entry);
	slist_remove_head(&ep->rx_queue);
	return xfer;
}
/* Install the active rx entry and its processing function on the ep,
 * and reset the header-parsing state so the next message header can be
 * received.  rx_entry/process_fn may both be NULL to clear the state.
 */
static void tcpx_rx_setup(struct tcpx_ep *ep, struct tcpx_xfer_entry *rx_entry,
			  tcpx_rx_process_fn_t process_fn)
{
	struct tcpx_cur_rx_msg *cur_msg = &ep->cur_rx_msg;

	ep->cur_rx_entry = rx_entry;
	ep->cur_rx_proc_fn = process_fn;

	/* Reset to receive next message */
	cur_msg->hdr_len = sizeof(cur_msg->hdr.base_hdr);
	cur_msg->done_len = 0;
}
/* Handle a received message header (ofi_op_msg).  Two cases:
 *  - TCPX_OP_MSG_RESP: the peer's delivery/commit ack for our earliest
 *    pending transmit; complete that tx entry and return -FI_EAGAIN so
 *    the progress loop re-reads for the next header.
 *  - a data message: match it to a posted receive (shared or per-ep rx
 *    queue) and hand off to tcpx_process_recv for the payload.
 */
int tcpx_op_msg(struct tcpx_ep *tcpx_ep)
{
	struct tcpx_xfer_entry *rx_entry;
	struct tcpx_xfer_entry *tx_entry;
	struct tcpx_cq *tcpx_cq;
	struct tcpx_cur_rx_msg *cur_rx_msg = &tcpx_ep->cur_rx_msg;
	size_t msg_len;
	int ret;

	if (cur_rx_msg->hdr.base_hdr.op_data == TCPX_OP_MSG_RESP) {
		/* Acks arrive in order, so the head of tx_rsp_pend_queue is
		 * the transmit being acknowledged.
		 */
		assert(!slist_empty(&tcpx_ep->tx_rsp_pend_queue));
		tx_entry = container_of(tcpx_ep->tx_rsp_pend_queue.head,
					struct tcpx_xfer_entry, entry);
		tcpx_cq = container_of(tcpx_ep->util_ep.tx_cq, struct tcpx_cq,
				       util_cq);
		tcpx_cq_report_success(tx_entry->ep->util_ep.tx_cq, tx_entry);

		slist_remove_head(&tx_entry->ep->tx_rsp_pend_queue);
		tcpx_xfer_entry_free(tcpx_cq, tx_entry);
		tcpx_rx_setup(tcpx_ep, NULL, NULL);
		return -FI_EAGAIN;
	}

	/* Payload length excludes the (variable-size) header. */
	msg_len = (tcpx_ep->cur_rx_msg.hdr.base_hdr.size -
		   tcpx_ep->cur_rx_msg.hdr.base_hdr.payload_off);

	if (tcpx_ep->srx_ctx) {
		/* Shared rx context: completion flag comes from the ep's
		 * default rx op flags.
		 */
		rx_entry = tcpx_srx_entry_alloc(tcpx_ep->srx_ctx, tcpx_ep);
		if (!rx_entry)
			return -FI_EAGAIN;

		rx_entry->flags |= tcpx_ep->util_ep.rx_op_flags & FI_COMPLETION;
	} else {
		rx_entry = tcpx_rx_entry_alloc(tcpx_ep);
		if (!rx_entry)
			return -FI_EAGAIN;
	}

	/* Copy the full received header (base + optional fields). */
	memcpy(&rx_entry->hdr, &tcpx_ep->cur_rx_msg.hdr,
	       (size_t) tcpx_ep->cur_rx_msg.hdr.base_hdr.payload_off);
	rx_entry->ep = tcpx_ep;
	rx_entry->hdr.base_hdr.op_data = TCPX_OP_MSG_RECV;
	rx_entry->mrecv_msg_start = rx_entry->iov[0].iov_base;

	if (tcpx_dynamic_rbuf(tcpx_ep))
		rx_entry->flags |= TCPX_NEED_DYN_RBUF;

	ret = ofi_truncate_iov(rx_entry->iov, &rx_entry->iov_cnt, msg_len);
	if (ret) {
		/* Posted buffer too small: an error unless dynamic rbufs
		 * can supply the space for the remaining bytes.
		 */
		if (!tcpx_dynamic_rbuf(tcpx_ep))
			goto truncate_err;

		rx_entry->rem_len = msg_len -
				    ofi_total_iov_len(rx_entry->iov,
						      rx_entry->iov_cnt);
	}

	if (cur_rx_msg->hdr.base_hdr.flags & OFI_REMOTE_CQ_DATA)
		rx_entry->flags |= FI_REMOTE_CQ_DATA;

	tcpx_rx_setup(tcpx_ep, rx_entry, tcpx_process_recv);
	return FI_SUCCESS;

truncate_err:
	FI_WARN(&tcpx_prov, FI_LOG_EP_DATA,
		"posted rx buffer size is not big enough\n");
	tcpx_cq_report_error(rx_entry->ep->util_ep.rx_cq,
			     rx_entry, -ret);
	tcpx_rx_entry_free(rx_entry);
	return ret;
}
/* Handle the header of an incoming remote read request.
 *
 * A read request generates a response once the data has been sent back,
 * so the xfer entry is drawn from the tx CQ buffer pool, to which it
 * will eventually be returned.
 *
 * Returns FI_SUCCESS on success, -FI_EAGAIN when no entry is available,
 * or a negative error when the RMA data fails validation.
 */
int tcpx_op_read_req(struct tcpx_ep *tcpx_ep)
{
	struct tcpx_cq *cq;
	struct tcpx_xfer_entry *req_entry;
	int ret;

	cq = container_of(tcpx_ep->util_ep.tx_cq, struct tcpx_cq, util_cq);
	req_entry = tcpx_xfer_entry_alloc(cq, TCPX_OP_REMOTE_READ);
	if (!req_entry)
		return -FI_EAGAIN;

	/* Preserve the received header; payload_off is its length. */
	memcpy(&req_entry->hdr, &tcpx_ep->cur_rx_msg.hdr,
	       (size_t) tcpx_ep->cur_rx_msg.hdr.base_hdr.payload_off);
	req_entry->hdr.base_hdr.op_data = TCPX_OP_REMOTE_READ;
	req_entry->ep = tcpx_ep;

	ret = tcpx_validate_rx_rma_data(req_entry, FI_REMOTE_READ);
	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_DOMAIN,
			"invalid rma data\n");
		tcpx_xfer_entry_free(cq, req_entry);
		return ret;
	}

	tcpx_rx_setup(tcpx_ep, req_entry, tcpx_prepare_rx_remote_read_resp);
	return FI_SUCCESS;
}
/* Handle the header of an incoming remote write.
 *
 * The xfer entry is taken from the rx CQ buffer pool.  Completion flags
 * are raised only when the peer attached remote CQ data to the write.
 *
 * Returns FI_SUCCESS on success, -FI_EAGAIN when no entry is available,
 * or a negative error when the RMA data fails validation.
 */
int tcpx_op_write(struct tcpx_ep *tcpx_ep)
{
	struct tcpx_cq *rx_cq;
	struct tcpx_xfer_entry *write_entry;
	int ret;

	rx_cq = container_of(tcpx_ep->util_ep.rx_cq,
			     struct tcpx_cq, util_cq);
	write_entry = tcpx_xfer_entry_alloc(rx_cq, TCPX_OP_REMOTE_WRITE);
	if (!write_entry)
		return -FI_EAGAIN;

	if (tcpx_ep->cur_rx_msg.hdr.base_hdr.flags & OFI_REMOTE_CQ_DATA)
		write_entry->flags = FI_COMPLETION | FI_REMOTE_CQ_DATA |
				     FI_REMOTE_WRITE;
	else
		write_entry->flags = 0;

	/* Preserve the received header; payload_off is its length. */
	memcpy(&write_entry->hdr, &tcpx_ep->cur_rx_msg.hdr,
	       (size_t) tcpx_ep->cur_rx_msg.hdr.base_hdr.payload_off);
	write_entry->hdr.base_hdr.op_data = TCPX_OP_REMOTE_WRITE;
	write_entry->ep = tcpx_ep;

	ret = tcpx_validate_rx_rma_data(write_entry, FI_REMOTE_WRITE);
	if (ret) {
		FI_WARN(&tcpx_prov, FI_LOG_DOMAIN,
			"invalid rma data\n");
		tcpx_xfer_entry_free(rx_cq, write_entry);
		return ret;
	}

	tcpx_copy_rma_iov_to_msg_iov(write_entry);
	tcpx_rx_setup(tcpx_ep, write_entry, tcpx_process_remote_write);
	return FI_SUCCESS;
}
/* Handle the header of a read response: match it to the oldest pending
 * local read on rma_read_queue and start receiving its payload.
 *
 * Returns FI_SUCCESS on success or -FI_EINVAL when no read is pending.
 */
int tcpx_op_read_rsp(struct tcpx_ep *tcpx_ep)
{
	struct tcpx_xfer_entry *resp_entry;

	if (slist_empty(&tcpx_ep->rma_read_queue))
		return -FI_EINVAL;

	resp_entry = container_of(tcpx_ep->rma_read_queue.head,
				  struct tcpx_xfer_entry, entry);
	/* Preserve the received header; payload_off is its length. */
	memcpy(&resp_entry->hdr, &tcpx_ep->cur_rx_msg.hdr,
	       (size_t) tcpx_ep->cur_rx_msg.hdr.base_hdr.payload_off);
	resp_entry->hdr.base_hdr.op_data = TCPX_OP_READ_RSP;
	tcpx_rx_setup(tcpx_ep, resp_entry, tcpx_process_remote_read);
	return FI_SUCCESS;
}
/* Read (possibly across multiple calls) the next message header from the
 * socket into ep->cur_rx_msg.
 *
 * done_len accumulates how many header bytes have arrived so far.  Once
 * the fixed base header is complete, payload_off tells us the full header
 * length (hdr_len); a second receive attempt is made for any remaining
 * header bytes in the same pass.
 *
 * Returns FI_SUCCESS once the complete header has been received and
 * byte-swapped, -FI_EAGAIN while it is still partial, -FI_EIO when the
 * advertised header length is implausibly large, or a negative error
 * from the receive path.
 */
static int tcpx_get_next_rx_hdr(struct tcpx_ep *ep)
{
	ssize_t ret;

	ret = tcpx_recv_hdr(ep->sock, &ep->stage_buf, &ep->cur_rx_msg);
	if (ret < 0)
		return (int) ret;

	ep->cur_rx_msg.done_len += ret;
	if (ep->cur_rx_msg.done_len >= sizeof(ep->cur_rx_msg.hdr.base_hdr)) {
		/* Base header complete: payload_off gives the full header
		 * size; reject values beyond the header buffer capacity. */
		if (ep->cur_rx_msg.hdr.base_hdr.payload_off > TCPX_MAX_HDR_SZ) {
			FI_WARN(&tcpx_prov, FI_LOG_EP_DATA,
				"Payload offset is too large\n");
			return -FI_EIO;
		}
		ep->cur_rx_msg.hdr_len = (size_t) ep->cur_rx_msg.hdr.
					 base_hdr.payload_off;

		if (ep->cur_rx_msg.hdr_len > ep->cur_rx_msg.done_len) {
			/* Optional header fields remain; try to pull them
			 * in before giving up for this pass. */
			ret = tcpx_recv_hdr(ep->sock, &ep->stage_buf,
					    &ep->cur_rx_msg);
			if (ret < 0)
				return (int) ret;

			ep->cur_rx_msg.done_len += ret;
		}
	}

	if (ep->cur_rx_msg.done_len < ep->cur_rx_msg.hdr_len)
		return -FI_EAGAIN;

	/* Convert the base header to host byte order exactly once. */
	ep->hdr_bswap(&ep->cur_rx_msg.hdr.base_hdr);
	return FI_SUCCESS;
}
/* Drive receive progress for the endpoint.  Caller must hold the ep lock.
 *
 * When no receive is in flight and the stage buffer has been consumed,
 * more socket data is read first.  The loop then alternates between
 * starting a new message (parse the next header, dispatch on its opcode
 * via start_op[]) and advancing the current one via cur_rx_proc_fn,
 * until the staged bytes are exhausted.
 *
 * Transient -FI_EAGAIN-style errors simply end the pass; -FI_ENOTCONN
 * (including an invalid opcode, which is treated as fatal) disables the
 * endpoint.
 */
void tcpx_progress_rx(struct tcpx_ep *ep)
{
	int ret;

	if (!ep->cur_rx_entry &&
	    (ep->stage_buf.cur_pos == ep->stage_buf.bytes_avail)) {
		ret = tcpx_read_to_buffer(ep->sock, &ep->stage_buf);
		if (ret)
			goto err;
	}

	do {
		if (!ep->cur_rx_entry) {
			/* Finish assembling the header if it is partial. */
			if (ep->cur_rx_msg.done_len < ep->cur_rx_msg.hdr_len) {
				ret = tcpx_get_next_rx_hdr(ep);
				if (ret)
					goto err;
			}

			if (ep->cur_rx_msg.hdr.base_hdr.op >=
			    ARRAY_SIZE(ep->start_op)) {
				FI_WARN(&tcpx_prov, FI_LOG_EP_DATA,
					"Received invalid opcode\n");
				ret = -FI_ENOTCONN; /* force shutdown */
				goto err;
			}
			/* Opcode-specific setup of the receive entry. */
			ret = ep->start_op[ep->cur_rx_msg.hdr.base_hdr.op](ep);
			if (ret)
				goto err;
		}
		assert(ep->cur_rx_proc_fn);
		ep->cur_rx_proc_fn(ep->cur_rx_entry);
	} while (ep->stage_buf.cur_pos < ep->stage_buf.bytes_avail);
	return;

err:
	if (OFI_SOCK_TRY_SND_RCV_AGAIN(-ret))
		return;

	if (ret == -FI_ENOTCONN)
		tcpx_ep_disable(ep, 0);
}
/* Drive transmit progress for the endpoint by processing the entry at
 * the head of the tx queue, if any.  Caller must hold the ep lock. */
void tcpx_progress_tx(struct tcpx_ep *ep)
{
	struct tcpx_xfer_entry *head_entry;

	if (slist_empty(&ep->tx_queue))
		return;

	head_entry = container_of(ep->tx_queue.head,
				  struct tcpx_xfer_entry, entry);
	tcpx_process_tx_entry(head_entry);
}
/* Wait-set callback: keep the endpoint's polled event mask in sync with
 * its tx queue state.
 *
 * POLLOUT/EPOLLOUT is requested only while transmits are queued, and
 * dropped again once the queue drains, so an idle socket does not wake
 * the waiter continuously.  pollout_set caches which mode is currently
 * installed to avoid redundant modifications.  The wait object's type
 * (FI_WAIT_FD epoll vs. pollfds) selects both the flag constants and
 * the modification call.
 *
 * Returns FI_SUCCESS, or the error from the epoll/pollfd update.
 */
int tcpx_try_func(void *util_ep)
{
	uint32_t events;
	struct util_wait_fd *wait_fd;
	struct tcpx_ep *ep;
	int ret;

	ep = container_of(util_ep, struct tcpx_ep, util_ep);
	wait_fd = container_of(((struct util_ep *) util_ep)->tx_cq->wait,
			       struct util_wait_fd, util_wait);

	fastlock_acquire(&ep->lock);
	if (!slist_empty(&ep->tx_queue) && !ep->pollout_set) {
		/* Work pending: start watching for writability too. */
		ep->pollout_set = true;
		events = (wait_fd->util_wait.wait_obj == FI_WAIT_FD) ?
			 (OFI_EPOLL_IN | OFI_EPOLL_OUT) : (POLLIN | POLLOUT);
		goto epoll_mod;
	} else if (slist_empty(&ep->tx_queue) && ep->pollout_set) {
		/* Queue drained: back to read-only interest. */
		ep->pollout_set = false;
		events = (wait_fd->util_wait.wait_obj == FI_WAIT_FD) ?
			 OFI_EPOLL_IN : POLLIN;
		goto epoll_mod;
	}
	fastlock_release(&ep->lock);
	return FI_SUCCESS;

epoll_mod:
	ret = (wait_fd->util_wait.wait_obj == FI_WAIT_FD) ?
	      ofi_epoll_mod(wait_fd->epoll_fd, ep->sock, events,
			    &ep->util_ep.ep_fid.fid) :
	      ofi_pollfds_mod(wait_fd->pollfds, ep->sock, events,
			      &ep->util_ep.ep_fid.fid);
	if (ret)
		FI_WARN(&tcpx_prov, FI_LOG_EP_DATA,
			"epoll modify failed\n");
	fastlock_release(&ep->lock);
	return ret;
}
/* Append a transmit entry to the endpoint's tx queue.
 *
 * If the queue was idle, processing of the new entry starts immediately;
 * should the entry not complete in one pass (it is still queued), any
 * associated wait object is signaled so the waiter resumes progress.
 */
void tcpx_tx_queue_insert(struct tcpx_ep *tcpx_ep,
			  struct tcpx_xfer_entry *tx_entry)
{
	struct util_wait *wait = tcpx_ep->util_ep.tx_cq->wait;
	int was_idle = slist_empty(&tcpx_ep->tx_queue);

	slist_insert_tail(&tx_entry->entry, &tcpx_ep->tx_queue);
	if (!was_idle)
		return;

	tcpx_process_tx_entry(tx_entry);
	if (wait && !slist_empty(&tcpx_ep->tx_queue))
		wait->signal(wait);
}
| 27.86413 | 79 | 0.723425 |
c2bbc70aeb2bbc71137b7c159848fb035fd86357 | 984 | h | C | terrain.h | nuggs/gor_terrain | 42ea846d127e0eb8229657878a4bdd46439e0b9f | [
"MIT"
] | null | null | null | terrain.h | nuggs/gor_terrain | 42ea846d127e0eb8229657878a4bdd46439e0b9f | [
"MIT"
] | 1 | 2021-06-21T18:47:32.000Z | 2021-06-21T18:47:32.000Z | terrain.h | nuggs/gor_terrain | 42ea846d127e0eb8229657878a4bdd46439e0b9f | [
"MIT"
] | null | null | null | /*
* copyright shit
*/
#ifndef TERRAIN_H
#define TERRAIN_H
//#include "core/object.h"
/* Using object right now, may change i don't know */
/// Abstract terrain interface exposing height, surface-normal, and
/// friction queries at world-space locations.  Subclasses provide the
/// actual terrain representation; the simulation hooks below are no-ops
/// here for implementations that do not animate over time.
class Terrain : public Object {
	GDCLASS(Terrain, Object);

public:
	Terrain(void);
	virtual ~Terrain(void) {}

	// Time-stepping hooks; intentionally empty in the base class.
	virtual void synchronize(double time) {}
	virtual void advance(double step) {}

	// Point queries at a world-space location.
	virtual double get_height(const Vector3 location) const;
	virtual Vector3 get_normal(const Vector3 location) const;
	virtual float get_coefficient_friction(const Vector3 location) const;

	/// Callback object for overriding friction lookups; operator()
	/// returns the friction coefficient at the given location.
	class friction_functor {
	public:
		virtual ~friction_functor(void) {}
		virtual float operator()(const Vector3 location) = 0;
	};

	// Installs the friction override.  NOTE(review): ownership of the
	// functor appears to stay with the caller — confirm lifetime.
	void register_friction_functor(friction_functor *functor) { friction_func = functor; }

protected:
	// Optional friction override; may be null (unset).
	friction_functor *friction_func;

	static void _bind_methods(void);
};
#endif // TERRAIN_H
| 26.594595 | 94 | 0.651423 |
907cad7a9aa99486f1fddbc7ba71edc3f62413e1 | 473 | h | C | SDLTuts/Dot.h | Amaranthos/SDLPlayground | 693a0430c2d814089408bd6c55d5174c1469ea73 | [
"MIT"
] | null | null | null | SDLTuts/Dot.h | Amaranthos/SDLPlayground | 693a0430c2d814089408bd6c55d5174c1469ea73 | [
"MIT"
] | null | null | null | SDLTuts/Dot.h | Amaranthos/SDLPlayground | 693a0430c2d814089408bd6c55d5174c1469ea73 | [
"MIT"
] | null | null | null | #ifndef DOT_H
#define DOT_H
#include <SDL.h>
#include "Tile.h"
#include "Texture.h"
// A movable dot entity for the SDL tile demo: fixed 20x20 size, handles
// keyboard input, moves with tile collision, and renders relative to a
// scrolling camera.
class Dot {
public:
	Dot();
	~Dot();

	// Dot dimensions and per-step velocity, in pixels.
	static const int DOT_WIDTH = 20;
	static const int DOT_HEIGHT = 20;
	static const int DOT_VEL = 10;

	// Adjusts velocity from key press/release events.
	void HandleEvent(SDL_Event& event);
	// Advances position; tiles are consulted for collision.
	void Move(Game::Tile* tiles[]);
	// Centers the camera on the dot.
	void SetCamera(SDL_Rect& camera);
	// Draws the dot offset by the camera position.
	void Render(SDL_Rect& camera, SDL_Renderer* renderer);

	// Texture used for drawing.  NOTE(review): public and raw —
	// ownership is unclear from this header; confirm who frees it.
	Game::Texture* sprite;

private:
	SDL_Rect box;       // Position and size (collision box).
	int velX, velY;     // Current velocity components.
};
#endif //DOT_H | 15.766667 | 55 | 0.699789 |
e6fa77e76fee9ac8e1b7b5db46b7fbc0807c92b2 | 1,449 | c | C | src/host/path_wildcards.c | Euclideon/premake-core | 9e7728f95239070fac76b9b5a66d9d8c78fde802 | [
"BSD-3-Clause"
] | null | null | null | src/host/path_wildcards.c | Euclideon/premake-core | 9e7728f95239070fac76b9b5a66d9d8c78fde802 | [
"BSD-3-Clause"
] | 1 | 2021-04-19T19:48:04.000Z | 2021-04-19T19:48:04.000Z | src/host/path_wildcards.c | Euclideon/premake-core | 9e7728f95239070fac76b9b5a66d9d8c78fde802 | [
"BSD-3-Clause"
] | 1 | 2021-04-03T20:40:18.000Z | 2021-04-03T20:40:18.000Z | /**
* \file path_wildcards.c
* \brief Converts from a simple wildcard syntax to the corresponding Lua pattern.
* \author Copyright (c) 2015 Tom van Dijck, Jason Perkins and the Premake project
*/
#include "premake.h"
#include <string.h>
#include <stdlib.h>
/*
--Converts from a simple wildcard syntax, where * is "match any"
-- and ** is "match recursive", to the corresponding Lua pattern.
--
-- @param pattern
-- The wildcard pattern to convert.
-- @returns
-- The corresponding Lua pattern.
*/
/* Converts a simple wildcard pattern, where '*' means "match any within a
 * path segment" and '**' means "match recursively", into the equivalent
 * Lua pattern.  Lua magic characters are %-escaped.
 *
 * Lua stack: arg 1 is the wildcard string; on success the converted
 * pattern is pushed and 1 is returned.  If the expansion would exceed the
 * internal buffer, a Lua error is raised.
 */
int path_wildcards(lua_State* L)
{
	size_t length, i;
	const char* input;
	char buffer[0x4000];
	char* output;

	input = luaL_checklstring(L, 1, &length);
	output = buffer;

	for (i = 0; i < length; ++i)
	{
		char c = input[i];

		/* Bounds check BEFORE writing: a single input character can
		 * expand to at most 5 output bytes ("[^/]*"), and one more
		 * byte is needed for the terminating NUL.  The old code
		 * checked only after writing, allowing a write past the end
		 * of the buffer, and never checked the final NUL at all. */
		if (output + 6 > buffer + sizeof(buffer))
		{
			lua_pushstring(L, "Wildcards expansion too big.");
			lua_error(L);
			return 0;
		}

		switch (c)
		{
		/* Escape Lua pattern magic characters. */
		case '+':
		case '.':
		case '-':
		case '^':
		case '$':
		case '(':
		case ')':
		case '%':
			*(output++) = '%';
			*(output++) = c;
			break;

		case '*':
			if ((i + 1) < length && input[i + 1] == '*')
			{
				i++; /* consume the second '*' */
				/* "**" matches anything, including '/'. */
				*(output++) = '.';
				*(output++) = '*';
			}
			else
			{
				/* "*" matches anything except '/'. */
				*(output++) = '[';
				*(output++) = '^';
				*(output++) = '/';
				*(output++) = ']';
				*(output++) = '*';
			}
			break;

		default:
			*(output++) = c;
			break;
		}
	}

	*(output++) = '\0';
	lua_pushstring(L, buffer);
	return 1;
}
| 17.670732 | 83 | 0.545204 |
76da2172dcda9cd253524c5646ea14547bf89251 | 6,421 | h | C | core/arch/arm/plat-rcar_gen4/platform_config.h | lorc/optee_os-rcar | c6f7ca0131063fcd93a80970d79adeae20ddd354 | [
"BSD-2-Clause"
] | 4 | 2017-12-27T01:52:12.000Z | 2021-06-09T02:16:09.000Z | core/arch/arm/plat-rcar_gen4/platform_config.h | lorc/optee_os-rcar | c6f7ca0131063fcd93a80970d79adeae20ddd354 | [
"BSD-2-Clause"
] | 5 | 2018-10-08T17:14:44.000Z | 2022-03-28T06:48:47.000Z | core/arch/arm/plat-rcar_gen4/platform_config.h | lorc/optee_os-rcar | c6f7ca0131063fcd93a80970d79adeae20ddd354 | [
"BSD-2-Clause"
] | 8 | 2017-05-24T16:47:25.000Z | 2022-02-05T00:22:59.000Z | /* SPDX-License-Identifier: BSD-2-Clause */
/*
* Copyright (c) 2016, GlobalLogic
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2021, Renesas Electronics Corporation.
*/
#ifndef PLATFORM_CONFIG_H
#define PLATFORM_CONFIG_H

/* Make stacks aligned to data cache line length */
#define STACK_ALIGNMENT		64

/* GIC interrupt controller CPU interface / distributor base addresses. */
#define GICC_BASE		0xF1060000U
#define GICD_BASE		0xF1000000U

/*
 * Last part of DRAM is reserved as secure dram, note that the last 2MiB
 * of DRAM0 is used by SCP for DDR retraining.
 */
#define TZDRAM_BASE		(0x44000000U)

/*
 * Should be
 * #define TZDRAM_SIZE 0x00FF8000
 * but is smaller due to SECTION_SIZE alignment, can be fixed once
 * OP-TEE OS is mapped using small pages instead.
 */
#define TZDRAM_SIZE		(0x02400000U)

#define TEE_RAM_VA_SIZE		(1024 * 1024 * 3)

/* Secure-world carve-out layout within TZDRAM. */
#define TEE_RAM_START		(0x44100000)	/* TEE RAM address */
#define TEE_RAM_PH_SIZE		(0x00300000U)	/* TEE RAM size */
#define TA_RAM_START		(0x44400000U)	/* TA RAM address */
#define TA_RAM_SIZE		(0x01800000U)	/* TA RAM size */
#define TEE_SHMEM_START		(0x47E00000U)	/* Share Memory address */
#define TEE_SHMEM_SIZE		(0x00100000U)	/* Share Memory size */
#define OPTEE_LOG_BASE		(0x45E00000U)	/* OP-TEE Log Area address */
#define OPTEE_LOG_NS_BASE	(0x47FEC000U)	/* OP-TEE Log Area NS addr */
#define OPTEE_LOG_NS_SIZE	(0x00014000U)	/* OP-TEE Log Area NS size */
#define TA_VERIFICATION_BASE	(0x45C00000U)	/* TA area for verification */
#define TA_VERIFICATION_SIZE	(0x00100000U)	/* TA verification size */
#define CRYPTOENGINE_WORK_BASE	(0x46000000U)	/* Crypto Enegine Work area */
#define NONCACHE_WORK_BASE	(0x45F00000U)	/* Non Cache Area address */
#define NONCACHE_WORK_SIZE	(0x00100000U)	/* Non Cache Area Size */

/* Peripheral register base addresses. */
#define LIFEC_BASE		(0xE6110000U)	/* Life Cycle address */
#define RST_BASE		(0xE6160000U)	/* Reset address */
#define CE_BASE			(0xE6600000U)	/* Crypto Engine address */
#define RPC_BASE		(0xEE200000U)	/* RPC address */
#define PRR_BASE		(0xFFF00000U)	/* Product Register address */
#define RPC_ADDR_MAP_BASE	(0x08000000U)	/* RPC Internal address */
#define RPC_ADDR_MAP_SIZE	(0x04000000U)	/* RPC Address Map size */
#define MEM_SECTION_SIZE	(0x00100000U)

/* Non-secure DDR ranges per board/DRAM flavor. */
#if defined(PLATFORM_FLAVOR_salvator_h3)
#define NSEC_DDR_0_BASE		0x47E00000
#define NSEC_DDR_0_SIZE		0x38200000
#define NSEC_DDR_1_BASE		0x500000000U
#define NSEC_DDR_1_SIZE		0x40000000
#define NSEC_DDR_2_BASE		0x600000000U
#define NSEC_DDR_2_SIZE		0x40000000
#define NSEC_DDR_3_BASE		0x700000000U
#define NSEC_DDR_3_SIZE		0x40000000
#elif defined(PLATFORM_FLAVOR_salvator_h3_4x2g)
#define NSEC_DDR_0_BASE		0x47E00000
#define NSEC_DDR_0_SIZE		0x78200000
#define NSEC_DDR_1_BASE		0x500000000U
#define NSEC_DDR_1_SIZE		0x80000000U
#define NSEC_DDR_2_BASE		0x600000000U
#define NSEC_DDR_2_SIZE		0x80000000U
#define NSEC_DDR_3_BASE		0x700000000U
#define NSEC_DDR_3_SIZE		0x80000000U
#elif defined(PLATFORM_FLAVOR_salvator_m3)
#define NSEC_DDR_0_BASE		0x47E00000
#define NSEC_DDR_0_SIZE		0x78200000
#define NSEC_DDR_1_BASE		0x600000000U
#define NSEC_DDR_1_SIZE		0x80000000U
#elif defined(PLATFORM_FLAVOR_salvator_m3_2x4g)
#define NSEC_DDR_0_BASE		0x47E00000
#define NSEC_DDR_0_SIZE		0x78200000
#define NSEC_DDR_1_BASE		0x400000000U
#define NSEC_DDR_1_SIZE		0x80000000U
#define NSEC_DDR_2_BASE		0x600000000U
#define NSEC_DDR_2_SIZE		0x100000000U
#else
/* Generic DT-based platform */
#endif

/* Static MMU mappings: DEVICEn = device regions, MEMORYn = RAM regions. */
#define DEVICE1_PA_BASE		ROUNDDOWN(CE_BASE, CORE_MMU_PGDIR_SIZE)
#define DEVICE1_SIZE		(MEM_SECTION_SIZE * 2)
#define DEVICE1_TYPE		MEM_AREA_IO_SEC

#define DEVICE2_PA_BASE		ROUNDDOWN(GICD_BASE, CORE_MMU_PGDIR_SIZE)
#define DEVICE2_SIZE		(MEM_SECTION_SIZE)
#define DEVICE2_TYPE		MEM_AREA_IO_SEC

/* LOG Area for Secure World */
#define MEMORY1_BASE		ROUNDDOWN(OPTEE_LOG_BASE, MEM_SECTION_SIZE)
#define MEMORY1_SIZE		(MEM_SECTION_SIZE)
#define MEMORY1_TYPE		MEM_AREA_IO_SEC

/* LOG Area for Normal World */
#define MEMORY2_BASE		(OPTEE_LOG_NS_BASE)
#define MEMORY2_SIZE		(OPTEE_LOG_NS_SIZE)
#define MEMORY2_TYPE		MEM_AREA_RAM_NSEC

/* Crypto Engine Work area */
#define MEMORY3_BASE		ROUNDDOWN(CRYPTOENGINE_WORK_BASE, \
					MEM_SECTION_SIZE)
#define MEMORY3_SIZE		(MEM_SECTION_SIZE)
#define MEMORY3_TYPE		MEM_AREA_RAM_SEC

/* TA area for verification */
#define MEMORY4_BASE		ROUNDDOWN(TA_VERIFICATION_BASE, \
					MEM_SECTION_SIZE)
#define MEMORY4_SIZE		(TA_VERIFICATION_SIZE)
#define MEMORY4_TYPE		MEM_AREA_IO_SEC

/* Product Register (PRR) */
#define MEMORY5_BASE		ROUNDDOWN(PRR_BASE, MEM_SECTION_SIZE)
#define MEMORY5_SIZE		(MEM_SECTION_SIZE)
#define MEMORY5_TYPE		MEM_AREA_IO_SEC

/* Non Cache Stack Area */
#define MEMORY6_BASE		ROUNDDOWN(NONCACHE_WORK_BASE, \
					NONCACHE_WORK_SIZE)
#define MEMORY6_SIZE		(MEM_SECTION_SIZE)
#define MEMORY6_TYPE		MEM_AREA_IO_SEC

/* SPI Multi I/O Bus Controller (RPC) */
#define MEMORY7_BASE		ROUNDDOWN(RPC_BASE, MEM_SECTION_SIZE)
#define MEMORY7_SIZE		(MEM_SECTION_SIZE)
#define MEMORY7_TYPE		MEM_AREA_IO_SEC

/* RPC Address Map */
#define MEMORY8_BASE		ROUNDDOWN(RPC_ADDR_MAP_BASE, MEM_SECTION_SIZE)
#define MEMORY8_SIZE		(RPC_ADDR_MAP_SIZE)
#define MEMORY8_TYPE		MEM_AREA_IO_SEC

#endif /*PLATFORM_CONFIG_H*/
| 35.871508 | 79 | 0.787572 |
f72d691903d8731ffc1c287144520d31493cde9b | 905 | h | C | Frameworks/VideoSubscriberAccount.framework/VSAJSSAMLResponse.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | 36 | 2016-04-20T04:19:04.000Z | 2018-10-08T04:12:25.000Z | Frameworks/VideoSubscriberAccount.framework/VSAJSSAMLResponse.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | null | null | null | Frameworks/VideoSubscriberAccount.framework/VSAJSSAMLResponse.h | shaojiankui/iOS10-Runtime-Headers | 6b0d842bed0c52c2a7c1464087b3081af7e10c43 | [
"MIT"
] | 10 | 2016-06-16T02:40:44.000Z | 2019-01-15T03:31:45.000Z | /* Generated by RuntimeBrowser
Image: /System/Library/Frameworks/VideoSubscriberAccount.framework/VideoSubscriberAccount
*/
// Runtime-dumped header (not hand-written API).  JavaScript-bridged model
// object carrying the URL/metadata pieces of a SAML single sign-on flow
// for video subscriber authentication.  Property semantics below are
// inferred from names only — NOTE(review): confirm against the framework.
@interface VSAJSSAMLResponse : IKJSObject <NSCopying, VSAJSSAMLResponse> {

    NSString * _authN;              // Authentication (AuthN) value/URL.
    NSString * _authZ;              // Authorization (AuthZ) value/URL.
    NSString * _logout;             // Logout value/URL.
    NSArray * _userChannelList;     // Channels the user is entitled to.
    NSString * _userMetadata;       // Opaque provider metadata blob.

}

@property (nonatomic, copy) NSString *authN;
@property (nonatomic, copy) NSString *authZ;
@property (nonatomic, copy) NSString *logout;
@property (nonatomic, copy) NSArray *userChannelList;
@property (nonatomic, copy) NSString *userMetadata;

- (void).cxx_destruct;
- (id)authN;
- (id)authZ;
- (id)copyWithZone:(struct _NSZone { }*)arg1;
- (id)init;
- (id)logout;
- (void)setAuthN:(id)arg1;
- (void)setAuthZ:(id)arg1;
- (void)setLogout:(id)arg1;
- (void)setUserChannelList:(id)arg1;
- (void)setUserMetadata:(id)arg1;
- (id)userChannelList;
- (id)userMetadata;

@end
| 26.617647 | 92 | 0.728177 |
f76ebcc1092e5f1bcf5b64e83ae58ab7c68be466 | 2,132 | h | C | Source/FSD/Public/CharacterStateComponent.h | trumank/DRG-Mods | 2febc879f2ffe83498ac913c114d0e933427e93e | [
"MIT"
] | null | null | null | Source/FSD/Public/CharacterStateComponent.h | trumank/DRG-Mods | 2febc879f2ffe83498ac913c114d0e933427e93e | [
"MIT"
] | null | null | null | Source/FSD/Public/CharacterStateComponent.h | trumank/DRG-Mods | 2febc879f2ffe83498ac913c114d0e933427e93e | [
"MIT"
] | null | null | null | #pragma once
#include "CoreMinimal.h"
#include "Components/ActorComponent.h"
#include "PlaySoundInterface.h"
#include "CharacterStateComponent.generated.h"
class UDialogDataAsset;
class APlayerCharacter;
class UPlayerMovementComponent;
// Abstract base for a player-character state in a state-machine setup:
// identified by StateId, with Blueprint-implementable enter/tick/exit
// events and optional dialog shouts played around state transitions.
UCLASS(Abstract, Blueprintable, ClassGroup=Custom, meta=(BlueprintSpawnableComponent))
class UCharacterStateComponent : public UActorComponent, public IPlaySoundInterface {
    GENERATED_BODY()
public:
protected:
    // Identifier of this state within the owning state machine.
    UPROPERTY(EditAnywhere)
    uint8 StateId;

    // Owning character; resolved at runtime (Transient).
    UPROPERTY(BlueprintReadWrite, EditAnywhere, Transient, meta=(AllowPrivateAccess=true))
    APlayerCharacter* Character;

    // Dialog lines tied to entering/leaving the state or calling attention.
    UPROPERTY(BlueprintReadWrite, EditAnywhere, meta=(AllowPrivateAccess=true))
    UDialogDataAsset* EnterStateShout;

    UPROPERTY(BlueprintReadWrite, EditAnywhere, meta=(AllowPrivateAccess=true))
    UDialogDataAsset* ExitStateShout;

    UPROPERTY(BlueprintReadWrite, EditAnywhere, meta=(AllowPrivateAccess=true))
    UDialogDataAsset* AttentionShout;

private:
    // Cached movement component of the owning character.
    UPROPERTY(BlueprintReadWrite, EditAnywhere, Export, Transient, meta=(AllowPrivateAccess=true))
    UPlayerMovementComponent* PlayerMovement;

public:
    UCharacterStateComponent();
protected:
    // Blueprint hooks fired while the state is active / on transitions.
    UFUNCTION(BlueprintCallable, BlueprintImplementableEvent)
    void ReceiveStateTick(float DeltaTime);

    UFUNCTION(BlueprintCallable, BlueprintImplementableEvent)
    void ReceiveStateExit();

    UFUNCTION(BlueprintCallable, BlueprintImplementableEvent)
    void ReceiveStateEnter();

public:
    UFUNCTION(BlueprintCallable, BlueprintPure)
    bool IsLocallyControlled() const;

    UFUNCTION(BlueprintCallable, BlueprintPure)
    bool HasAuthority() const;

    // Seconds since the state was entered.
    UFUNCTION(BlueprintCallable, BlueprintPure)
    float GetStateTime() const;

    UFUNCTION(BlueprintPure)
    uint8 GetStateID() const;

    UFUNCTION(BlueprintCallable, BlueprintPure)
    bool GetStateActive() const;

protected:
    UFUNCTION(BlueprintCallable, BlueprintPure)
    UPlayerMovementComponent* GetCharacterMovement() const;

    // Fix for true pure virtual functions not being implemented
};
| 30.028169 | 98 | 0.767355 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.