blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 4
721
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 5
91
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 321
values | visit_date
timestamp[ns]date 2016-08-12 09:31:09
2023-09-06 10:45:07
| revision_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| committer_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| github_id
int64 426
681M
| star_events_count
int64 101
243k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[ns]date 2012-06-28 18:51:49
2023-09-14 21:59:16
⌀ | gha_created_at
timestamp[ns]date 2008-02-11 22:55:26
2023-08-10 11:14:58
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 26
values | language
stringclasses 2
values | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 6
10.2M
| extension
stringclasses 115
values | filename
stringlengths 3
113
| content
stringlengths 6
10.2M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dfdcfdc5825c1a11bb051c9421205e7eb9ee4882
|
68e115d504decf90cf8adaf1c373a220c059d8d7
|
/Library/InterruptsLib/Interrupts.c
|
37f74acca36c334387824194c58acf6fb6c5da57
|
[
"GPL-1.0-or-later",
"GPL-2.0-only",
"BSD-2-Clause"
] |
permissive
|
WOA-Project/Lumia950XLPkg
|
24fcad187d71488270b853d08fcdff542bbac5a2
|
0e7e24adb2ac80672ade1e56f9b538d99941328f
|
refs/heads/master
| 2023-08-11T09:20:00.600571
| 2022-09-14T09:09:51
| 2022-09-14T09:09:51
| 132,070,715
| 264
| 64
|
BSD-2-Clause
| 2022-12-18T20:27:21
| 2018-05-04T01:44:48
|
C
|
UTF-8
|
C
| false
| false
| 2,947
|
c
|
Interrupts.c
|
#include <PiDxe.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/InterruptsLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/UefiBootServicesTableLib.h>
#include <Protocol/HardwareInterrupt.h>
// Binds one hardware interrupt vector to its registered callback.
typedef struct {
UINTN Vector;        // hardware interrupt source number this entry handles
int_handler Handler; // callback invoked when the vector fires
VOID * Arg;          // opaque argument handed back to Handler on every call
LIST_ENTRY Link;     // node linking this entry into the global mHandlers list
} HANDLER_ENTRY;
// Cached hardware interrupt protocol; resolved once in InterruptsLibConstructor.
STATIC EFI_HARDWARE_INTERRUPT_PROTOCOL *mInterrupt = NULL;
// Head of the doubly linked list of registered HANDLER_ENTRY records.
STATIC LIST_ENTRY mHandlers = INITIALIZE_LIST_HEAD_VARIABLE(mHandlers);
/**
  Look up the registered handler entry for an interrupt vector.

  The global handler list is walked at TPL_HIGH_LEVEL so the traversal
  cannot be interrupted while the list is being read.

  @param Vector  Hardware interrupt vector to search for.

  @return The matching HANDLER_ENTRY, or NULL when no handler is
          registered for Vector.
**/
STATIC HANDLER_ENTRY *GetInterruptHandlerEntry(UINTN Vector)
{
  HANDLER_ENTRY *Found = NULL;
  LIST_ENTRY    *Node;
  EFI_TPL        Tpl;

  Tpl = gBS->RaiseTPL(TPL_HIGH_LEVEL);

  for (Node = mHandlers.ForwardLink; Node != &mHandlers;
       Node = Node->ForwardLink) {
    HANDLER_ENTRY *Candidate = BASE_CR(Node, HANDLER_ENTRY, Link);
    if (Candidate->Vector == Vector) {
      Found = Candidate;
      break;
    }
  }

  gBS->RestoreTPL(Tpl);
  return Found;
}
/**
  Common dispatch routine registered with the hardware interrupt protocol.

  Finds the HANDLER_ENTRY for the firing source, invokes its callback at
  TPL_HIGH_LEVEL, then signals end-of-interrupt to the controller.

  @param Source         Hardware interrupt source that fired.
  @param SystemContext  CPU context at the time of the interrupt (unused here).
**/
VOID EFIAPI InterruptsLibIrqHandler(
IN HARDWARE_INTERRUPT_SOURCE Source, IN EFI_SYSTEM_CONTEXT SystemContext)
{
EFI_TPL OriginalTPL;
// get handler entry; a firing source must have been registered first
HANDLER_ENTRY *Entry = GetInterruptHandlerEntry((UINTN)Source);
ASSERT(Entry);
OriginalTPL = gBS->RaiseTPL(TPL_HIGH_LEVEL);
// call handler, ignore the return value because we don't support threads
Entry->Handler(Entry->Arg);
gBS->RestoreTPL(OriginalTPL);
// signal eoi AFTER the handler ran, so the source cannot re-fire mid-handler
mInterrupt->EndOfInterrupt(mInterrupt, Source);
}
/**
  Disable (mask) a hardware interrupt source.

  @param Vector  Interrupt vector to mask.

  @return 0 on success, -1 on failure.
**/
INTN mask_interrupt(UINTN Vector)
{
  EFI_STATUS Status;

  Status = mInterrupt->DisableInterruptSource(mInterrupt, Vector);
  ASSERT_EFI_ERROR(Status);

  if (Status == EFI_SUCCESS) {
    return 0;
  }
  return -1;
}
/**
  Enable (unmask) a hardware interrupt source.

  @param Vector  Interrupt vector to unmask.

  @return 0 on success, -1 on failure.
**/
INTN unmask_interrupt(UINTN Vector)
{
  EFI_STATUS Status;

  Status = mInterrupt->EnableInterruptSource(mInterrupt, Vector);
  ASSERT_EFI_ERROR(Status);

  if (Status == EFI_SUCCESS) {
    return 0;
  }
  return -1;
}
/**
  Register Handler as the interrupt handler for Vector.

  The source is registered with the hardware interrupt protocol and left
  MASKED; callers enable it afterwards via unmask_interrupt().

  @param Vector   Hardware interrupt vector to attach the handler to.
  @param Handler  Callback invoked from InterruptsLibIrqHandler when the
                  vector fires.
  @param Arg      Opaque pointer handed back to Handler on every invocation.
**/
VOID register_int_handler(UINTN Vector, int_handler Handler, VOID *Arg)
{
  EFI_STATUS Status;
  EFI_TPL OriginalTPL;
  HANDLER_ENTRY *Entry;

  // make sure, we didn't register a handler already
  Entry = GetInterruptHandlerEntry(Vector);
  ASSERT(Entry == NULL);

  // allocate entry structure
  Entry = AllocateZeroPool(sizeof(HANDLER_ENTRY));
  if (Entry == NULL) {
    ASSERT(FALSE);
    return;
  }

  // fill the entry completely BEFORE it becomes reachable from the list
  Entry->Vector = Vector;
  Entry->Handler = Handler;
  Entry->Arg = Arg;

  OriginalTPL = gBS->RaiseTPL(TPL_HIGH_LEVEL);

  // BUGFIX: insert while at TPL_HIGH_LEVEL. Previously the insertion
  // happened before RaiseTPL, so an interrupt firing mid-insertion could
  // walk mHandlers (via GetInterruptHandlerEntry) while the list was in a
  // half-linked state.
  InsertTailList(&mHandlers, &Entry->Link);

  // register interrupt source
  Status = mInterrupt->RegisterInterruptSource(
      mInterrupt, Vector, InterruptsLibIrqHandler);
  ASSERT_EFI_ERROR(Status);

  // start disabled; callers unmask explicitly when ready
  mask_interrupt(Vector);

  gBS->RestoreTPL(OriginalTPL);
}
/**
  Library constructor: caches the hardware interrupt protocol instance
  used by every other routine in this library.

  @return EFI_SUCCESS when the protocol was located, an error code otherwise.
**/
RETURN_STATUS
EFIAPI
InterruptsLibConstructor(VOID)
{
  EFI_STATUS Status = gBS->LocateProtocol(
      &gHardwareInterruptProtocolGuid, NULL, (VOID **)&mInterrupt);
  ASSERT_EFI_ERROR(Status);
  return Status;
}
|
d9819b64005138d996f5fe90761f9a9e8bb8aa4f
|
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
|
/SOFTWARE/A64-TERES/linux-a64/arch/arc/include/asm/delay.h
|
43de302569815073bb4d4f23cb98b60a5e0c0552
|
[
"Linux-syscall-note",
"GPL-2.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
OLIMEX/DIY-LAPTOP
|
ae82f4ee79c641d9aee444db9a75f3f6709afa92
|
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
|
refs/heads/rel3
| 2023-08-04T01:54:19.483792
| 2023-04-03T07:18:12
| 2023-04-03T07:18:12
| 80,094,055
| 507
| 92
|
Apache-2.0
| 2023-04-03T07:05:59
| 2017-01-26T07:25:50
|
C
|
UTF-8
|
C
| false
| false
| 1,929
|
h
|
delay.h
|
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Delay routines using pre computed loops_per_jiffy value.
*
* vineetg: Feb 2012
* -Rewrote in "C" to avoid dealing with availability of H/w MPY
* -Also reduced the num of MPY operations from 3 to 2
*
* Amit Bhor: Codito Technologies 2004
*/
#ifndef __ASM_ARC_UDELAY_H
#define __ASM_ARC_UDELAY_H
#include <asm/param.h> /* HZ */
/*
 * Busy-wait for `loops` iterations of a two-instruction ARC loop:
 * "sub.f" decrements the counter and sets flags, "jpnz" branches back
 * while it is non-zero. "+r" keeps the counter in a register across the
 * asm; "cc" declares the condition-code clobber.
 */
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(
"1: sub.f %0, %0, 1 \n"
" jpnz 1b \n"
: "+r"(loops)
:
: "cc");
}
extern void __bad_udelay(void);
/*
* Normal Math for computing loops in "N" usecs
* -we have precomputed @loops_per_jiffy
* -1 sec has HZ jiffies
* loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
*
* Approximate Division by multiplication:
* -Mathematically if we multiply and divide a number by same value the
* result remains unchanged: In this case, we use 2^32
* -> (loops_per_N_usec * 2^32 ) / 2^32
* -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
* -> (loops_per_jiffy * HZ * N * 4295) / 2^32
*
* -Divide by 2^32 is very simply right shift by 32
* -We simply need to ensure that the multiply per above eqn happens in
* 64-bit precision (if CPU doesn't support it - gcc can emaulate it)
*/
/*
 * Convert `usecs` microseconds into a loop count and spin via __delay().
 * Uses the approximation derived in the comment block above:
 *   loops = (usecs * HZ * 4295 * loops_per_jiffy) >> 32
 * where 4295 ~= 2^32 / 1000000, so the >> 32 replaces the divide.
 */
static inline void __udelay(unsigned long usecs)
{
unsigned long loops;
/* (u64) cast ensures 64 bit MPY - real or emulated
* HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
*/
loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
__delay(loops);
}
/*
 * Reject compile-time-constant delays longer than 20000 us at link time:
 * __bad_udelay() is declared but intentionally never defined, so such a
 * call fails the build. Non-constant arguments go straight to __udelay().
 */
#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
: __udelay(n)) : __udelay(n))
#endif /* __ASM_ARC_UDELAY_H */
|
dee9422c37b4d7d62f59541ef2a3f7bfd9fcc243
|
476ec976648ebb17c7e7df0e944910e3a36e4ea1
|
/2D/print_text.c
|
933d7e319f3244e8d739c72cbea03f0649ac204a
|
[] |
no_license
|
tom-2015/rpi-ws2812-server
|
dca65ea96a76301cd11a379ab7cdb4766e8aa89e
|
e76291fda909487fa199fd4129510dc3440ae4c6
|
refs/heads/master
| 2023-01-11T08:48:17.016626
| 2022-12-31T15:29:00
| 2022-12-31T15:29:00
| 30,657,642
| 161
| 44
| null | 2022-12-29T18:17:33
| 2015-02-11T16:34:19
|
C
|
UTF-8
|
C
| false
| false
| 3,923
|
c
|
print_text.c
|
#include "print_text.h"
//prints text at x,y
//pixel_color <channel>,<x>,<y>,<text>,<color>,<font_size>,<font_anti_alias>,<options>,<font>,<operator>
// Draws text at (x,y) on a 2D channel using cairo. When <font> names an
// existing file it is loaded through FreeType; otherwise it is treated as
// a system font family name.
// Command syntax:
// print_text <channel>,<x>,<y>,<text>,<color>,<font_size>,<font_anti_alias>,<options>,<font>,<operator>
// options: bit 0 = bold (family path only), bit 1 = disable font hinting
void print_text(thread_context* context, char* args) {
	int channel = 0, x = 0, y = 0, color = 0, font_size = 8, font_anti_alias = CAIRO_ANTIALIAS_NONE, options = 0, cairo_op = CAIRO_OPERATOR_SOURCE;
	char text[MAX_VAL_LEN] = { 0 };
	char font[MAX_VAL_LEN] = { 0 };

	args = read_channel(args, &channel);
	if (!is_valid_2D_channel_number(channel)) {
		fprintf(stderr, ERROR_INVALID_2D_CHANNEL);
		return;
	}

	// parse the remaining comma-separated arguments
	args = read_int(args, &x);
	args = read_int(args, &y);
	args = read_str(args, text, sizeof(text));
	args = read_color_arg(args, &color, 4);
	args = read_int(args, &font_size);
	args = read_int(args, &font_anti_alias);
	args = read_int(args, &options);
	args = read_str(args, font, sizeof(font));
	args = read_int(args, &cairo_op);
	if (font[0] == 0) strcpy(font, DEFAULT_FONT);

	if (debug) printf("print text %d,%d,%d,%s,%d,%d,%d,%d,%s,%d\n", channel, x, y, text, color, font_size, font_anti_alias, options, font, cairo_op);

	// NOTE(review): assumes get_channel() returns a valid channel_info with a
	// live cairo context once is_valid_2D_channel_number() passed — confirm.
	channel_info * led_channel = get_channel(channel);
	cairo_t* cr = led_channel->cr;
	cairo_save(cr);
	cairo_set_operator(cr, cairo_op);
	set_cairo_color_rgba(cr, color);

	cairo_font_options_t* font_options = cairo_font_options_create();
	cairo_font_options_set_antialias(font_options, font_anti_alias); //CAIRO_ANTIALIAS_NONE

	FT_Face ft_face;
	bool using_freetype_lib = false;
	cairo_font_face_t* cairo_ft_face;
	if (file_exists(font)) { //check if file exists and load from file
		if (!init_ft_lib(context)) {
			cairo_font_options_destroy(font_options); // BUGFIX: was leaked on this error path
			cairo_restore(cr);
			return;
		}
		FT_Error status = FT_New_Face(context->ft_lib, font, 0, &ft_face);
		if (status != 0) {
			fprintf(stderr, "Error %d opening font %s.\n", status, font);
			cairo_font_options_destroy(font_options); // BUGFIX: was leaked on this error path
			cairo_restore(cr);
			return;
		}
		cairo_ft_face = cairo_ft_font_face_create_for_ft_face(ft_face, 0);
		cairo_set_font_face(cr, cairo_ft_face);
		using_freetype_lib = true;
		if (debug) printf("using free type file %s.\n", font);
	} else {
		// fall back to a cairo "toy" font family lookup
		if (options & 1) cairo_select_font_face(cr, font, CAIRO_FONT_SLANT_NORMAL, CAIRO_FONT_WEIGHT_BOLD);
		else cairo_select_font_face(cr, font, CAIRO_FONT_SLANT_NORMAL, CAIRO_FONT_WEIGHT_NORMAL);
		if (options & 2) cairo_font_options_set_hint_style(font_options, CAIRO_HINT_STYLE_NONE);
		if (debug) printf("Cairo font family %s.\n", font);
	}

	cairo_set_font_options(cr, font_options);
	cairo_set_font_size(cr, font_size);
	cairo_move_to(cr, x, y);
	cairo_show_text(cr, text);
	cairo_font_options_destroy(font_options);

	if (using_freetype_lib) {
		// Tie FT_Done_Face to the cairo font face lifetime, per
		// https://www.cairographics.org/manual/cairo-FreeType-Fonts.html#cairo-ft-font-face-create-for-ft-face
		static const cairo_user_data_key_t key;
		cairo_status_t status = cairo_font_face_set_user_data(cairo_ft_face, &key, ft_face, (cairo_destroy_func_t)FT_Done_Face);
		if (status) {
			cairo_font_face_destroy(cairo_ft_face);
			FT_Done_Face(ft_face);
		}
	}
	cairo_restore(cr);
}
|
ed1ce9737509f76293d30445758a3caf623b0559
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/aten/src/ATen/native/quantized/cpu/qnnpack/src/requantization/gemmlowp-scalar.c
|
84e2fc54e3cf7ff59c23008321fb6083d5000228
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
C
| false
| false
| 2,809
|
c
|
gemmlowp-scalar.c
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <assert.h>
#include <stdint.h>
#include <fp16/bitcasts.h>
#include <qnnpack/requantization-stubs.h>
#include <qnnpack/scalar-utils.h>
#include "gemmlowp-scalar.h"
/*
 * Requantize n int32 values to uint8 using the gemmlowp scalar scheme:
 * fixed-point multiply (vqrdmulh), rounding divide by a power of two,
 * zero-point bias, then clamp to [qmin, qmax]. n must be a multiple of 4.
 */
void pytorch_qnnp_requantize_gemmlowp__scalar(
    size_t n,
    const int32_t* input,
    float scale,
    uint8_t zero_point,
    uint8_t qmin,
    uint8_t qmax,
    uint8_t* output) {
  assert(n % 4 == 0);
  assert(scale < 1.0f);
  assert(scale >= 0x1.0p-32f);

  const uint32_t scale_bits = fp32_to_bits(scale);

  /* Compute requantization parameters: mantissa with the implicit bit
   * restored, positioned as a Q31-style multiplier, plus the matching
   * right-shift amount. */
  const uint32_t multiplier =
      ((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7;
  const int32_t exponent = (fp32_to_bits(scale) >> 23) - 127 - 23 - 7;
  const int32_t shift =
      -(32 /* using high 32 bits in VQRDMUL */ - 1 /* doubling in VQRDMUL */ +
        exponent);
  const int32_t smin = (int32_t)(uint32_t)qmin;
  const int32_t smax = (int32_t)(uint32_t)qmax;

  while (n != 0) {
    /* Process one group of 4 elements per outer iteration. */
    for (size_t i = 0; i < 4; i++) {
      const int32_t value = input[i];
      const int32_t product = gemmlowp_scalar_vqrdmulh_s32(value, multiplier);
      const int32_t scaled = gemmlowp_scalar_rdivbypo2_s32(product, shift);

      /* Add zero point, then clamp into the representable range. */
      int32_t result = scaled + zero_point;
      if (result < smin) {
        result = smin;
      } else if (result > smax) {
        result = smax;
      }
      output[i] = (uint8_t)result;
    }
    input += 4;
    output += 4;
    n -= 4;
  }
}
|
cd1c997823e3be663c6b05779fa21ad381b94c5f
|
c8b39acfd4a857dc15ed3375e0d93e75fa3f1f64
|
/Engine/Source/Editor/PropertyEditor/Public/PropertyEditing.h
|
90238f15c5e69518bfc21cc506e06919db1a267f
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
windystrife/UnrealEngine_NVIDIAGameWorks
|
c3c7863083653caf1bc67d3ef104fb4b9f302e2a
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
refs/heads/4.18-GameWorks
| 2023-03-11T02:50:08.471040
| 2022-01-13T20:50:29
| 2022-01-13T20:50:29
| 124,100,479
| 262
| 179
|
MIT
| 2022-12-16T05:36:38
| 2018-03-06T15:44:09
|
C++
|
UTF-8
|
C
| false
| false
| 627
|
h
|
PropertyEditing.h
|
// Copyright 1998-2017 Epic Games, Inc. All Rights Reserved.
#pragma once
#include "Misc/MonolithicHeaderBoilerplate.h"
MONOLITHIC_HEADER_BOILERPLATE()
#include "PropertyEditorDelegates.h"
#include "PropertyEditorModule.h"
#include "PropertyHandle.h"
#include "DetailWidgetRow.h"
#include "IDetailGroup.h"
#include "IDetailCustomNodeBuilder.h"
#include "IDetailPropertyRow.h"
#include "DetailLayoutBuilder.h"
#include "DetailCategoryBuilder.h"
#include "IPropertyTypeCustomization.h"
#include "IDetailChildrenBuilder.h"
#include "IDetailCustomization.h"
#include "IDetailsView.h"
#include "IDetailRootObjectCustomization.h"
|
16408d56d57365190fcc13e606f5bb471bde4b6a
|
fb47ab6337a71029dee71933e449cf7f6805fc0f
|
/external/platform/cc13xx/cc13xxware/inc/hw_cpu_dwt.h
|
11de7120b9522773c0dc3afa877e0bae8acfe9a8
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
littlekernel/lk
|
7e7ba50b87b1f2e0b6e2f052c59249825c91975b
|
30dc320054f70910e1c1ee40a6948ee99672acec
|
refs/heads/master
| 2023-09-02T00:47:52.203963
| 2023-06-21T22:42:35
| 2023-06-21T22:42:35
| 3,058,456
| 3,077
| 618
|
MIT
| 2023-08-30T09:41:31
| 2011-12-27T19:19:36
|
C
|
UTF-8
|
C
| false
| false
| 41,821
|
h
|
hw_cpu_dwt.h
|
/******************************************************************************
* Filename: hw_cpu_dwt_h
* Revised: 2015-11-12 13:07:02 +0100 (Thu, 12 Nov 2015)
* Revision: 45056
*
* Copyright (c) 2015, Texas Instruments Incorporated
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1) Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2) Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3) Neither the name of the ORGANIZATION nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#ifndef __HW_CPU_DWT_H__
#define __HW_CPU_DWT_H__
//*****************************************************************************
//
// This section defines the register offsets of
// CPU_DWT component
//
//*****************************************************************************
// Control
#define CPU_DWT_O_CTRL 0x00000000
// Current PC Sampler Cycle Count
#define CPU_DWT_O_CYCCNT 0x00000004
// CPI Count
#define CPU_DWT_O_CPICNT 0x00000008
// Exception Overhead Count
#define CPU_DWT_O_EXCCNT 0x0000000C
// Sleep Count
#define CPU_DWT_O_SLEEPCNT 0x00000010
// LSU Count
#define CPU_DWT_O_LSUCNT 0x00000014
// Fold Count
#define CPU_DWT_O_FOLDCNT 0x00000018
// Program Counter Sample
#define CPU_DWT_O_PCSR 0x0000001C
// Comparator 0
#define CPU_DWT_O_COMP0 0x00000020
// Mask 0
#define CPU_DWT_O_MASK0 0x00000024
// Function 0
#define CPU_DWT_O_FUNCTION0 0x00000028
// Comparator 1
#define CPU_DWT_O_COMP1 0x00000030
// Mask 1
#define CPU_DWT_O_MASK1 0x00000034
// Function 1
#define CPU_DWT_O_FUNCTION1 0x00000038
// Comparator 2
#define CPU_DWT_O_COMP2 0x00000040
// Mask 2
#define CPU_DWT_O_MASK2 0x00000044
// Function 2
#define CPU_DWT_O_FUNCTION2 0x00000048
// Comparator 3
#define CPU_DWT_O_COMP3 0x00000050
// Mask 3
#define CPU_DWT_O_MASK3 0x00000054
// Function 3
#define CPU_DWT_O_FUNCTION3 0x00000058
//*****************************************************************************
//
// Register: CPU_DWT_O_CTRL
//
//*****************************************************************************
// Field: [25] NOCYCCNT
//
// When set, CYCCNT is not supported.
#define CPU_DWT_CTRL_NOCYCCNT 0x02000000
#define CPU_DWT_CTRL_NOCYCCNT_BITN 25
#define CPU_DWT_CTRL_NOCYCCNT_M 0x02000000
#define CPU_DWT_CTRL_NOCYCCNT_S 25
// Field: [24] NOPRFCNT
//
// When set, FOLDCNT, LSUCNT, SLEEPCNT, EXCCNT, and CPICNT are not supported.
#define CPU_DWT_CTRL_NOPRFCNT 0x01000000
#define CPU_DWT_CTRL_NOPRFCNT_BITN 24
#define CPU_DWT_CTRL_NOPRFCNT_M 0x01000000
#define CPU_DWT_CTRL_NOPRFCNT_S 24
// Field: [22] CYCEVTENA
//
// Enables Cycle count event. Emits an event when the POSTCNT counter triggers
// it. See CYCTAP and POSTPRESET for details. This event is only emitted if
// PCSAMPLEENA is disabled. PCSAMPLEENA overrides the setting of this bit.
//
// 0: Cycle count events disabled
// 1: Cycle count events enabled
#define CPU_DWT_CTRL_CYCEVTENA 0x00400000
#define CPU_DWT_CTRL_CYCEVTENA_BITN 22
#define CPU_DWT_CTRL_CYCEVTENA_M 0x00400000
#define CPU_DWT_CTRL_CYCEVTENA_S 22
// Field: [21] FOLDEVTENA
//
// Enables Folded instruction count event. Emits an event when FOLDCNT
// overflows (every 256 cycles of folded instructions). A folded instruction is
// one that does not incur even one cycle to execute. For example, an IT
// instruction is folded away and so does not use up one cycle.
//
// 0: Folded instruction count events disabled.
// 1: Folded instruction count events enabled.
#define CPU_DWT_CTRL_FOLDEVTENA 0x00200000
#define CPU_DWT_CTRL_FOLDEVTENA_BITN 21
#define CPU_DWT_CTRL_FOLDEVTENA_M 0x00200000
#define CPU_DWT_CTRL_FOLDEVTENA_S 21
// Field: [20] LSUEVTENA
//
// Enables LSU count event. Emits an event when LSUCNT overflows (every 256
// cycles of LSU operation). LSU counts include all LSU costs after the initial
// cycle for the instruction.
//
// 0: LSU count events disabled.
// 1: LSU count events enabled.
#define CPU_DWT_CTRL_LSUEVTENA 0x00100000
#define CPU_DWT_CTRL_LSUEVTENA_BITN 20
#define CPU_DWT_CTRL_LSUEVTENA_M 0x00100000
#define CPU_DWT_CTRL_LSUEVTENA_S 20
// Field: [19] SLEEPEVTENA
//
// Enables Sleep count event. Emits an event when SLEEPCNT overflows (every 256
// cycles that the processor is sleeping).
//
// 0: Sleep count events disabled.
// 1: Sleep count events enabled.
#define CPU_DWT_CTRL_SLEEPEVTENA 0x00080000
#define CPU_DWT_CTRL_SLEEPEVTENA_BITN 19
#define CPU_DWT_CTRL_SLEEPEVTENA_M 0x00080000
#define CPU_DWT_CTRL_SLEEPEVTENA_S 19
// Field: [18] EXCEVTENA
//
// Enables Interrupt overhead event. Emits an event when EXCCNT overflows
// (every 256 cycles of interrupt overhead).
//
// 0x0: Interrupt overhead event disabled.
// 0x1: Interrupt overhead event enabled.
#define CPU_DWT_CTRL_EXCEVTENA 0x00040000
#define CPU_DWT_CTRL_EXCEVTENA_BITN 18
#define CPU_DWT_CTRL_EXCEVTENA_M 0x00040000
#define CPU_DWT_CTRL_EXCEVTENA_S 18
// Field: [17] CPIEVTENA
//
// Enables CPI count event. Emits an event when CPICNT overflows (every 256
// cycles of multi-cycle instructions).
//
// 0: CPI counter events disabled.
// 1: CPI counter events enabled.
#define CPU_DWT_CTRL_CPIEVTENA 0x00020000
#define CPU_DWT_CTRL_CPIEVTENA_BITN 17
#define CPU_DWT_CTRL_CPIEVTENA_M 0x00020000
#define CPU_DWT_CTRL_CPIEVTENA_S 17
// Field: [16] EXCTRCENA
//
// Enables Interrupt event tracing.
//
// 0: Interrupt event trace disabled.
// 1: Interrupt event trace enabled.
#define CPU_DWT_CTRL_EXCTRCENA 0x00010000
#define CPU_DWT_CTRL_EXCTRCENA_BITN 16
#define CPU_DWT_CTRL_EXCTRCENA_M 0x00010000
#define CPU_DWT_CTRL_EXCTRCENA_S 16
// Field: [12] PCSAMPLEENA
//
// Enables PC Sampling event. A PC sample event is emitted when the POSTCNT
// counter triggers it. See CYCTAP and POSTPRESET for details. Enabling this
// bit overrides CYCEVTENA.
//
// 0: PC Sampling event disabled.
// 1: Sampling event enabled.
#define CPU_DWT_CTRL_PCSAMPLEENA 0x00001000
#define CPU_DWT_CTRL_PCSAMPLEENA_BITN 12
#define CPU_DWT_CTRL_PCSAMPLEENA_M 0x00001000
#define CPU_DWT_CTRL_PCSAMPLEENA_S 12
// Field: [11:10] SYNCTAP
//
// Selects a synchronization packet rate. CYCCNTENA and CPU_ITM:TCR.SYNCENA
// must also be enabled for this feature.
// Synchronization packets (if enabled) are generated on tap transitions (0 to1
// or 1 to 0).
// ENUMs:
// BIT28 Tap at bit 28 of CYCCNT
// BIT26 Tap at bit 26 of CYCCNT
// BIT24 Tap at bit 24 of CYCCNT
// DIS Disabled. No synchronization packets
#define CPU_DWT_CTRL_SYNCTAP_W 2
#define CPU_DWT_CTRL_SYNCTAP_M 0x00000C00
#define CPU_DWT_CTRL_SYNCTAP_S 10
#define CPU_DWT_CTRL_SYNCTAP_BIT28 0x00000C00
#define CPU_DWT_CTRL_SYNCTAP_BIT26 0x00000800
#define CPU_DWT_CTRL_SYNCTAP_BIT24 0x00000400
#define CPU_DWT_CTRL_SYNCTAP_DIS 0x00000000
// Field: [9] CYCTAP
//
// Selects a tap on CYCCNT. These are spaced at bits [6] and [10]. When the
// selected bit in CYCCNT changes from 0 to 1 or 1 to 0, it emits into the
// POSTCNT, post-scalar counter. That counter then counts down. On a bit change
// when post-scalar is 0, it triggers an event for PC sampling or cycle count
// event (see details in CYCEVTENA).
// ENUMs:
// BIT10 Selects bit [10] to tap
// BIT6 Selects bit [6] to tap
#define CPU_DWT_CTRL_CYCTAP 0x00000200
#define CPU_DWT_CTRL_CYCTAP_BITN 9
#define CPU_DWT_CTRL_CYCTAP_M 0x00000200
#define CPU_DWT_CTRL_CYCTAP_S 9
#define CPU_DWT_CTRL_CYCTAP_BIT10 0x00000200
#define CPU_DWT_CTRL_CYCTAP_BIT6 0x00000000
// Field: [8:5] POSTCNT
//
// Post-scalar counter for CYCTAP. When the selected tapped bit changes from 0
// to 1 or 1 to 0, the post scalar counter is down-counted when not 0. If 0, it
// triggers an event for PCSAMPLEENA or CYCEVTENA use. It also reloads with the
// value from POSTPRESET.
#define CPU_DWT_CTRL_POSTCNT_W 4
#define CPU_DWT_CTRL_POSTCNT_M 0x000001E0
#define CPU_DWT_CTRL_POSTCNT_S 5
// Field: [4:1] POSTPRESET
//
// Reload value for post-scalar counter POSTCNT. When 0, events are triggered
// on each tap change (a power of 2). If this field has a non-0 value, it forms
// a count-down value, to be reloaded into POSTCNT each time it reaches 0. For
// example, a value 1 in this register means an event is formed every other tap
// change.
#define CPU_DWT_CTRL_POSTPRESET_W 4
#define CPU_DWT_CTRL_POSTPRESET_M 0x0000001E
#define CPU_DWT_CTRL_POSTPRESET_S 1
// Field: [0] CYCCNTENA
//
// Enable CYCCNT, allowing it to increment and generate synchronization and
// count events. If NOCYCCNT = 1, this bit reads zero and ignore writes.
#define CPU_DWT_CTRL_CYCCNTENA 0x00000001
#define CPU_DWT_CTRL_CYCCNTENA_BITN 0
#define CPU_DWT_CTRL_CYCCNTENA_M 0x00000001
#define CPU_DWT_CTRL_CYCCNTENA_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_CYCCNT
//
//*****************************************************************************
// Field: [31:0] CYCCNT
//
// Current PC Sampler Cycle Counter count value. When enabled, this counter
// counts the number of core cycles, except when the core is halted. The cycle
// counter is a free running counter, counting upwards (this counter will not
// advance in power modes where free-running clock to CPU stops). It wraps
// around to 0 on overflow. The debugger must initialize this to 0 when first
// enabling.
#define CPU_DWT_CYCCNT_CYCCNT_W 32
#define CPU_DWT_CYCCNT_CYCCNT_M 0xFFFFFFFF
#define CPU_DWT_CYCCNT_CYCCNT_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_CPICNT
//
//*****************************************************************************
// Field: [7:0] CPICNT
//
// Current CPI counter value. Increments on the additional cycles (the first
// cycle is not counted) required to execute all instructions except those
// recorded by LSUCNT. This counter also increments on all instruction fetch
// stalls. If CTRL.CPIEVTENA is set, an event is emitted when the counter
// overflows. This counter initializes to 0 when it is enabled using
// CTRL.CPIEVTENA.
#define CPU_DWT_CPICNT_CPICNT_W 8
#define CPU_DWT_CPICNT_CPICNT_M 0x000000FF
#define CPU_DWT_CPICNT_CPICNT_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_EXCCNT
//
//*****************************************************************************
// Field: [7:0] EXCCNT
//
// Current interrupt overhead counter value. Counts the total cycles spent in
// interrupt processing (for example entry stacking, return unstacking,
// pre-emption). An event is emitted on counter overflow (every 256 cycles).
// This counter initializes to 0 when it is enabled using CTRL.EXCEVTENA.
#define CPU_DWT_EXCCNT_EXCCNT_W 8
#define CPU_DWT_EXCCNT_EXCCNT_M 0x000000FF
#define CPU_DWT_EXCCNT_EXCCNT_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_SLEEPCNT
//
//*****************************************************************************
// Field: [7:0] SLEEPCNT
//
// Sleep counter. Counts the number of cycles during which the processor is
// sleeping. An event is emitted on counter overflow (every 256 cycles). This
// counter initializes to 0 when it is enabled using CTRL.SLEEPEVTENA. Note
// that the sleep counter is clocked using CPU's free-running clock. In some
// power modes the free-running clock to CPU is gated to minimize power
// consumption. This means that the sleep counter will be invalid in these
// power modes.
#define CPU_DWT_SLEEPCNT_SLEEPCNT_W 8
#define CPU_DWT_SLEEPCNT_SLEEPCNT_M 0x000000FF
#define CPU_DWT_SLEEPCNT_SLEEPCNT_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_LSUCNT
//
//*****************************************************************************
// Field: [7:0] LSUCNT
//
// LSU counter. This counts the total number of cycles that the processor is
// processing an LSU operation. The initial execution cost of the instruction
// is not counted. For example, an LDR that takes two cycles to complete
// increments this counter one cycle. Equivalently, an LDR that stalls for two
// cycles (i.e. takes four cycles to execute), increments this counter three
// times. An event is emitted on counter overflow (every 256 cycles). This
// counter initializes to 0 when it is enabled using CTRL.LSUEVTENA.
#define CPU_DWT_LSUCNT_LSUCNT_W 8
#define CPU_DWT_LSUCNT_LSUCNT_M 0x000000FF
#define CPU_DWT_LSUCNT_LSUCNT_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_FOLDCNT
//
//*****************************************************************************
// Field: [7:0] FOLDCNT
//
// This counts the total number folded instructions. This counter initializes
// to 0 when it is enabled using CTRL.FOLDEVTENA.
#define CPU_DWT_FOLDCNT_FOLDCNT_W 8
#define CPU_DWT_FOLDCNT_FOLDCNT_M 0x000000FF
#define CPU_DWT_FOLDCNT_FOLDCNT_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_PCSR
//
//*****************************************************************************
// Field: [31:0] EIASAMPLE
//
// Execution instruction address sample, or 0xFFFFFFFF if the core is halted.
#define CPU_DWT_PCSR_EIASAMPLE_W 32
#define CPU_DWT_PCSR_EIASAMPLE_M 0xFFFFFFFF
#define CPU_DWT_PCSR_EIASAMPLE_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_COMP0
//
//*****************************************************************************
// Field: [31:0] COMP
//
// Reference value to compare against PC or the data address as given by
// FUNCTION0. Comparator 0 can also compare against the value of the PC Sampler
// Counter (CYCCNT).
#define CPU_DWT_COMP0_COMP_W 32
#define CPU_DWT_COMP0_COMP_M 0xFFFFFFFF
#define CPU_DWT_COMP0_COMP_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_MASK0
//
//*****************************************************************************
// Field: [3:0] MASK
//
// Mask on data address when matching against COMP0. This is the size of the
// ignore mask. That is, DWT matching is performed as:(ADDR ANDed with (0xFFFF
// left bit-shifted by MASK)) == COMP0. However, the actual comparison is
// slightly more complex to enable matching an address wherever it appears on a
// bus. So, if COMP0 is 3, this matches a word access of 0, because 3 would be
// within the word.
#define CPU_DWT_MASK0_MASK_W 4
#define CPU_DWT_MASK0_MASK_M 0x0000000F
#define CPU_DWT_MASK0_MASK_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_FUNCTION0
//
//*****************************************************************************
// Field: [24] MATCHED
//
// This bit is set when the comparator matches, and indicates that the
// operation defined by FUNCTION has occurred since this bit was last read.
// This bit is cleared on read.
#define CPU_DWT_FUNCTION0_MATCHED 0x01000000
#define CPU_DWT_FUNCTION0_MATCHED_BITN 24
#define CPU_DWT_FUNCTION0_MATCHED_M 0x01000000
#define CPU_DWT_FUNCTION0_MATCHED_S 24
// Field: [7] CYCMATCH
//
// This bit is only available in comparator 0. When set, COMP0 will compare
// against the cycle counter (CYCCNT).
#define CPU_DWT_FUNCTION0_CYCMATCH 0x00000080
#define CPU_DWT_FUNCTION0_CYCMATCH_BITN 7
#define CPU_DWT_FUNCTION0_CYCMATCH_M 0x00000080
#define CPU_DWT_FUNCTION0_CYCMATCH_S 7
// Field: [5] EMITRANGE
//
// Emit range field. This bit permits emitting offset when range match occurs.
// PC sampling is not supported when emit range is enabled.
// This field only applies for: FUNCTION = 1, 2, 3, 12, 13, 14, and 15.
#define CPU_DWT_FUNCTION0_EMITRANGE 0x00000020
#define CPU_DWT_FUNCTION0_EMITRANGE_BITN 5
#define CPU_DWT_FUNCTION0_EMITRANGE_M 0x00000020
#define CPU_DWT_FUNCTION0_EMITRANGE_S 5
// Field: [3:0] FUNCTION
//
// Function settings.
//
// 0x0: Disabled
// 0x1: EMITRANGE = 0, sample and emit PC through ITM. EMITRANGE = 1, emit
// address offset through ITM
// 0x2: EMITRANGE = 0, emit data through ITM on read and write. EMITRANGE = 1,
// emit data and address offset through ITM on read or write.
// 0x3: EMITRANGE = 0, sample PC and data value through ITM on read or write.
// EMITRANGE = 1, emit address offset and data value through ITM on read or
// write.
// 0x4: Watchpoint on PC match.
// 0x5: Watchpoint on read.
// 0x6: Watchpoint on write.
// 0x7: Watchpoint on read or write.
// 0x8: ETM trigger on PC match
// 0x9: ETM trigger on read
// 0xA: ETM trigger on write
// 0xB: ETM trigger on read or write
// 0xC: EMITRANGE = 0, sample data for read transfers. EMITRANGE = 1, sample
// Daddr (lower 16 bits) for read transfers
// 0xD: EMITRANGE = 0, sample data for write transfers. EMITRANGE = 1, sample
// Daddr (lower 16 bits) for write transfers
// 0xE: EMITRANGE = 0, sample PC + data for read transfers. EMITRANGE = 1,
// sample Daddr (lower 16 bits) + data for read transfers
// 0xF: EMITRANGE = 0, sample PC + data for write transfers. EMITRANGE = 1,
// sample Daddr (lower 16 bits) + data for write transfers
//
// Note 1: If the ETM is not fitted, then ETM trigger is not possible.
// Note 2: Data value is only sampled for accesses that do not fault (MPU or
// bus fault). The PC is sampled irrespective of any faults. The PC is only
// sampled for the first address of a burst.
// Note 3: PC match is not recommended for watchpoints because it stops after
// the instruction. It mainly guards and triggers the ETM.
#define CPU_DWT_FUNCTION0_FUNCTION_W 4
#define CPU_DWT_FUNCTION0_FUNCTION_M 0x0000000F
#define CPU_DWT_FUNCTION0_FUNCTION_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_COMP1
//
//*****************************************************************************
// Field: [31:0] COMP
//
// Reference value to compare against PC or the data address as given by
// FUNCTION1.
// Comparator 1 can also compare data values. So this register can contain
// reference values for data matching.
#define CPU_DWT_COMP1_COMP_W 32
#define CPU_DWT_COMP1_COMP_M 0xFFFFFFFF
#define CPU_DWT_COMP1_COMP_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_MASK1
//
//*****************************************************************************
// Field: [3:0] MASK
//
// Mask on data address when matching against COMP1. This is the size of the
// ignore mask. That is, DWT matching is performed as:(ADDR ANDed with (0xFFFF
// left bit-shifted by MASK)) == COMP1. However, the actual comparison is
// slightly more complex to enable matching an address wherever it appears on a
// bus. So, if COMP1 is 3, this matches a word access of 0, because 3 would be
// within the word.
#define CPU_DWT_MASK1_MASK_W 4
#define CPU_DWT_MASK1_MASK_M 0x0000000F
#define CPU_DWT_MASK1_MASK_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_FUNCTION1
//
//*****************************************************************************
// Field: [24] MATCHED
//
// This bit is set when the comparator matches, and indicates that the
// operation defined by FUNCTION has occurred since this bit was last read.
// This bit is cleared on read.
#define CPU_DWT_FUNCTION1_MATCHED 0x01000000
#define CPU_DWT_FUNCTION1_MATCHED_BITN 24
#define CPU_DWT_FUNCTION1_MATCHED_M 0x01000000
#define CPU_DWT_FUNCTION1_MATCHED_S 24
// Field: [19:16] DATAVADDR1
//
// Identity of a second linked address comparator for data value matching when
// DATAVMATCH == 1 and LNK1ENA == 1.
#define CPU_DWT_FUNCTION1_DATAVADDR1_W 4
#define CPU_DWT_FUNCTION1_DATAVADDR1_M 0x000F0000
#define CPU_DWT_FUNCTION1_DATAVADDR1_S 16
// Field: [15:12] DATAVADDR0
//
// Identity of a linked address comparator for data value matching when
// DATAVMATCH == 1.
#define CPU_DWT_FUNCTION1_DATAVADDR0_W 4
#define CPU_DWT_FUNCTION1_DATAVADDR0_M 0x0000F000
#define CPU_DWT_FUNCTION1_DATAVADDR0_S 12
// Field: [11:10] DATAVSIZE
//
// Defines the size of the data in the COMP1 register that is to be matched:
//
// 0x0: Byte
// 0x1: Halfword
// 0x2: Word
// 0x3: Unpredictable.
#define CPU_DWT_FUNCTION1_DATAVSIZE_W 2
#define CPU_DWT_FUNCTION1_DATAVSIZE_M 0x00000C00
#define CPU_DWT_FUNCTION1_DATAVSIZE_S 10
// Field: [9] LNK1ENA
//
// Read only bit-field only supported in comparator 1.
//
// 0: DATAVADDR1 not supported
// 1: DATAVADDR1 supported (enabled)
#define CPU_DWT_FUNCTION1_LNK1ENA 0x00000200
#define CPU_DWT_FUNCTION1_LNK1ENA_BITN 9
#define CPU_DWT_FUNCTION1_LNK1ENA_M 0x00000200
#define CPU_DWT_FUNCTION1_LNK1ENA_S 9
// Field: [8] DATAVMATCH
//
// Data match feature:
//
// 0: Perform address comparison
// 1: Perform data value compare. The comparators given by DATAVADDR0 and
// DATAVADDR1 provide the address for the data comparison. The FUNCTION setting
// for the comparators given by DATAVADDR0 and DATAVADDR1 are overridden and
// those comparators only provide the address match for the data comparison.
//
// This bit is only available in comparator 1.
#define CPU_DWT_FUNCTION1_DATAVMATCH 0x00000100
#define CPU_DWT_FUNCTION1_DATAVMATCH_BITN 8
#define CPU_DWT_FUNCTION1_DATAVMATCH_M 0x00000100
#define CPU_DWT_FUNCTION1_DATAVMATCH_S 8
// Field: [5] EMITRANGE
//
// Emit range field. This bit permits emitting offset when range match occurs.
// PC sampling is not supported when emit range is enabled.
// This field only applies for: FUNCTION = 1, 2, 3, 12, 13, 14, and 15.
#define CPU_DWT_FUNCTION1_EMITRANGE 0x00000020
#define CPU_DWT_FUNCTION1_EMITRANGE_BITN 5
#define CPU_DWT_FUNCTION1_EMITRANGE_M 0x00000020
#define CPU_DWT_FUNCTION1_EMITRANGE_S 5
// Field: [3:0] FUNCTION
//
// Function settings:
//
// 0x0: Disabled
// 0x1: EMITRANGE = 0, sample and emit PC through ITM. EMITRANGE = 1, emit
// address offset through ITM
// 0x2: EMITRANGE = 0, emit data through ITM on read and write. EMITRANGE = 1,
// emit data and address offset through ITM on read or write.
// 0x3: EMITRANGE = 0, sample PC and data value through ITM on read or write.
// EMITRANGE = 1, emit address offset and data value through ITM on read or
// write.
// 0x4: Watchpoint on PC match.
// 0x5: Watchpoint on read.
// 0x6: Watchpoint on write.
// 0x7: Watchpoint on read or write.
// 0x8: ETM trigger on PC match
// 0x9: ETM trigger on read
// 0xA: ETM trigger on write
// 0xB: ETM trigger on read or write
// 0xC: EMITRANGE = 0, sample data for read transfers. EMITRANGE = 1, sample
// Daddr (lower 16 bits) for read transfers
// 0xD: EMITRANGE = 0, sample data for write transfers. EMITRANGE = 1, sample
// Daddr (lower 16 bits) for write transfers
// 0xE: EMITRANGE = 0, sample PC + data for read transfers. EMITRANGE = 1,
// sample Daddr (lower 16 bits) + data for read transfers
// 0xF: EMITRANGE = 0, sample PC + data for write transfers. EMITRANGE = 1,
// sample Daddr (lower 16 bits) + data for write transfers
//
// Note 1: If the ETM is not fitted, then ETM trigger is not possible.
// Note 2: Data value is only sampled for accesses that do not fault (MPU or
// bus fault). The PC is sampled irrespective of any faults. The PC is only
// sampled for the first address of a burst.
// Note 3: FUNCTION is overridden for comparators given by DATAVADDR0 and
// DATAVADDR1 if DATAVMATCH is also set. The comparators given by DATAVADDR0
// and DATAVADDR1 can then only perform address comparator matches for
// comparator 1 data matches.
// Note 4: If the data matching functionality is not included during
// implementation it is not possible to set DATAVADDR0, DATAVADDR1, or
// DATAVMATCH. This means that the data matching functionality is not available
// in the implementation. Test the availability of data matching by writing and
// reading DATAVMATCH. If it is not settable then data matching is unavailable.
// Note 5: PC match is not recommended for watchpoints because it stops after
// the instruction. It mainly guards and triggers the ETM.
#define CPU_DWT_FUNCTION1_FUNCTION_W 4
#define CPU_DWT_FUNCTION1_FUNCTION_M 0x0000000F
#define CPU_DWT_FUNCTION1_FUNCTION_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_COMP2
//
//*****************************************************************************
// Field: [31:0] COMP
//
// Reference value to compare against PC or the data address as given by
// FUNCTION2.
#define CPU_DWT_COMP2_COMP_W 32
#define CPU_DWT_COMP2_COMP_M 0xFFFFFFFF
#define CPU_DWT_COMP2_COMP_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_MASK2
//
//*****************************************************************************
// Field: [3:0] MASK
//
// Mask on data address when matching against COMP2. This is the size of the
// ignore mask. That is, DWT matching is performed as:(ADDR ANDed with (0xFFFF
// left bit-shifted by MASK)) == COMP2. However, the actual comparison is
// slightly more complex to enable matching an address wherever it appears on a
// bus. So, if COMP2 is 3, this matches a word access of 0, because 3 would be
// within the word.
#define CPU_DWT_MASK2_MASK_W 4
#define CPU_DWT_MASK2_MASK_M 0x0000000F
#define CPU_DWT_MASK2_MASK_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_FUNCTION2
//
//*****************************************************************************
// Field: [24] MATCHED
//
// This bit is set when the comparator matches, and indicates that the
// operation defined by FUNCTION has occurred since this bit was last read.
// This bit is cleared on read.
#define CPU_DWT_FUNCTION2_MATCHED 0x01000000
#define CPU_DWT_FUNCTION2_MATCHED_BITN 24
#define CPU_DWT_FUNCTION2_MATCHED_M 0x01000000
#define CPU_DWT_FUNCTION2_MATCHED_S 24
// Field: [5] EMITRANGE
//
// Emit range field. This bit permits emitting offset when range match occurs.
// PC sampling is not supported when emit range is enabled.
// This field only applies for: FUNCTION = 1, 2, 3, 12, 13, 14, and 15.
#define CPU_DWT_FUNCTION2_EMITRANGE 0x00000020
#define CPU_DWT_FUNCTION2_EMITRANGE_BITN 5
#define CPU_DWT_FUNCTION2_EMITRANGE_M 0x00000020
#define CPU_DWT_FUNCTION2_EMITRANGE_S 5
// Field: [3:0] FUNCTION
//
// Function settings.
//
// 0x0: Disabled
// 0x1: EMITRANGE = 0, sample and emit PC through ITM. EMITRANGE = 1, emit
// address offset through ITM
// 0x2: EMITRANGE = 0, emit data through ITM on read and write. EMITRANGE = 1,
// emit data and address offset through ITM on read or write.
// 0x3: EMITRANGE = 0, sample PC and data value through ITM on read or write.
// EMITRANGE = 1, emit address offset and data value through ITM on read or
// write.
// 0x4: Watchpoint on PC match.
// 0x5: Watchpoint on read.
// 0x6: Watchpoint on write.
// 0x7: Watchpoint on read or write.
// 0x8: ETM trigger on PC match
// 0x9: ETM trigger on read
// 0xA: ETM trigger on write
// 0xB: ETM trigger on read or write
// 0xC: EMITRANGE = 0, sample data for read transfers. EMITRANGE = 1, sample
// Daddr (lower 16 bits) for read transfers
// 0xD: EMITRANGE = 0, sample data for write transfers. EMITRANGE = 1, sample
// Daddr (lower 16 bits) for write transfers
// 0xE: EMITRANGE = 0, sample PC + data for read transfers. EMITRANGE = 1,
// sample Daddr (lower 16 bits) + data for read transfers
// 0xF: EMITRANGE = 0, sample PC + data for write transfers. EMITRANGE = 1,
// sample Daddr (lower 16 bits) + data for write transfers
//
// Note 1: If the ETM is not fitted, then ETM trigger is not possible.
// Note 2: Data value is only sampled for accesses that do not fault (MPU or
// bus fault). The PC is sampled irrespective of any faults. The PC is only
// sampled for the first address of a burst.
// Note 3: PC match is not recommended for watchpoints because it stops after
// the instruction. It mainly guards and triggers the ETM.
#define CPU_DWT_FUNCTION2_FUNCTION_W 4
#define CPU_DWT_FUNCTION2_FUNCTION_M 0x0000000F
#define CPU_DWT_FUNCTION2_FUNCTION_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_COMP3
//
//*****************************************************************************
// Field: [31:0] COMP
//
// Reference value to compare against PC or the data address as given by
// FUNCTION3.
#define CPU_DWT_COMP3_COMP_W 32
#define CPU_DWT_COMP3_COMP_M 0xFFFFFFFF
#define CPU_DWT_COMP3_COMP_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_MASK3
//
//*****************************************************************************
// Field: [3:0] MASK
//
// Mask on data address when matching against COMP3. This is the size of the
// ignore mask. That is, DWT matching is performed as:(ADDR ANDed with (0xFFFF
// left bit-shifted by MASK)) == COMP3. However, the actual comparison is
// slightly more complex to enable matching an address wherever it appears on a
// bus. So, if COMP3 is 3, this matches a word access of 0, because 3 would be
// within the word.
#define CPU_DWT_MASK3_MASK_W 4
#define CPU_DWT_MASK3_MASK_M 0x0000000F
#define CPU_DWT_MASK3_MASK_S 0
//*****************************************************************************
//
// Register: CPU_DWT_O_FUNCTION3
//
//*****************************************************************************
// Field: [24] MATCHED
//
// This bit is set when the comparator matches, and indicates that the
// operation defined by FUNCTION has occurred since this bit was last read.
// This bit is cleared on read.
#define CPU_DWT_FUNCTION3_MATCHED 0x01000000
#define CPU_DWT_FUNCTION3_MATCHED_BITN 24
#define CPU_DWT_FUNCTION3_MATCHED_M 0x01000000
#define CPU_DWT_FUNCTION3_MATCHED_S 24
// Field: [5] EMITRANGE
//
// Emit range field. This bit permits emitting offset when range match occurs.
// PC sampling is not supported when emit range is enabled.
// This field only applies for: FUNCTION = 1, 2, 3, 12, 13, 14, and 15.
#define CPU_DWT_FUNCTION3_EMITRANGE 0x00000020
#define CPU_DWT_FUNCTION3_EMITRANGE_BITN 5
#define CPU_DWT_FUNCTION3_EMITRANGE_M 0x00000020
#define CPU_DWT_FUNCTION3_EMITRANGE_S 5
// Field: [3:0] FUNCTION
//
// Function settings.
//
// 0x0: Disabled
// 0x1: EMITRANGE = 0, sample and emit PC through ITM. EMITRANGE = 1, emit
// address offset through ITM
// 0x2: EMITRANGE = 0, emit data through ITM on read and write. EMITRANGE = 1,
// emit data and address offset through ITM on read or write.
// 0x3: EMITRANGE = 0, sample PC and data value through ITM on read or write.
// EMITRANGE = 1, emit address offset and data value through ITM on read or
// write.
// 0x4: Watchpoint on PC match.
// 0x5: Watchpoint on read.
// 0x6: Watchpoint on write.
// 0x7: Watchpoint on read or write.
// 0x8: ETM trigger on PC match
// 0x9: ETM trigger on read
// 0xA: ETM trigger on write
// 0xB: ETM trigger on read or write
// 0xC: EMITRANGE = 0, sample data for read transfers. EMITRANGE = 1, sample
// Daddr (lower 16 bits) for read transfers
// 0xD: EMITRANGE = 0, sample data for write transfers. EMITRANGE = 1, sample
// Daddr (lower 16 bits) for write transfers
// 0xE: EMITRANGE = 0, sample PC + data for read transfers. EMITRANGE = 1,
// sample Daddr (lower 16 bits) + data for read transfers
// 0xF: EMITRANGE = 0, sample PC + data for write transfers. EMITRANGE = 1,
// sample Daddr (lower 16 bits) + data for write transfers
//
// Note 1: If the ETM is not fitted, then ETM trigger is not possible.
// Note 2: Data value is only sampled for accesses that do not fault (MPU or
// bus fault). The PC is sampled irrespective of any faults. The PC is only
// sampled for the first address of a burst.
// Note 3: PC match is not recommended for watchpoints because it stops after
// the instruction. It mainly guards and triggers the ETM.
#define CPU_DWT_FUNCTION3_FUNCTION_W 4
#define CPU_DWT_FUNCTION3_FUNCTION_M 0x0000000F
#define CPU_DWT_FUNCTION3_FUNCTION_S 0
#endif // __CPU_DWT__
|
0de3058914fc3b9d6175fa4675f3d94cda0bfe96
|
99bdb3251fecee538e0630f15f6574054dfc1468
|
/bsp/stm32/stm32l475-atk-pandora/board/pm_cfg.h
|
bb4c3cf22d2736a2f706b05ae0364c27efe2c53b
|
[
"Zlib",
"LicenseRef-scancode-proprietary-license",
"MIT",
"BSD-3-Clause",
"X11",
"BSD-4-Clause-UC",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
RT-Thread/rt-thread
|
03a7c52c2aeb1b06a544143b0e803d72f47d1ece
|
3602f891211904a27dcbd51e5ba72fefce7326b2
|
refs/heads/master
| 2023-09-01T04:10:20.295801
| 2023-08-31T16:20:55
| 2023-08-31T16:20:55
| 7,408,108
| 9,599
| 5,805
|
Apache-2.0
| 2023-09-14T13:37:26
| 2013-01-02T14:49:21
|
C
|
UTF-8
|
C
| false
| false
| 368
|
h
|
pm_cfg.h
|
#ifndef __PM_CFG_H__
#define __PM_CFG_H__

/*
 * Identifiers for the modules participating in board power management.
 * Values are assigned sequentially starting from PM_NONE_ID (0).
 */
enum pm_module_id
{
    PM_NONE_ID = 0,     /* no module / unused slot */
    PM_POWER_ID,
    PM_BOARD_ID,
    PM_LCD_ID,
    PM_KEY_ID,
    PM_TP_ID,           /* presumably touch panel -- confirm against users */
    PM_OTA_ID,
    PM_SPI_ID,
    PM_I2C_ID,
    PM_ADC_ID,
    PM_RTC_ID,
    PM_GPIO_ID,
    PM_UART_ID,
    PM_SENSOR_ID,
    PM_ALARM_ID,
    PM_BLE_ID,
    PM_MODULE_MAX_ID,   /* must stay last: count of module IDs (enum must!) */
};

#endif
|
5a4dcb1fbc6c6a846f4057c606e7aa21eeab97c2
|
b2f295072d61bd25c47a05aedc4a5f9a8d9df670
|
/Foundation/Down/Sources/cmark/utf8.h
|
8e45714d4ae9030199a917d20ea3bc4c46a23cc1
|
[
"CC-BY-SA-4.0",
"BSD-2-Clause",
"MIT",
"Unlicense"
] |
permissive
|
SailyTeam/Saily
|
8f48fde0abec0c5c79e53e5db31af17351958962
|
c0ae33ee26db2ecbab7d33e42c15148cad80f22f
|
refs/heads/main-2.0
| 2022-12-23T05:41:58.080306
| 2022-12-22T12:30:53
| 2022-12-22T12:33:52
| 280,041,061
| 796
| 63
|
Unlicense
| 2022-12-22T11:22:33
| 2020-07-16T03:16:41
|
Swift
|
UTF-8
|
C
| false
| false
| 620
|
h
|
utf8.h
|
#ifndef CMARK_UTF8_H
#define CMARK_UTF8_H

/* UTF-8 helpers used by the cmark parser; implementations live in utf8.c. */

#include <stdint.h>
#include "buffer.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Case-fold 'len' bytes of UTF-8 from 'str', appending the result to 'dest'. */
void cmark_utf8proc_case_fold(cmark_strbuf *dest, const uint8_t *str,
                              bufsize_t len);

/* Append the UTF-8 encoding of code point 'uc' to 'buf'. */
void cmark_utf8proc_encode_char(int32_t uc, cmark_strbuf *buf);

/* Decode one code point from the start of 'str' (at most 'str_len' bytes)
 * into '*dst'.  Returns the number of bytes consumed -- presumably a
 * negative value on invalid input; TODO confirm against utf8.c. */
int cmark_utf8proc_iterate(const uint8_t *str, bufsize_t str_len, int32_t *dst);

/* Copy 'size' bytes of 'line' into 'dest' -- presumably sanitizing invalid
 * UTF-8 sequences along the way; TODO confirm against utf8.c. */
void cmark_utf8proc_check(cmark_strbuf *dest, const uint8_t *line,
                          bufsize_t size);

/* Nonzero if code point 'uc' is Unicode whitespace. */
int cmark_utf8proc_is_space(int32_t uc);

/* Nonzero if code point 'uc' is Unicode punctuation. */
int cmark_utf8proc_is_punctuation(int32_t uc);

#ifdef __cplusplus
}
#endif

#endif
|
10acc179310784823212a0025cc14508db6171df
|
5eff7a36d9a9917dce9111f0c3074375fe6f7656
|
/lib/mesa/src/broadcom/qpu/tests/qpu_disasm.c
|
2f8e19c73fedb491c6020ebbc7ebdd3c5ddf3959
|
[] |
no_license
|
openbsd/xenocara
|
cb392d02ebba06f6ff7d826fd8a89aa3b8401779
|
a012b5de33ea0b977095d77316a521195b26cc6b
|
refs/heads/master
| 2023-08-25T12:16:58.862008
| 2023-08-12T16:16:25
| 2023-08-12T16:16:25
| 66,967,384
| 177
| 66
| null | 2023-07-22T18:12:37
| 2016-08-30T18:36:01
|
C
|
UTF-8
|
C
| false
| false
| 9,679
|
c
|
qpu_disasm.c
|
/*
* Copyright © 2016 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <stdio.h>
#include <string.h>
#include "util/macros.h"
#include "util/ralloc.h"
#include "broadcom/common/v3d_device_info.h"
#include "broadcom/qpu/qpu_disasm.h"
#include "broadcom/qpu/qpu_instr.h"
/*
 * Golden test vectors: each entry pairs a raw 64-bit QPU instruction word
 * ('inst') with the exact text v3d_qpu_disasm() must produce for the given
 * hardware revision.  'ver' encodes the V3D version times ten (33 == v3.3,
 * 41 == v4.1, 42 == v4.2 -- see the "v%d.%d" print in main()).  main() also
 * round-trips each word through unpack/pack and requires a bit-identical
 * re-encoding.
 */
static const struct {
        int ver;
        uint64_t inst;
        const char *expected;
} tests[] = {
        { 33, 0x3d003186bb800000ull, "nop ; nop ; ldvary" },
        { 33, 0x3c20318105829000ull, "fadd r1, r1, r5 ; nop ; thrsw" },
        { 33, 0x3c403186bb81d000ull, "vpmsetup -, r5 ; nop ; ldunif" },
        { 33, 0x3f003186bb800000ull, "nop ; nop ; ldvpm" },
        { 33, 0x3c002380b6edb000ull, "or rf0, r3, r3 ; mov vpm, r3" },
        { 33, 0x57403006bbb80000ull, "nop ; fmul r0, rf0, r5 ; ldvpm; ldunif" },
        { 33, 0x9c094adef634b000ull, "ffloor.ifb rf30.l, r3 ; fmul.pushz rf43.l, r5, r1.h" },
        { 33, 0xb0044c56ba326840ull, "flpop rf22, rf33 ; fmul.pushz rf49.l, r4.h, r1.abs" },
        /* vfmul input packing */
        { 33, 0x101e8b6e8aad4000ull, "fmax.nornn rf46, r4.l, r2.l ; vfmul.ifnb rf45, r3, r5" },
        { 33, 0x1857d3c219825000ull, "faddnf.norc r2.l, r5.l, r4 ; vfmul.ifb rf15, r0.ll, r4 ; ldunif" },
        { 33, 0x1c0a0dfde2294000ull, "fcmp.ifna rf61.h, r4.abs, r2.l; vfmul rf55, r2.hh, r1" },
        { 33, 0x2011c89b402cc000ull, "fsub.norz rf27, r4.abs, r1.abs; vfmul.ifa rf34, r3.swp, r1" },
        { 33, 0xe01b42ab3bb063c0ull, "vfpack.andnc rf43, rf15.l, r0.h; fmul.ifna rf10.h, r4.l, r5.abs" },
        { 33, 0x600b8b87fb4d1000ull, "fdx.ifnb rf7.h, r1.l ; fmul.pushn rf46, r3.l, r2.abs" },
        /* small immediates */
        { 33, 0x5de24398bbdc6218ull, "vflb.andnn rf24 ; fmul rf14, -8, rf8.h" },
        { 33, 0x25ef83d8b166f00full, "vfmin.pushn rf24, 15.ff, r5 ; smul24.ifnb rf15, r1, r3" },
        { 33, 0xadedcdf70839f990ull, "faddnf.pushc rf55, -16.l, r3.abs; fmul.ifb rf55.l, rf38.l, r1.h" },
        { 33, 0x7dff89fa6a01f020ull, "fsub.nornc rf58.h, 0x3b800000.l, r3.l; fmul.ifnb rf39, r0.h, r0.h" },
        /* branch conditions */
        { 33, 0x02000006002034c0ull, "b.anyap rf19" },
        { 33, 0x02679356b4201000ull, "b.anyap -1268280496" },
        { 33, 0x02b76a2dd0400000ull, "b.anynaq zero_addr+0xd0b76a28" },
        { 33, 0x0200000500402000ull, "b.anynaq lri" },
        { 33, 0x0216fe167301c8c0ull, "bu.anya zero_addr+0x7316fe10, rf35" },
        { 33, 0x020000050040e000ull, "bu.anynaq lri, r:unif" },
        { 33, 0x0200000300006000ull, "bu.na0 lri, a:unif" },
        /* Special waddr names */
        { 33, 0x3c00318735808000ull, "vfpack tlb, r0, r1 ; nop" },
        { 33, 0xe0571c938e8d5000ull, "fmax.andc recip, r5.h, r2.l ; fmul.ifb rf50.h, r3.l, r4.abs; ldunif" },
        { 33, 0xc04098d4382c9000ull, "add.pushn rsqrt, r1, r1 ; fmul rf35.h, r3.abs, r1.abs ; ldunif" },
        { 33, 0x481edcd6b3184500ull, "vfmin.norn log, r4.hh, r0 ; fmul.ifnb rf51, rf20.abs, r0.l" },
        { 33, 0x041618d57c453000ull, "shl.andn exp, r3, r2 ; add.ifb rf35, r1, r2" },
        { 33, 0x7048e5da49272800ull, "fsub.ifa rf26, r2.l, rf32 ; fmul.pushc sin, r1.h, r1.abs; ldunif" },
        /* v4.1 signals */
        { 41, 0x1f010520cf60a000ull, "fcmp.andz rf32, r2.h, r1.h ; vfmul rf20, r0.hh, r3 ; ldunifa" },
        { 41, 0x932045e6c16ea000ull, "fcmp rf38, r2.abs, r5 ; fmul rf23.l, r3, r3.abs ; ldunifarf.rf1" },
        { 41, 0xd72f0434e43ae5c0ull, "fcmp rf52.h, rf23, r5.abs ; fmul rf16.h, rf23, r1 ; ldunifarf.rf60" },
        { 41, 0xdb3048eb9d533780ull, "fmax rf43.l, r3.h, rf30 ; fmul rf35.h, r4, r2.l ; ldunifarf.r1" },
        { 41, 0x733620471e6ce700ull, "faddnf rf7.l, rf28.h, r1.l ; fmul r1, r3.h, r3.abs ; ldunifarf.rsqrt2" },
        { 41, 0x9c094adef634b000ull, "ffloor.ifb rf30.l, r3 ; fmul.pushz rf43.l, r5, r1.h" },
        /* v4.1 opcodes */
        { 41, 0x3de020c7bdfd200dull, "ldvpmg_in rf7, r2, r2 ; mov r3, 13" },
        { 41, 0x3de02040f8ff7201ull, "stvpmv 1, rf8 ; mov r1, 1" },
        { 41, 0xd8000e50bb2d3000ull, "sampid rf16 ; fmul rf57.h, r3, r1.l" },
        /* v4.1 SFU instructions. */
        { 41, 0xe98d60c1ba2aef80ull, "recip rf1, rf62 ; fmul r3.h, r2.l, r1.l ; ldunifrf.rf53" },
        { 41, 0x7d87c2debc51c000ull, "rsqrt rf30, r4 ; fmul rf11, r4.h, r2.h ; ldunifrf.rf31" },
        { 41, 0xb182475abc2bb000ull, "rsqrt2 rf26, r3 ; fmul rf29.l, r2.h, r1.abs ; ldunifrf.rf9" },
        { 41, 0x79880808bc0b6900ull, "sin rf8, rf36 ; fmul rf32, r2.h, r0.l ; ldunifrf.rf32" },
        { 41, 0x04092094bc5a28c0ull, "exp.ifb rf20, r2 ; add r2, rf35, r2" },
        { 41, 0xe00648bfbc32a000ull, "log rf63, r2 ; fmul.andnn rf34.h, r4.l, r1.abs" },
        /* v4.2 changes */
        { 42, 0x3c203192bb814000ull, "barrierid syncb ; nop ; thrsw" },
};
static void
swap_mux(enum v3d_qpu_mux *a, enum v3d_qpu_mux *b)
{
enum v3d_qpu_mux t = *a;
*a = *b;
*b = t;
}
static void
swap_pack(enum v3d_qpu_input_unpack *a, enum v3d_qpu_input_unpack *b)
{
enum v3d_qpu_input_unpack t = *a;
*a = *b;
*b = t;
}
/*
 * Round-trips every entry in tests[]: disassemble the raw instruction word
 * and compare against the golden string, then unpack and repack the word
 * and require a bit-identical re-encoding.  Returns 0 when every case
 * passes, 1 otherwise (suitable as a test-runner exit status).
 */
int
main(int argc, char **argv)
{
        struct v3d_device_info devinfo = { };
        int retval = 0;

        for (int i = 0; i < ARRAY_SIZE(tests); i++) {
                devinfo.ver = tests[i].ver;

                printf("Testing v%d.%d 0x%016llx (\"%s\")... ",
                       devinfo.ver / 10, devinfo.ver % 10,
                       (long long)tests[i].inst,
                       tests[i].expected);

                const char *disasm_output = v3d_qpu_disasm(&devinfo,
                                                           tests[i].inst);
                printf("%s\n", disasm_output);

                if (strcmp(disasm_output, tests[i].expected) != 0) {
                        printf("FAIL\n");
                        printf(" Expected: \"%s\"\n", tests[i].expected);
                        printf(" Got: \"%s\"\n", disasm_output);
                        retval = 1;
                        /* Fix: release the ralloc'd string on this skip path
                         * too; it previously leaked here.
                         */
                        ralloc_free((void *)disasm_output);
                        continue;
                }
                ralloc_free((void *)disasm_output);

                struct v3d_qpu_instr instr;
                if (!v3d_qpu_instr_unpack(&devinfo, tests[i].inst, &instr)) {
                        printf("FAIL (unpack) %s\n", tests[i].expected);
                        retval = 1;
                        continue;
                }

                if (instr.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (instr.alu.add.op) {
                        case V3D_QPU_A_FADD:
                        case V3D_QPU_A_FADDNF:
                        case V3D_QPU_A_FMIN:
                        case V3D_QPU_A_FMAX:
                                /* Swap the operands to be sure that we test
                                 * how the QPUs distinguish between these ops.
                                 */
                                swap_mux(&instr.alu.add.a,
                                         &instr.alu.add.b);
                                swap_pack(&instr.alu.add.a_unpack,
                                          &instr.alu.add.b_unpack);
                                break;
                        default:
                                break;
                        }
                }

                uint64_t repack;
                if (!v3d_qpu_instr_pack(&devinfo, &instr, &repack)) {
                        printf("FAIL (pack) %s\n", tests[i].expected);
                        retval = 1;
                        continue;
                }

                if (repack != tests[i].inst) {
                        printf("FAIL (repack) 0x%016llx\n", (long long)repack);
                        printf(" Expected: \"%s\"\n", tests[i].expected);
                        const char *redisasm = v3d_qpu_disasm(&devinfo, repack);
                        printf(" Got: \"%s\"\n", redisasm);
                        retval = 1;
                        ralloc_free((void *)redisasm);
                        /* Fix: a repack mismatch must skip the PASS report
                         * below; previously it printed both FAIL and PASS.
                         */
                        continue;
                }

                printf("PASS\n");
        }

        return retval;
}
|
46a32a1d4442313a8933a5e51e706d8b91836a98
|
8838eb997879add5759b6dfb23f9a646464e53ca
|
/src/include/mem/sysmalloc.h
|
b13c04324808ac16accbdc5e617daccf965d6886
|
[
"BSD-2-Clause"
] |
permissive
|
embox/embox
|
d6aacec876978522f01cdc4b8de37a668c6f4c80
|
98e3c06e33f3fdac10a29c069c20775568e0a6d1
|
refs/heads/master
| 2023-09-04T03:02:20.165042
| 2023-09-02T14:55:31
| 2023-09-02T14:55:31
| 33,078,138
| 1,087
| 325
|
BSD-2-Clause
| 2023-09-14T16:58:34
| 2015-03-29T15:27:48
|
C
|
UTF-8
|
C
| false
| false
| 795
|
h
|
sysmalloc.h
|
/**
 * @file
 * @brief Dynamic memory allocator arbitrary size. Terehov style.
 * @details XXX sysmalloc docs
 *
 * @date 02.05.10
 * @author Michail Skorginskii
 */

#ifndef MEM_SYSMALLOC_H_
#define MEM_SYSMALLOC_H_

#include <stddef.h>

/**
 * Dynamic memory allocator arbitrary size.
 *
 * @param size requested memory size
 *
 * @return pointer to the memory of the requested size.
 * @retval 0 if there are no memory
 */
extern void *sysmalloc(size_t size /*, int priority */);

/**
 * Free memory function.
 *
 * @param ptr pointer at the memory, that must be free
 */
extern void sysfree(void *ptr);

/**
 * Allocate @p size bytes aligned on a @p boundary byte boundary
 * (cf. POSIX memalign()).
 *
 * @param boundary required alignment in bytes -- presumably a power of two;
 *                 TODO confirm against the implementation
 * @param size     requested memory size
 *
 * @return pointer to the aligned block
 * @retval 0 if there are no memory
 */
extern void *sysmemalign(size_t boundary, size_t size);

/**
 * Resize the block @p ptr to @p size bytes (cf. realloc()).
 *
 * @param ptr  block previously returned by the sysmalloc family
 * @param size new size in bytes
 *
 * @return pointer to the resized block
 * @retval 0 if there are no memory
 */
extern void *sysrealloc(void *ptr, size_t size);

/**
 * Allocate an array of @p nmemb elements of @p size bytes each
 * (cf. calloc() -- presumably zero-initialized; TODO confirm).
 *
 * @return pointer to the array
 * @retval 0 if there are no memory
 */
extern void *syscalloc(size_t nmemb, size_t size);

#endif /* MEM_SYSMALLOC_H_ */
|
4a50a68fafcbedf85ed98db0439c573f277154a5
|
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
|
/SOFTWARE/A64-TERES/linux-a64/drivers/rtc/rtc-vr41xx.c
|
009f52550ea2775a10e5323070b6b598dda163d5
|
[
"Linux-syscall-note",
"GPL-2.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
OLIMEX/DIY-LAPTOP
|
ae82f4ee79c641d9aee444db9a75f3f6709afa92
|
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
|
refs/heads/rel3
| 2023-08-04T01:54:19.483792
| 2023-04-03T07:18:12
| 2023-04-03T07:18:12
| 80,094,055
| 507
| 92
|
Apache-2.0
| 2023-04-03T07:05:59
| 2017-01-26T07:25:50
|
C
|
UTF-8
|
C
| false
| false
| 9,501
|
c
|
rtc-vr41xx.c
|
/*
* Driver for NEC VR4100 series Real Time Clock unit.
*
* Copyright (C) 2003-2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/log2.h>
#include <asm/div64.h>
#include <asm/io.h>
#include <asm/uaccess.h>
MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("NEC VR4100 series RTC driver");
MODULE_LICENSE("GPL v2");
/* RTC 1 registers */
#define ETIMELREG 0x00
#define ETIMEMREG 0x02
#define ETIMEHREG 0x04
/* RFU */
#define ECMPLREG 0x08
#define ECMPMREG 0x0a
#define ECMPHREG 0x0c
/* RFU */
#define RTCL1LREG 0x10
#define RTCL1HREG 0x12
#define RTCL1CNTLREG 0x14
#define RTCL1CNTHREG 0x16
#define RTCL2LREG 0x18
#define RTCL2HREG 0x1a
#define RTCL2CNTLREG 0x1c
#define RTCL2CNTHREG 0x1e
/* RTC 2 registers */
#define TCLKLREG 0x00
#define TCLKHREG 0x02
#define TCLKCNTLREG 0x04
#define TCLKCNTHREG 0x06
/* RFU */
#define RTCINTREG 0x1e
#define TCLOCK_INT 0x08
#define RTCLONG2_INT 0x04
#define RTCLONG1_INT 0x02
#define ELAPSEDTIME_INT 0x01
/* Elapsed-time counter clock; bit 15 of ETIMELREG is the seconds LSB
 * (see the shifts in read_elapsed_second()). */
#define RTC_FREQUENCY 32768
#define MAX_PERIODIC_RATE 6553

/* MMIO bases of the two RTC register banks, mapped at probe time. */
static void __iomem *rtc1_base;
static void __iomem *rtc2_base;

/* 16-bit register accessors for each bank. */
#define rtc1_read(offset) readw(rtc1_base + (offset))
#define rtc1_write(offset, value) writew((value), rtc1_base + (offset))

#define rtc2_read(offset) readw(rtc2_base + (offset))
#define rtc2_write(offset, value) writew((value), rtc2_base + (offset))

/* Base year of the hardware elapsed-time counter. */
static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */

/* Serializes register access and guards alarm_enabled below. */
static DEFINE_SPINLOCK(rtc_lock);
static char rtc_name[] = "RTC";
static unsigned long periodic_count;
static unsigned int alarm_enabled; /* nonzero while aie_irq is enabled */
static int aie_irq; /* alarm irq line: enabled/disabled by the alarm ops */
static int pie_irq; /* NOTE(review): presumably the periodic irq line --
                     * confirm at the request_irq() site (not in view) */
/*
 * Read the elapsed-time counter (three 16-bit registers) and return it in
 * whole seconds.  The three registers cannot be latched in one access, so
 * each is read twice and the whole sequence is retried until both passes
 * agree, guarding against a carry rippling through between reads.
 */
static inline unsigned long read_elapsed_second(void)
{
	unsigned long first_low, first_mid, first_high;
	unsigned long second_low, second_mid, second_high;

	do {
		first_low = rtc1_read(ETIMELREG);
		first_mid = rtc1_read(ETIMEMREG);
		first_high = rtc1_read(ETIMEHREG);
		second_low = rtc1_read(ETIMELREG);
		second_mid = rtc1_read(ETIMEMREG);
		second_high = rtc1_read(ETIMEHREG);
	} while (first_low != second_low || first_mid != second_mid ||
	         first_high != second_high);

	/* The seconds value sits shifted left by 15 bits across the three
	 * registers: bit 15 of the low word is the seconds LSB; the low 15
	 * bits are presumably sub-second ticks of the 32768 Hz clock. */
	return (first_high << 17) | (first_mid << 1) | (first_low >> 15);
}
/*
 * Store @sec into the elapsed-time counter, splitting the value (shifted
 * left by 15 bits, mirroring read_elapsed_second()) across the three
 * 16-bit registers.  Done under rtc_lock so a concurrent reader never
 * sees a half-updated counter.
 */
static inline void write_elapsed_second(unsigned long sec)
{
	spin_lock_irq(&rtc_lock);

	rtc1_write(ETIMELREG, (uint16_t)(sec << 15));
	rtc1_write(ETIMEMREG, (uint16_t)(sec >> 1));
	rtc1_write(ETIMEHREG, (uint16_t)(sec >> 17));

	spin_unlock_irq(&rtc_lock);
}
/*
 * ->release hook: the last user of the RTC character device closed it.
 * Clear the alarm compare registers and the periodic (RTC Long 1)
 * reload registers, then mask both interrupt lines so no stray alarm
 * or periodic interrupt fires while the device is unused.
 */
static void vr41xx_rtc_release(struct device *dev)
{
	spin_lock_irq(&rtc_lock);
	rtc1_write(ECMPLREG, 0);
	rtc1_write(ECMPMREG, 0);
	rtc1_write(ECMPHREG, 0);
	rtc1_write(RTCL1LREG, 0);
	rtc1_write(RTCL1HREG, 0);
	spin_unlock_irq(&rtc_lock);
	/* irq masking intentionally happens outside the spinlock */
	disable_irq(aie_irq);
	disable_irq(pie_irq);
}
/*
 * ->read_time hook: current wall time is the (configurable) epoch base
 * (Jan 1 of `epoch`, 00:00:00) plus the hardware elapsed-seconds
 * counter.  Always succeeds.
 */
static int vr41xx_rtc_read_time(struct device *dev, struct rtc_time *time)
{
	unsigned long epoch_sec, elapsed_sec;
	epoch_sec = mktime(epoch, 1, 1, 0, 0, 0);
	elapsed_sec = read_elapsed_second();
	rtc_time_to_tm(epoch_sec + elapsed_sec, time);
	return 0;
}
/*
 * ->set_time hook: convert the requested broken-down time to seconds
 * and store the offset from the epoch base into the hardware elapsed
 * counter.  Always succeeds.
 */
static int vr41xx_rtc_set_time(struct device *dev, struct rtc_time *time)
{
	unsigned long epoch_sec, current_sec;
	epoch_sec = mktime(epoch, 1, 1, 0, 0, 0);
	/* rtc_time years are 1900-based and months are 0-based */
	current_sec = mktime(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday,
			     time->tm_hour, time->tm_min, time->tm_sec);
	write_elapsed_second(current_sec - epoch_sec);
	return 0;
}
/*
 * ->read_alarm hook: read back the 48-bit alarm compare value and the
 * software enable flag.  The register reads and the flag snapshot are
 * taken under rtc_lock; the time conversion happens outside the lock.
 */
static int vr41xx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
	unsigned long low, mid, high;
	struct rtc_time *time = &wkalrm->time;
	spin_lock_irq(&rtc_lock);
	low = rtc1_read(ECMPLREG);
	mid = rtc1_read(ECMPMREG);
	high = rtc1_read(ECMPHREG);
	wkalrm->enabled = alarm_enabled;
	spin_unlock_irq(&rtc_lock);
	/* same half-second register layout as the elapsed-time counter */
	rtc_time_to_tm((high << 17) | (mid << 1) | (low >> 15), time);
	return 0;
}
/*
 * ->set_alarm hook: program the 48-bit alarm compare registers and
 * update the enable state.  The alarm irq is masked while the compare
 * value is being rewritten so a partially written value cannot fire,
 * then re-enabled (or left masked) per wkalrm->enabled.
 */
static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
	unsigned long alarm_sec;
	struct rtc_time *time = &wkalrm->time;
	alarm_sec = mktime(time->tm_year + 1900, time->tm_mon + 1, time->tm_mday,
			   time->tm_hour, time->tm_min, time->tm_sec);
	spin_lock_irq(&rtc_lock);
	if (alarm_enabled)
		disable_irq(aie_irq);
	rtc1_write(ECMPLREG, (uint16_t)(alarm_sec << 15));
	rtc1_write(ECMPMREG, (uint16_t)(alarm_sec >> 1));
	rtc1_write(ECMPHREG, (uint16_t)(alarm_sec >> 17));
	if (wkalrm->enabled)
		enable_irq(aie_irq);
	alarm_enabled = wkalrm->enabled;
	spin_unlock_irq(&rtc_lock);
	return 0;
}
/*
 * ->ioctl hook: supports reading and setting the epoch base year used
 * by the time conversions.  Anything else is passed back to the RTC
 * core via -ENOIOCTLCMD.
 */
static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
	if (cmd == RTC_EPOCH_READ)
		return put_user(epoch, (unsigned long __user *)arg);

	if (cmd == RTC_EPOCH_SET) {
		/* Epochs before 1900 are not representable. */
		if (arg < 1900)
			return -EINVAL;
		epoch = arg;
		return 0;
	}

	return -ENOIOCTLCMD;
}
/*
 * ->alarm_irq_enable hook: switch the alarm interrupt on or off,
 * tracking the current state in alarm_enabled so the irq line is never
 * enabled or disabled twice in a row (enable_irq/disable_irq nest).
 */
static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	spin_lock_irq(&rtc_lock);
	if (enabled && !alarm_enabled) {
		enable_irq(aie_irq);
		alarm_enabled = 1;
	} else if (!enabled && alarm_enabled) {
		disable_irq(aie_irq);
		alarm_enabled = 0;
	}
	spin_unlock_irq(&rtc_lock);
	return 0;
}
/*
 * Alarm (elapsed-time compare) interrupt handler.  Acknowledges the
 * interrupt by writing its status bit to RTCINTREG on the second
 * register bank, then reports one alarm (RTC_AF) event to the core.
 */
static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = (struct platform_device *)dev_id;
	struct rtc_device *rtc = platform_get_drvdata(pdev);
	rtc2_write(RTCINTREG, ELAPSEDTIME_INT);
	rtc_update_irq(rtc, 1, RTC_AF);
	return IRQ_HANDLED;
}
/*
 * Periodic (RTC Long 1) interrupt handler.  Acknowledges the interrupt,
 * re-arms the 32-bit down-counter with the current period
 * (periodic_count), and reports one periodic (RTC_PF) event.
 */
static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = (struct platform_device *)dev_id;
	struct rtc_device *rtc = platform_get_drvdata(pdev);
	unsigned long count = periodic_count;
	rtc2_write(RTCINTREG, RTCLONG1_INT);
	/* reload low then high half of the RTC Long 1 counter */
	rtc1_write(RTCL1LREG, count);
	rtc1_write(RTCL1HREG, count >> 16);
	rtc_update_irq(rtc, 1, RTC_PF);
	return IRQ_HANDLED;
}
/* RTC class operations exported to the RTC core for this driver. */
static const struct rtc_class_ops vr41xx_rtc_ops = {
	.release = vr41xx_rtc_release,
	.ioctl = vr41xx_rtc_ioctl,
	.read_time = vr41xx_rtc_read_time,
	.set_time = vr41xx_rtc_set_time,
	.read_alarm = vr41xx_rtc_read_alarm,
	.set_alarm = vr41xx_rtc_set_alarm,
	.alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
};
/*
 * Probe: map the two RTC register banks, register the RTC class device
 * and hook up the alarm (elapsed time) and periodic (RTC Long 1)
 * interrupts.  Both interrupts start out disabled and are enabled on
 * demand.  On any failure, everything acquired so far is torn down via
 * the goto-cleanup chain and a negative errno is returned.
 */
static int rtc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct rtc_device *rtc;
	int retval;
	if (pdev->num_resources != 4)
		return -EBUSY;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EBUSY;
	rtc1_base = ioremap(res->start, resource_size(res));
	if (!rtc1_base)
		return -EBUSY;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		retval = -EBUSY;
		goto err_rtc1_iounmap;
	}
	rtc2_base = ioremap(res->start, resource_size(res));
	if (!rtc2_base) {
		retval = -EBUSY;
		goto err_rtc1_iounmap;
	}
	rtc = rtc_device_register(rtc_name, &pdev->dev, &vr41xx_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc)) {
		retval = PTR_ERR(rtc);
		goto err_iounmap_all;
	}
	rtc->max_user_freq = MAX_PERIODIC_RATE;
	/* Clear any stale alarm compare value and periodic reload count. */
	spin_lock_irq(&rtc_lock);
	rtc1_write(ECMPLREG, 0);
	rtc1_write(ECMPMREG, 0);
	rtc1_write(ECMPHREG, 0);
	rtc1_write(RTCL1LREG, 0);
	rtc1_write(RTCL1HREG, 0);
	spin_unlock_irq(&rtc_lock);
	aie_irq = platform_get_irq(pdev, 0);
	if (aie_irq <= 0) {
		retval = -EBUSY;
		goto err_device_unregister;
	}
	retval = request_irq(aie_irq, elapsedtime_interrupt, 0,
	                     "elapsed_time", pdev);
	if (retval < 0)
		goto err_device_unregister;
	pie_irq = platform_get_irq(pdev, 1);
	if (pie_irq <= 0) {
		/*
		 * BUGFIX: the original fell through here with retval still 0
		 * (the successful request_irq() return), so probe reported
		 * success after a failure.  Set an error code explicitly.
		 */
		retval = -EBUSY;
		goto err_free_irq;
	}
	retval = request_irq(pie_irq, rtclong1_interrupt, 0,
	                     "rtclong1", pdev);
	if (retval < 0)
		goto err_free_irq;
	platform_set_drvdata(pdev, rtc);
	/* Start with both interrupt sources masked; enabled on demand. */
	disable_irq(aie_irq);
	disable_irq(pie_irq);
	dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
	return 0;
err_free_irq:
	free_irq(aie_irq, pdev);
err_device_unregister:
	rtc_device_unregister(rtc);
err_iounmap_all:
	iounmap(rtc2_base);
	rtc2_base = NULL;
err_rtc1_iounmap:
	iounmap(rtc1_base);
	rtc1_base = NULL;
	return retval;
}
/*
 * Remove: unregister the RTC class device, release both interrupt
 * lines and unmap the two register banks.  Mirrors rtc_probe() in
 * reverse order.
 */
static int rtc_remove(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	rtc = platform_get_drvdata(pdev);
	if (rtc)
		rtc_device_unregister(rtc);
	platform_set_drvdata(pdev, NULL);
	free_irq(aie_irq, pdev);
	free_irq(pie_irq, pdev);
	if (rtc1_base)
		iounmap(rtc1_base);
	if (rtc2_base)
		iounmap(rtc2_base);
	return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:RTC");
static struct platform_driver rtc_platform_driver = {
.probe = rtc_probe,
.remove = rtc_remove,
.driver = {
.name = rtc_name,
},
};
module_platform_driver(rtc_platform_driver);
|
6ef6e33a22efec7f8af308f5640304ffa3aff974
|
2c5414ed169892c538cd802c06d67001d9e4b960
|
/plugins/sudoers/fmtsudoers_cvt.c
|
ab55d11950cc0c4ab907f131d934fb74652a1c2e
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"Zlib",
"BSD-Source-Code",
"ISC",
"BSD-2-Clause"
] |
permissive
|
sudo-project/sudo
|
bdfeebb02cb39151fc4cfe69366b887f092e0c16
|
956de5cbbc650d6aec19804cd376a39164e76e5b
|
refs/heads/main
| 2023-09-04T11:12:09.797952
| 2023-09-02T21:25:58
| 2023-09-02T21:25:58
| 57,972,154
| 922
| 216
|
NOASSERTION
| 2023-08-18T00:43:55
| 2016-05-03T13:41:24
|
C
|
UTF-8
|
C
| false
| false
| 6,847
|
c
|
fmtsudoers_cvt.c
|
/*
* SPDX-License-Identifier: ISC
*
* Copyright (c) 2004-2005, 2007-2021, 2023 Todd C. Miller <Todd.Miller@sudo.ws>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This is an open source non-commercial project. Dear PVS-Studio, please check it.
* PVS-Studio Static Code Analyzer for C, C++ and C#: http://www.viva64.com
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "sudoers.h"
#include "sudo_lbuf.h"
#include <gram.h>
/*
* Write a privilege to lbuf in sudoers format.
*/
bool
sudoers_format_privilege(struct sudo_lbuf *lbuf,
    const struct sudoers_parse_tree *parse_tree, const struct privilege *priv,
    bool expand_aliases)
{
    const struct cmndspec *cs, *prev_cs;
    const struct member *m;
    struct cmndtag tags;
    debug_decl(sudoers_format_privilege, SUDOERS_DEBUG_UTIL);
    /* Convert per-privilege defaults to tags. */
    sudoers_defaults_list_to_tags(&priv->defaults, &tags);
    /* Print hosts list, comma-separated. */
    TAILQ_FOREACH(m, &priv->hostlist, entries) {
	if (m != TAILQ_FIRST(&priv->hostlist))
	    sudo_lbuf_append(lbuf, ", ");
	sudoers_format_member(lbuf, parse_tree, m, ", ",
	    expand_aliases ? HOSTALIAS : UNSPEC);
    }
    /* Print commands. */
    sudo_lbuf_append(lbuf, " = ");
    prev_cs = NULL;
    TAILQ_FOREACH(cs, &priv->cmndlist, entries) {
	/*
	 * The "(runas_users : runas_groups)" prefix is only re-emitted
	 * when the runas lists differ from the previous cmndspec.
	 */
	if (prev_cs == NULL || RUNAS_CHANGED(cs, prev_cs)) {
	    if (cs != TAILQ_FIRST(&priv->cmndlist))
		sudo_lbuf_append(lbuf, ", ");
	    if (cs->runasuserlist != NULL || cs->runasgrouplist != NULL)
		sudo_lbuf_append(lbuf, "(");
	    if (cs->runasuserlist != NULL) {
		TAILQ_FOREACH(m, cs->runasuserlist, entries) {
		    if (m != TAILQ_FIRST(cs->runasuserlist))
			sudo_lbuf_append(lbuf, ", ");
		    sudoers_format_member(lbuf, parse_tree, m, ", ",
			expand_aliases ? RUNASALIAS : UNSPEC);
		}
	    }
	    if (cs->runasgrouplist != NULL) {
		/* ':' separates runas users from runas groups */
		sudo_lbuf_append(lbuf, " : ");
		TAILQ_FOREACH(m, cs->runasgrouplist, entries) {
		    if (m != TAILQ_FIRST(cs->runasgrouplist))
			sudo_lbuf_append(lbuf, ", ");
		    sudoers_format_member(lbuf, parse_tree, m, ", ",
			expand_aliases ? RUNASALIAS : UNSPEC);
		}
	    }
	    if (cs->runasuserlist != NULL || cs->runasgrouplist != NULL)
		sudo_lbuf_append(lbuf, ") ");
	} else if (cs != TAILQ_FIRST(&priv->cmndlist)) {
	    sudo_lbuf_append(lbuf, ", ");
	}
	sudoers_format_cmndspec(lbuf, parse_tree, cs, prev_cs, tags,
	    expand_aliases);
	prev_cs = cs;
    }
    /* Success unless the lbuf recorded an allocation failure. */
    debug_return_bool(!sudo_lbuf_error(lbuf));
}
/*
* Write a userspec to lbuf in sudoers format.
*/
bool
sudoers_format_userspec(struct sudo_lbuf *lbuf,
    const struct sudoers_parse_tree *parse_tree,
    const struct userspec *us, bool expand_aliases)
{
    const struct sudoers_comment *comment;
    const struct privilege *priv;
    const struct member *m;
    debug_decl(sudoers_format_userspec, SUDOERS_DEBUG_UTIL);
    /* Print comments (if any), one "# ..." line each. */
    STAILQ_FOREACH(comment, &us->comments, entries) {
	sudo_lbuf_append(lbuf, "# %s\n", comment->str);
    }
    /* Print users list, comma-separated. */
    TAILQ_FOREACH(m, &us->users, entries) {
	if (m != TAILQ_FIRST(&us->users))
	    sudo_lbuf_append(lbuf, ", ");
	sudoers_format_member(lbuf, parse_tree, m, ", ",
	    expand_aliases ? USERALIAS : UNSPEC);
    }
    /* Privileges after the first are joined with " : ". */
    TAILQ_FOREACH(priv, &us->privileges, entries) {
	if (priv != TAILQ_FIRST(&us->privileges))
	    sudo_lbuf_append(lbuf, " : ");
	else
	    sudo_lbuf_append(lbuf, " ");
	if (!sudoers_format_privilege(lbuf, parse_tree, priv, expand_aliases))
	    break;
    }
    sudo_lbuf_append(lbuf, "\n");
    debug_return_bool(!sudo_lbuf_error(lbuf));
}
/*
* Write a userspec_list to lbuf in sudoers format.
*/
bool
sudoers_format_userspecs(struct sudo_lbuf *lbuf,
    const struct sudoers_parse_tree *parse_tree, const char *separator,
    bool expand_aliases, bool flush)
{
    const struct userspec *us;
    debug_decl(sudoers_format_userspecs, SUDOERS_DEBUG_UTIL);
    TAILQ_FOREACH(us, &parse_tree->userspecs, entries) {
	/* Optional separator between userspecs (not before the first). */
	if (separator != NULL && us != TAILQ_FIRST(&parse_tree->userspecs))
	    sudo_lbuf_append(lbuf, "%s", separator);
	if (!sudoers_format_userspec(lbuf, parse_tree, us, expand_aliases))
	    break;
	/* Flush the lbuf after each userspec to bound memory use. */
	sudo_lbuf_print(lbuf);
    }
    debug_return_bool(!sudo_lbuf_error(lbuf));
}
/*
* Format and append a defaults line to the specified lbuf.
* If next, is specified, it must point to the next defaults
* entry in the list; this is used to print multiple defaults
* entries with the same binding on a single line.
*/
bool
sudoers_format_default_line(struct sudo_lbuf *lbuf,
    const struct sudoers_parse_tree *parse_tree, const struct defaults *d,
    struct defaults **next, bool expand_aliases)
{
    const struct member *m;
    short alias_type;
    debug_decl(sudoers_format_default_line, SUDOERS_DEBUG_UTIL);
    /*
     * Print Defaults type and binding (if present).  The suffix char
     * encodes the binding kind: '@' host, ':' user, '>' runas, '!' cmnd.
     */
    switch (d->type) {
	case DEFAULTS_HOST:
	    sudo_lbuf_append(lbuf, "Defaults@");
	    alias_type = expand_aliases ? HOSTALIAS : UNSPEC;
	    break;
	case DEFAULTS_USER:
	    sudo_lbuf_append(lbuf, "Defaults:");
	    alias_type = expand_aliases ? USERALIAS : UNSPEC;
	    break;
	case DEFAULTS_RUNAS:
	    sudo_lbuf_append(lbuf, "Defaults>");
	    alias_type = expand_aliases ? RUNASALIAS : UNSPEC;
	    break;
	case DEFAULTS_CMND:
	    sudo_lbuf_append(lbuf, "Defaults!");
	    alias_type = expand_aliases ? CMNDALIAS : UNSPEC;
	    break;
	default:
	    sudo_lbuf_append(lbuf, "Defaults");
	    alias_type = UNSPEC;
	    break;
    }
    /* Print binding members, comma-separated. */
    TAILQ_FOREACH(m, &d->binding->members, entries) {
	if (m != TAILQ_FIRST(&d->binding->members))
	    sudo_lbuf_append(lbuf, ", ");
	sudoers_format_member(lbuf, parse_tree, m, ", ", alias_type);
    }
    sudo_lbuf_append(lbuf, " ");
    sudoers_format_default(lbuf, d);
    if (next != NULL) {
	/* Merge Defaults with the same binding, there may be multiple. */
	struct defaults *n;
	while ((n = TAILQ_NEXT(d, entries)) && d->binding == n->binding) {
	    sudo_lbuf_append(lbuf, ", ");
	    sudoers_format_default(lbuf, n);
	    d = n;
	}
	/* Tell the caller where to resume iterating. */
	*next = n;
    }
    sudo_lbuf_append(lbuf, "\n");
    debug_return_bool(!sudo_lbuf_error(lbuf));
}
|
4ad2d85ee0a2c04c875c90dd8cb0d4dea06515a4
|
1577e1cf4e89584a125cffb855ca50a9654c6d55
|
/libpcap/libpcap/tests/ngofflinereadtest/ngofflinereadtest.c
|
acc32adab66fe25606293c125e44860950bd3273
|
[
"BSD-3-Clause"
] |
permissive
|
apple-open-source/macos
|
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
|
2d2b15f13487673de33297e49f00ef94af743a9a
|
refs/heads/master
| 2023-08-01T11:03:26.870408
| 2023-03-27T00:00:00
| 2023-03-27T00:00:00
| 180,595,052
| 124
| 24
| null | 2022-12-27T14:54:09
| 2019-04-10T14:06:23
| null |
UTF-8
|
C
| false
| false
| 19,752
|
c
|
ngofflinereadtest.c
|
/*
* Copyright (c) 2012-2018 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include "pcap/pcap-ng.h"
#include <stdio.h>
#include <err.h>
#include <stdlib.h>
#include <string.h>
#include <libgen.h>
#include <sysexits.h>
/*
 * Per-SHB state: each pcapng Section Header Block starts a new section
 * with its own interface-id space.  Sections are kept on a global
 * singly-linked list, newest first.
 */
struct section_info {
	struct section_info *next;
	struct pcapng_section_header_fields shb;
	pcap_t *pcap;
	u_int32_t if_count;   /* number of IDBs seen in this section */
	struct interface_info *if_list;
};
/*
 * Per-IDB state within a section.  interface_id is the 0-based index
 * of the IDB within its section; if_name/if_desc are heap copies of
 * the corresponding pcapng options (may be NULL).
 */
struct interface_info {
	struct interface_info *next;
	struct pcapng_interface_description_fields idb;
	struct section_info *section_info;
	u_int32_t interface_id;
	char *if_name;
	char *if_desc;
};
struct section_info *section_list = NULL;     /* all sections seen so far */
struct section_info *current_section = NULL;  /* section of the last SHB */
/* Output/behavior modes selected from the command line. */
int mode_raw = 0;
int mode_block = 0;
int mode_pcap = 0;
int mode_test = 0;
/* Round up to the next multiple of 4 (pcapng 32-bit alignment). */
#define PAD32(x) (((x) + 3) & ~3)
void hex_and_ascii_print(const char *, const void *, size_t, const char *);
/*
 * Allocate a section_info for a freshly parsed Section Header Block,
 * push it onto the head of the global section list and make it the
 * current section.  Returns NULL on allocation failure.
 */
struct section_info *
new_section_info(pcap_t *pcap, struct pcapng_section_header_fields *shb)
{
	struct section_info *section;

	section = calloc(1, sizeof(struct section_info));
	if (section == NULL)
		return NULL;

	section->pcap = pcap;
	section->shb = *shb;

	/* Push onto the head of the global list (newest first). */
	section->next = section_list;
	section_list = section;

	/* Subsequent IDB/EPB blocks belong to this section. */
	current_section = section;

	return section;
}
/*
 * Option iterator callback for Interface Description Blocks.
 *
 * Copies the if_name (code 2) and if_desc (code 3) option values into
 * freshly allocated, NUL-terminated strings on the interface_info
 * passed as context.  pcapng option values are NOT guaranteed to be
 * NUL-terminated, so copy exactly option_info->length bytes and
 * terminate explicitly instead of letting "%s" formatting read past
 * the end of the value.  All other option codes are ignored here.
 */
void
interface_option_iterator(pcapng_block_t block, struct pcapng_option_info *option_info, void *context)
{
	struct interface_info *interface_info = (struct interface_info *)context;
	switch (option_info->code) {
		case 2: /* if_name */
			interface_info->if_name = malloc(option_info->length + 1);
			if (interface_info->if_name == NULL)
				break;
			memcpy(interface_info->if_name, option_info->value, option_info->length);
			interface_info->if_name[option_info->length] = '\0';
			break;
		case 3: /* if_desc */
			interface_info->if_desc = malloc(option_info->length + 1);
			if (interface_info->if_desc == NULL)
				break;
			memcpy(interface_info->if_desc, option_info->value, option_info->length);
			interface_info->if_desc[option_info->length] = '\0';
			break;
		default:
			/* timestamps, addresses, etc. are not needed here */
			break;
	}
}
/*
 * Allocate an interface_info for a freshly parsed Interface
 * Description Block, assign it the next interface id in the section,
 * push it onto the head of the section's interface list and pull the
 * name/description options out of the block.  Returns NULL if
 * section_info is NULL or allocation fails.
 */
struct interface_info *
new_interface_info(struct section_info *section_info, pcapng_block_t block)
{
	struct interface_info *info;

	if (section_info == NULL)
		return NULL;

	info = calloc(1, sizeof(struct interface_info));
	if (info == NULL)
		return NULL;

	info->section_info = section_info;
	info->interface_id = section_info->if_count++;

	/* Push onto the head of the section's interface list. */
	info->next = section_info->if_list;
	section_info->if_list = info;

	(void) pcnapng_block_iterate_options(block,
					     interface_option_iterator,
					     info);
	return info;
}
/*
 * Look up an interface in the current section by its 0-based id.
 * Returns NULL when there is no current section, the id is out of
 * range, or no matching entry exists.
 */
struct interface_info *
find_interface_info_by_id(u_int16_t interface_id)
{
	struct interface_info *cursor;

	if (current_section == NULL)
		return (NULL);
	if (interface_id >= current_section->if_count)
		return (NULL);

	for (cursor = current_section->if_list; cursor != NULL;
	     cursor = cursor->next) {
		if (cursor->interface_id == interface_id)
			return (cursor);
	}
	return (NULL);
}
/*
 * Generic option iterator callback: pretty-print one pcapng option.
 * Codes 0 (end of options) and 1 (comment) are shared by all block
 * types; every other code is interpreted in the option code space of
 * the block type it belongs to.  Fixes "unkown" -> "unknown" typos in
 * the output and casts option values to (char *) consistently.
 */
void
block_option_iterator(pcapng_block_t block, struct pcapng_option_info *option_info, void *context)
{
	printf("  block_type %u context %p option_code %u value_len %u value_ptr %p\n",
	       pcap_ng_block_get_type(block), context,
	       option_info->code, option_info->length,
	       option_info->value
	       );
	switch (option_info->code) {
		case 0:
			printf("   opt_endofopt\n");
			break;
		case 1:
			printf("   opt_comment: %-*s\n",
			       option_info->length, (char *)option_info->value);
			break;
		default:
			/*
			 * Each block type has its own option code space
			 */
			switch (pcap_ng_block_get_type(block)) {
				case PCAPNG_BT_SHB:
					switch (option_info->code) {
						case 2:
							printf("   shb_hardware: %-*s\n",
							       option_info->length, (char *)option_info->value);
							break;
						case 3:
							printf("   shb_os: %-*s\n",
							       option_info->length, (char *)option_info->value);
							break;
						case 4:
							printf("   shb_userappl: %-*s\n",
							       option_info->length, (char *)option_info->value);
							break;
						default:
							printf("   <unknown shb option>\n");
							break;
					}
					break;
				case PCAPNG_BT_IDB:
					switch (option_info->code) {
						case 2:
							printf("   if_name: %-*s\n",
							       option_info->length, (char *)option_info->value);
							break;
						case 3:
							printf("   if_desc: %-*s\n",
							       option_info->length, (char *)option_info->value);
							break;
						case 4:
							printf("   if_IPv4addr\n");
							break;
						case 5:
							printf("   if_IPv6addr\n");
							break;
						case 6:
							printf("   if_MACaddr\n");
							break;
						case 7:
							printf("   if_EUIaddr\n");
							break;
						case 8:
							printf("   if_speed\n");
							break;
						case 9:
							printf("   if_tsresol\n");
							break;
						case 10:
							printf("   if_tzone\n");
							break;
						case 11:
							printf("   if_filter %-*s\n",
							       option_info->length, (char *)option_info->value);
							break;
						case 12:
							printf("   if_os %-*s\n",
							       option_info->length, (char *)option_info->value);
							break;
						case 13:
							printf("   if_fcslen\n");
							break;
						case 14:
							printf("   if_tsoffset\n");
							break;
						default:
							printf("   <unknown idb option>\n");
							break;
					}
					break;
				case PCAPNG_BT_EPB:
					switch (option_info->code) {
						case 2:
							printf("   epb_flags\n");
							break;
						case 3:
							printf("   epb_hash\n");
							break;
						case 4:
							printf("   epb_dropcount\n");
							break;
						case PCAPNG_EPB_PIB_INDEX:
							printf("   epb_pib\n");
							break;
						case PCAPNG_EPB_SVC:
							printf("   epb_svc\n");
							break;
						case PCAPNG_EPB_PMD_FLAGS:
							printf("   epb_pmd_flags\n");
							break;
						case PCAPNG_EPB_FLOW_ID:
							printf("   epb_flowid\n");
							break;
						case PCAPNG_EPB_TRACE_TAG:
							printf("   epb_trace_tag\n");
							break;
						default:
							printf("   <unknown epb option>\n");
							break;
					}
					break;
				case PCAPNG_BT_SPB:
					/* Simple Packet Blocks carry no options */
					printf("   <invalid spb option>\n");
					break;
				case PCAPNG_BT_PB:
					switch (option_info->code) {
						case 2:
							printf("   pack_flags\n");
							break;
						case 3:
							printf("   pack_hash\n");
							break;
						default:
							printf("   <unknown pb option>\n");
							break;
					}
					break;
				case PCAPNG_BT_PIB: {
					switch (option_info->code) {
						case 2:
							printf("   proc_name\n");
							break;
						case 3:
							printf("   proc_path\n");
							break;
						case 4:
							printf("   proc_uuid\n");
							break;
						default:
							printf("   <unknown pib option>\n");
							break;
					}
					break;
				}
				case PCAPNG_BT_ISB: {
					break;
				}
				case PCAPNG_BT_NRB: {
					break;
				}
				default:
					break;
			}
			break;
	}
	/* Always dump the raw option bytes when present. */
	if (option_info->value) {
		hex_and_ascii_print("    ", option_info->value, option_info->length, "\n");
	}
}
/*
 * pcap_dispatch() callback: `bytes` is one raw pcapng block.  In raw
 * mode the block is decoded by hand from the wire layout; in block
 * mode it is re-parsed through the pcap-ng block API, and SHB/IDB
 * blocks additionally update the section/interface bookkeeping.
 * BUGFIX: the raw-mode PCAPNG_BT_OSEV case previously printed
 * "# Name Record Block" (copy-paste from the NRB case).
 */
void
read_callback(u_char *user, const struct pcap_pkthdr *hdr, const u_char *bytes)
{
	pcap_t *pcap = (pcap_t *)user;
	struct pcapng_option_info option_info;
	u_char *optptr = NULL;

	/* Access the raw block */
	if (mode_raw) {
		struct pcapng_block_header *block_header = (struct pcapng_block_header*)bytes;

		printf("raw hdr caplen %u len %u\n", hdr->caplen, hdr->len);
		printf("#\n# user %p hdr.caplen %u hdr.len %u block_header.blocktype 0x%x block_header.totallength %u\n",
		       user, hdr->caplen, hdr->len,
		       block_header->block_type, block_header->total_length);
		hex_and_ascii_print("", bytes, block_header->total_length, "\n");

		switch (block_header->block_type) {
			case PCAPNG_BT_SHB: {
				struct pcapng_section_header_fields *shb = (struct pcapng_section_header_fields *)(block_header + 1);
				printf("# Section Header Block\n");
				printf(" byte_order_magic 0x%x major_version %u minor_version %u section_length %llu\n",
				       shb->byte_order_magic, shb->major_version, shb->minor_version, shb->section_length);
				hex_and_ascii_print("", shb, sizeof(struct pcapng_section_header_fields), "\n");
				optptr = (u_char *)(shb + 1);
				break;
			}
			case PCAPNG_BT_IDB: {
				struct pcapng_interface_description_fields *idb = (struct pcapng_interface_description_fields *)(block_header + 1);
				printf("# Interface Description Block\n");
				printf(" linktype %u reserved %u snaplen %u\n",
				       idb->idb_linktype, idb->idb_reserved, idb->idb_snaplen);
				hex_and_ascii_print("", idb, sizeof(struct pcapng_interface_description_fields), "\n");
				optptr = (u_char *)(idb + 1);
				break;
			}
			case PCAPNG_BT_EPB: {
				struct pcapng_enhanced_packet_fields *epb = (struct pcapng_enhanced_packet_fields *)(block_header + 1);
				printf("# Enhanced Packet Block\n");
				printf(" interface_id %u timestamp_high %u timestamp_low %u caplen %u len %u\n",
				       epb->interface_id, epb->timestamp_high, epb->timestamp_low, epb->caplen, epb->len);
				hex_and_ascii_print("", epb, sizeof(struct pcapng_enhanced_packet_fields), "\n");
				hex_and_ascii_print("", epb + 1, epb->caplen, "\n");
				/* options follow the 32-bit padded packet data */
				optptr = (u_char *)(epb + 1);
				optptr += PAD32(epb->caplen);
				break;
			}
			case PCAPNG_BT_SPB: {
				struct pcapng_simple_packet_fields *spb = (struct pcapng_simple_packet_fields *)(block_header + 1);
				printf("# Simple Packet Block\n");
				printf(" len %u\n",
				       spb->len);
				hex_and_ascii_print("", spb, sizeof(struct pcapng_simple_packet_fields), "\n");
				hex_and_ascii_print("", spb + 1, spb->len, "\n");
				break;
			}
			case PCAPNG_BT_PB: {
				struct pcapng_packet_fields *pb = (struct pcapng_packet_fields *)(block_header + 1);
				printf("# Packet Block\n");
				printf(" interface_id %u drops_count %u timestamp_high %u timestamp_low %u caplen %u len %u\n",
				       pb->interface_id, pb->drops_count, pb->timestamp_high, pb->timestamp_low, pb->caplen, pb->len);
				hex_and_ascii_print("", pb, sizeof(struct pcapng_packet_fields), "\n");
				hex_and_ascii_print("", pb + 1, pb->caplen, "\n");
				break;
			}
			case PCAPNG_BT_PIB: {
				struct pcapng_process_information_fields *pib = (struct pcapng_process_information_fields *)(block_header + 1);
				printf("# Process Information Block\n");
				printf(" process_id %u\n",
				       pib->process_id);
				hex_and_ascii_print("", pib, sizeof(struct pcapng_process_information_fields), "\n");
				break;
			}
			case PCAPNG_BT_ISB: {
				printf("# Interface Statistics Block\n");
				break;
			}
			case PCAPNG_BT_NRB: {
				printf("# Name Record Block\n");
				break;
			}
			case PCAPNG_BT_OSEV: {
				/* was "# Name Record Block" -- copy-paste error */
				printf("# OS Event Block\n");
				break;
			}
			case PCAPNG_BT_DSB: {
				printf("# Decryption Secrets Block\n");
				break;
			}
			default:
				printf("# Unknown Block\n");
				break;
		}
		if (optptr) {
			size_t optlen = block_header->total_length - (optptr - bytes);
			hex_and_ascii_print("", optptr, optlen, "\n");
		}
	}

	/* Create block object */
	if (mode_block) {
		pcapng_block_t block = pcap_ng_block_alloc_with_raw_block(pcap, (u_char *)bytes);
		if (block == NULL) {
			printf(" pcap_ng_block_alloc_with_raw_block() failed: %s\n", pcap_geterr(pcap));
			return;
		}
		switch (pcap_ng_block_get_type(block)) {
			case PCAPNG_BT_SHB: {
				struct pcapng_section_header_fields *shb = pcap_ng_get_section_header_fields(block);
				printf("# Section Header Block\n");
				printf(" byte_order_magic 0x%x major_version %u minor_version %u section_length %llu\n",
				       shb->byte_order_magic, shb->major_version, shb->minor_version, shb->section_length);
				/* new SHB starts a new section / interface-id space */
				(void)new_section_info(pcap, shb);
				break;
			}
			case PCAPNG_BT_IDB: {
				struct pcapng_interface_description_fields *idb = pcap_ng_get_interface_description_fields(block);
				printf("# Interface Description Block\n");
				printf(" linktype %u reserved %u snaplen %u\n",
				       idb->idb_linktype, idb->idb_reserved, idb->idb_snaplen);
				if (pcap_ng_block_get_option(block, PCAPNG_IF_NAME, &option_info) == 1)
					if (option_info.value)
						printf(" interface name: %s\n", option_info.value);
				(void)new_interface_info(current_section, block);
				break;
			}
			case PCAPNG_BT_EPB: {
				struct pcapng_enhanced_packet_fields *epb = pcap_ng_get_enhanced_packet_fields(block);
				printf("# Enhanced Packet Block\n");
				printf(" interface_id %u timestamp_high %u timestamp_low %u caplen %u len %u\n",
				       epb->interface_id, epb->timestamp_high, epb->timestamp_low, epb->caplen, epb->len);
				break;
			}
			case PCAPNG_BT_SPB: {
				struct pcapng_simple_packet_fields *spb = pcap_ng_get_simple_packet_fields(block);
				printf("# Simple Packet Block\n");
				printf(" len %u\n",
				       spb->len);
				break;
			}
			case PCAPNG_BT_PB: {
				struct pcapng_packet_fields *pb = pcap_ng_get_packet_fields(block);
				printf("# Packet Block\n");
				printf(" interface_id %u drops_count %u timestamp_high %u timestamp_low %u caplen %u len %u\n",
				       pb->interface_id, pb->drops_count, pb->timestamp_high, pb->timestamp_low, pb->caplen, pb->len);
				break;
			}
			case PCAPNG_BT_PIB: {
				struct pcapng_process_information_fields *pib = pcap_ng_get_process_information_fields(block);
				printf("# Process Information Block\n");
				printf(" process_id %u\n",
				       pib->process_id);
				if (pcap_ng_block_get_option(block, PCAPNG_PIB_NAME, &option_info) == 1) {
					if (option_info.value)
						printf(" process name: %s\n", option_info.value);
				}
				if (pcap_ng_block_get_option(block, PCAPNG_PIB_UUID, &option_info) == 1) {
					if (option_info.value) {
						uuid_string_t uu_str;
						uuid_unparse_lower(option_info.value, uu_str);
						printf(" process uuid: %s\n", uu_str);
					}
				}
				break;
			}
			case PCAPNG_BT_ISB: {
				printf("# Interface Statistics Block\n");
				break;
			}
			case PCAPNG_BT_NRB: {
				printf("# Name Record Block\n");
				break;
			}
			case PCAPNG_BT_OSEV: {
				struct pcapng_os_event_fields *osev_fields;
				printf("# OS Event Block\n");
				osev_fields = pcap_ng_get_os_event_fields(block);
				printf(" type %u timestamp_high %u timestamp_low %u len %u\n",
				       osev_fields->type,
				       osev_fields->timestamp_high,
				       osev_fields->timestamp_low,
				       osev_fields->len);
				break;
			}
			case PCAPNG_BT_DSB: {
				struct pcapng_decryption_secrets_fields *dsb_fields;
				printf("# Decryption Secrets Block\n");
				dsb_fields = pcap_ng_get_decryption_secrets_fields(block);
				printf(" secrets_type 0x%x secrets_length %u\n",
				       dsb_fields->secrets_type,
				       dsb_fields->secrets_length);
				break;
			}
			default:
				printf("# Unknown Block\n");
				break;
		}
		if (pcap_ng_block_does_support_data(block)) {
			hex_and_ascii_print("", pcap_ng_block_packet_get_data_ptr(block), pcap_ng_block_packet_get_data_len(block), "\n");
		}
		pcnapng_block_iterate_options(block, block_option_iterator, NULL);
	}
}
#define SWAPLONG(y) \
((((y)&0xff)<<24) | (((y)&0xff00)<<8) | (((y)&0xff0000)>>8) | (((y)>>24)&0xff))
#define SWAPSHORT(y) \
( (((y)&0xff)<<8) | ((u_short)((y)&0xff00)>>8) )
#define SWAPLONGLONG(y) \
(SWAPLONG((unsigned long)(y)) << 32 | SWAPLONG((unsigned long)((y) >> 32)))
/*
 * Regression test: opening a non-pcapng (or non-pcap) file must leave
 * the stream position untouched so the caller can retry with the other
 * reader.  BUGFIXES: pcap_close() on a capture created with
 * pcap_*_fopen_offline() closes the underlying FILE *, so the original
 * unconditional fclose(fp) at the end double-closed the stream when
 * the second open succeeded, and the first fp leaked when the first
 * open failed.  Also cast off_t values for the "%llu" conversions.
 */
void
test_pcap_ng_fopen_offline(const char *filename, char *errbuf)
{
	FILE *fp;
	off_t offset;
	pcap_t *pcap;

	fp = fopen(filename, "r");
	if (fp == NULL) {
		warn("fopen(%s) failed", filename);
		return;
	}
	offset = ftello(fp);
	pcap = pcap_ng_fopen_offline(fp, errbuf);
	if (pcap == NULL) {
		warnx("pcap_ng_fopen_offline(%s) failed: %s\n",
		      filename, errbuf);
		if (ftello(fp) != offset)
			errx(EX_OSERR, "pcap_ng_fopen_offline(%s) ftello(fp) (%llu) != offset (%llu)",
			     filename, (unsigned long long)ftello(fp), (unsigned long long)offset);
		fclose(fp);		/* we still own fp on failure */
	} else {
		pcap_close(pcap);	/* closes fp */
	}

	fp = fopen(filename, "r");
	if (fp == NULL) {
		warn("fopen(%s) failed", filename);
		return;
	}
	offset = ftello(fp);
	pcap = pcap_fopen_offline(fp, errbuf);
	if (pcap == NULL) {
		warnx("pcap_fopen_offline(%s) failed: %s\n",
		      filename, errbuf);
		if (ftello(fp) != offset)
			errx(EX_OSERR, "pcap_fopen_offline(%s) ftello(fp) (%llu) != offset (%llu)",
			     filename, (unsigned long long)ftello(fp), (unsigned long long)offset);
		fclose(fp);		/* we still own fp on failure */
	} else {
		pcap_close(pcap);	/* closes fp */
	}
	fprintf(stderr, "TEST PASSED\n");
}
/*
 * Entry point: parse flag arguments (-raw/-block/-pcap/-test/-h), then
 * treat every remaining argument as a capture file and dump it with
 * read_callback() (or run the fopen_offline regression test in -test
 * mode).  Removed a dead strdup()/free() of argv[0] that served no
 * purpose (getprogname() is used for the usage line).
 */
int
main(int argc, const char * argv[])
{
	int i;
	char errbuf[PCAP_ERRBUF_SIZE];

	for (i = 1; i < argc; i++) {
		pcap_t *pcap;

		if (strcmp(argv[i], "-h") == 0) {
			printf("# usage: %s [-raw] [-block] [-pcap] [-test] file\n", getprogname());
			exit(0);
		} else if (strcmp(argv[i], "-raw") == 0) {
			mode_raw = 1;
			continue;
		} else if (strcmp(argv[i], "-block") == 0) {
			mode_block = 1;
			continue;
		} else if (strcmp(argv[i], "-pcap") == 0) {
			mode_pcap = 1;
			continue;
		} else if (strcmp(argv[i], "-test") == 0) {
			mode_test = 1;
			continue;
		}
		printf("#\n# opening %s\n#\n", argv[i]);

		if (mode_test != 0) {
			test_pcap_ng_fopen_offline(argv[i], errbuf);
			continue;
		}
		/* default to block mode when neither -raw nor -block given */
		if (mode_block == 0 && mode_raw == 0) {
			mode_block = 1;
		}
		if (mode_pcap == 0) {
			pcap = pcap_ng_open_offline(argv[i], errbuf);
			if (pcap == NULL) {
				warnx("pcap_ng_open_offline(%s) failed: %s\n",
				      argv[i], errbuf);
				continue;
			}
		} else {
			pcap = pcap_open_offline(argv[i], errbuf);
			if (pcap == NULL) {
				warnx("pcap_open_offline(%s) failed: %s\n",
				      argv[i], errbuf);
				continue;
			}
		}
		int result = pcap_dispatch(pcap, -1, read_callback, (u_char *)pcap);
		if (result < 0) {
			warnx("pcap_dispatch failed: %s\n",
			      pcap_statustostr(result));
		} else {
			printf("# read %d packets\n", result);
		}
		pcap_close(pcap);
	}
	return 0;
}
|
b1044004083918e693189e5f643c80be2f8c4ace
|
ab2d9dd46478aaeddd9dba4b3c6453c832c86f2d
|
/src/blogc/rusage.c
|
a38848d2369a4bab2b7ba36f1b607c659637c390
|
[
"BSD-3-Clause"
] |
permissive
|
blogc/blogc
|
e30ba50bbb8ee311405e69fa77091c2e0859fac8
|
f35e7ab6965d4da9779e948de71d23892954c1c7
|
refs/heads/master
| 2023-03-15T23:25:59.113703
| 2023-03-05T23:31:09
| 2023-03-05T23:31:09
| 33,968,989
| 223
| 25
|
BSD-3-Clause
| 2018-01-18T13:56:46
| 2015-04-15T02:36:12
|
C
|
UTF-8
|
C
| false
| false
| 2,060
|
c
|
rusage.c
|
/*
* blogc: A blog compiler.
* Copyright (C) 2014-2019 Rafael G. Martins <rafael@rafaelmartins.eng.br>
*
* This program can be distributed under the terms of the BSD License.
* See the file LICENSE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif /* HAVE_CONFIG_H */
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif /* HAVE_SYS_TIME_H */
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif /* HAVE_SYS_RESOURCE_H */
#include <stdlib.h>
#include "../common/utils.h"
#include "rusage.h"
/*
 * Returns a newly allocated blogc_rusage_t holding the process'
 * combined user+system CPU time in microseconds and its peak resident
 * set size (ru_maxrss).  Returns NULL when rusage support is compiled
 * out or getrusage() fails.  Caller owns and must free the result.
 * NOTE(review): ru_maxrss units are platform-dependent (kilobytes on
 * Linux, bytes on macOS) -- confirm against the formatting code.
 */
blogc_rusage_t*
blogc_rusage_get(void)
{
#ifndef HAVE_RUSAGE
    return NULL;
#else
    struct rusage usage;
    if (0 != getrusage(RUSAGE_SELF, &usage))
        return NULL;
    blogc_rusage_t *rv = bc_malloc(sizeof(blogc_rusage_t));
    rv->cpu_time = (
        (usage.ru_utime.tv_sec * 1000000) + usage.ru_utime.tv_usec +
        (usage.ru_stime.tv_sec * 1000000) + usage.ru_stime.tv_usec);
    rv->memory = usage.ru_maxrss;
    return rv;
#endif
}
/*
 * Format a CPU time given in microseconds as a human-readable string
 * ("1.234s", "12.345ms" or "123us").  Returns a newly allocated string
 * owned by the caller.
 * BUGFIX: the microseconds branch used "%d" with a long long argument,
 * which is a printf format/argument mismatch (undefined behavior);
 * use "%lld" instead.
 */
char*
blogc_rusage_format_cpu_time(long long time)
{
    if (time >= 1000000)
        return bc_strdup_printf("%.3fs", ((float) time) / 1000000.0);

    // this is a special case: some systems may report the cpu time rounded up to the
    // milisecond. it is useless to show ".000" in this case.
    if (time >= 1000)
        return bc_strdup_printf("%.*fms", time % 1000 ? 3 : 0, ((float) time) / 1000.0);

    return bc_strdup_printf("%lldus", time);
}
/*
 * Format a memory amount (in KB, as stored by blogc_rusage_get()) as a
 * human-readable string.  Returns a newly allocated string that the
 * caller must free().
 */
char*
blogc_rusage_format_memory(long mem)
{
    if (mem >= 1048576)
        return bc_strdup_printf("%.3fGB", ((float) mem) / 1048576.0);
    if (mem >= 1024)
        return bc_strdup_printf("%.3fMB", ((float) mem) / 1024.0);
    // FIX: 'mem' is long, but the old "%dKB" consumed it as int --
    // mismatched printf length modifier (UB on LP64).  Use "%ld".
    return bc_strdup_printf("%ldKB", mem);
}
/*
 * Inject BLOGC_RUSAGE_CPU_TIME and BLOGC_RUSAGE_MEMORY variables into the
 * given global variable trie.  Silently does nothing when resource usage
 * cannot be collected.
 */
void
blogc_rusage_inject(bc_trie_t *global)
{
    blogc_rusage_t *usage = blogc_rusage_get();
    if (usage == NULL)
        return;
    // NOTE(review): the freshly formatted strings are handed straight to the
    // trie; presumably bc_trie_insert takes ownership and frees them later --
    // verify against the bc_trie implementation.
    bc_trie_insert(global, "BLOGC_RUSAGE_CPU_TIME",
        blogc_rusage_format_cpu_time(usage->cpu_time));
    bc_trie_insert(global, "BLOGC_RUSAGE_MEMORY",
        blogc_rusage_format_memory(usage->memory));
    free(usage);
}
|
378fdac9b8867170f8b844b4e4e759bc497957fe
|
fb0f9abad373cd635c2635bbdf491ea0f32da5ff
|
/src/mono/mono/utils/memfuncs.c
|
5d4b2dccb4babfe6dd443d417dbb340cf2bb9e56
|
[
"MIT"
] |
permissive
|
dotnet/runtime
|
f6fd23936752e202f8e4d6d94f3a4f3b0e77f58f
|
47bb554d298e1e34c4e3895d7731e18ad1c47d02
|
refs/heads/main
| 2023-09-03T15:35:46.493337
| 2023-09-03T08:13:23
| 2023-09-03T08:13:23
| 210,716,005
| 13,765
| 5,179
|
MIT
| 2023-09-14T21:58:52
| 2019-09-24T23:36:39
|
C#
|
UTF-8
|
C
| false
| false
| 11,498
|
c
|
memfuncs.c
|
/**
* \file
* Our own bzero/memmove.
*
* Copyright (C) 2013-2015 Xamarin Inc
*
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
/*
* SGen cannot deal with invalid pointers on the heap or in registered roots. Sometimes we
* need to copy or zero out memory in code that might be interrupted by collections. To
* guarantee that those operations will not result in invalid pointers, we must do it
* word-atomically.
*
* libc's bzero() and memcpy()/memmove() functions do not guarantee word-atomicity, even in
* cases where one would assume so. For instance, some implementations (like Darwin's on
* x86) have variants of memcpy() using vector instructions. Those may copy bytewise for
* the region preceding the first vector-aligned address. That region could be
* word-aligned, but it would still be copied byte-wise.
*
* All our memory writes here are to "volatile" locations. This is so that C compilers
* don't "optimize" our code back to calls to bzero()/memmove(). LLVM, specifically, will
* do that.
*/
#include <config.h>
#include <glib.h>
#include <string.h>
#include <errno.h>
#if defined (__APPLE__)
#include <mach/message.h>
#include <mach/mach_host.h>
#include <mach/host_info.h>
#include <sys/sysctl.h>
#endif
#if defined (__NetBSD__)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#endif
#if defined (__FreeBSD__)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <vm/vm_param.h>
#endif
#if defined(TARGET_WIN32)
#include <windows.h>
#endif
#include "memfuncs.h"
/* Pointer-alignment helpers.  A "word" is sizeof (void*) bytes. */
#define ptr_mask ((sizeof (void*) - 1))
#define _toi(ptr) ((size_t)ptr)
/* Number of bytes by which ptr is off word alignment (0 when aligned). */
#define unaligned_bytes(ptr) (_toi(ptr) & ptr_mask)
#define align_down(ptr) ((void*)(_toi(ptr) & ~ptr_mask))
#define align_up(ptr) ((void*) ((_toi(ptr) + ptr_mask) & ~ptr_mask))
#if SIZEOF_VOID_P == 4
#define bytes_to_words(n)	((size_t)(n) >> 2)
#elif SIZEOF_VOID_P == 8
#define bytes_to_words(n)	((size_t)(n) >> 3)
#else
#error We only support 32 and 64 bit architectures.
#endif
/* Zero 'words' machine words starting at 'dest'.  Stores go through a
 * volatile pointer so the compiler cannot collapse the loop back into a
 * (non word-atomic) bzero()/memset() call -- see the comment at the top
 * of this file. */
#define BZERO_WORDS(dest,words) do {	\
	void * volatile *__d = (void* volatile*)(dest);	\
	size_t __n = (words);	\
	size_t __i;	\
	for (__i = 0; __i < __n; ++__i)	\
		__d [__i] = NULL;	\
	} while (0)
#define MINMEMSZ 20971520 /* Minimum restricted memory size - 20MB */
/**
* mono_gc_bzero_aligned:
* \param dest address to start to clear
* \param size size of the region to clear
*
* Zero \p size bytes starting at \p dest.
* The address of \p dest MUST be aligned to word boundaries
*
* FIXME borrow faster code from some BSD libc or bionic
*/
void
mono_gc_bzero_aligned (void *dest, size_t size)
{
	volatile char *d = (char*)dest;
	size_t tail_bytes, word_bytes;

	g_assert (unaligned_bytes (dest) == 0);

	/* Zero all whole words, word-atomically.  Small fixed counts are
	 * special-cased so common tiny sizes compile to straight-line stores
	 * instead of a counted loop. */
	word_bytes = (size_t)align_down (size);
	switch (word_bytes) {
	case sizeof (void*) * 1:
		BZERO_WORDS (d, 1);
		break;
	case sizeof (void*) * 2:
		BZERO_WORDS (d, 2);
		break;
	case sizeof (void*) * 3:
		BZERO_WORDS (d, 3);
		break;
	case sizeof (void*) * 4:
		BZERO_WORDS (d, 4);
		break;
	default:
		BZERO_WORDS (d, bytes_to_words (word_bytes));
	}

	/* Clear the remaining sub-word tail byte-wise; a partial word can
	 * never hold a whole managed pointer, so byte stores are safe. */
	tail_bytes = unaligned_bytes (size);
	if (tail_bytes) {
		d += word_bytes;
		do {
			*d++ = 0;
		} while (--tail_bytes);
	}
}
/**
* mono_gc_bzero_atomic:
* \param dest address to start to clear
* \param size size of the region to clear
*
* Zero \p size bytes starting at \p dest.
*
* Use this to zero memory without word tearing when \p dest is aligned.
*/
void
mono_gc_bzero_atomic (void *dest, size_t size)
{
	/* A word-aligned destination can be cleared word-by-word without
	 * tearing; anything else falls back to plain memset (). */
	if (unaligned_bytes (dest) == 0) {
		mono_gc_bzero_aligned (dest, size);
	} else {
		memset (dest, 0, size);
	}
}
/* Copy 'words' machine words from src to dest, iterating from the lowest
 * address upward.  Stores go through a volatile pointer so the compiler
 * cannot turn the loop back into memcpy()/memmove() (see the comment at
 * the top of this file). */
#define MEMMOVE_WORDS_UPWARD(dest,src,words) do {	\
	void * volatile *__d = (void* volatile*)(dest);	\
	void **__s = (void**)(src);	\
	size_t __n = (words);	\
	size_t __i;	\
	for (__i = 0; __i < __n; ++__i)	\
		__d [__i] = __s [__i];	\
	} while (0)

/* Same as MEMMOVE_WORDS_UPWARD but iterating from the highest address
 * downward, for overlapping backward copies. */
#define MEMMOVE_WORDS_DOWNWARD(dest,src,words) do {	\
	void * volatile *__d = (void* volatile*)(dest);	\
	void **__s = (void**)(src);	\
	size_t __n = (words);	\
	size_t __i;	\
	for (__i = __n; __i-- > 0;)	\
		__d [__i] = __s [__i];	\
	} while (0)
/**
* mono_gc_memmove_aligned:
* \param dest destination of the move
* \param src source
* \param size size of the block to move
*
* Move \p size bytes from \p src to \p dest.
*
* Use this to copy memory without word tearing when both pointers are aligned
*/
void
mono_gc_memmove_aligned (void *dest, const void *src, size_t size)
{
	g_assert (unaligned_bytes (dest) == 0);
	g_assert (unaligned_bytes (src) == 0);
	/*
	If we're copying less than a word we don't need to worry about word tearing
	so we bailout to memmove early.
	*/
	if (size < sizeof(void*)) {
		memmove (dest, src, size);
		return;
	}

	/*
	 * A bit of explanation on why we align only dest before doing word copies.
	 * Pointers to managed objects must always be stored in word aligned addresses, so
	 * even if dest is misaligned, src will be by the same amount - this ensure proper atomicity of reads.
	 *
	 * We don't need to case when source and destination have different alignments since we only do word stores
	 * using memmove, which must handle it.
	 */
	if (dest > src && ((size_t)((char*)dest - (char*)src) < size)) { /*backward copy*/
		/* Overlapping with dest above src: copy from the top down so each
		 * source byte is read before it can be overwritten. */
		volatile char *p = (char*)dest + size;
		char *s = (char*)src + size;
		char *start = (char*)dest;
		char *align_end = MAX((char*)dest, (char*)align_down (p));
		char *word_start;
		size_t bytes_to_memmove;

		/* 1) byte-copy the unaligned tail down to a word boundary */
		while (p > align_end)
			*--p = *--s;

		/* 2) word-copy the aligned region, still moving downward */
		word_start = (char *)align_up (start);
		bytes_to_memmove = p - word_start;
		p -= bytes_to_memmove;
		s -= bytes_to_memmove;
		MEMMOVE_WORDS_DOWNWARD (p, s, bytes_to_words (bytes_to_memmove));
	} else {
		/* Forward copy: all whole words first, then the sub-word tail. */
		volatile char *d = (char*)dest;
		const char *s = (const char*)src;
		size_t tail_bytes;

		MEMMOVE_WORDS_UPWARD (d, s, bytes_to_words (align_down (size)));
		tail_bytes = unaligned_bytes (size);
		if (tail_bytes) {
			d += (size_t)align_down (size);
			s += (size_t)align_down (size);
			do {
				*d++ = *s++;
			} while (--tail_bytes);
		}
	}
}
/**
* mono_gc_memmove_atomic:
* \param dest destination of the move
* \param src source
* \param size size of the block to move
*
* Move \p size bytes from \p src to \p dest.
*
* Use this to copy memory without word tearing when both pointers are aligned
*/
void
mono_gc_memmove_atomic (void *dest, const void *src, size_t size)
{
	/* Word-atomic copying is only possible when both endpoints share word
	 * alignment; everything else goes through plain memmove (). */
	int both_aligned = unaligned_bytes (_toi (dest) | _toi (src)) == 0;

	if (both_aligned)
		mono_gc_memmove_aligned (dest, src, size);
	else
		memmove (dest, src, size);
}
#define _DEFAULT_MEM_SIZE 134217728 /* 128MB fallback when detection fails */

/*
 * mono_determine_physical_ram_size:
 * Return the machine's physical RAM size in bytes, clamped by any
 * cgroup/container limit (and the DOTNET_/COMPlus_ GC heap limit envvars)
 * when HAVE_CGROUP_SUPPORT is enabled.  Falls back to _DEFAULT_MEM_SIZE
 * when the platform gives no usable answer.
 */
guint64
mono_determine_physical_ram_size (void)
{
#if defined (TARGET_WIN32)
	MEMORYSTATUSEX memstat;

	memstat.dwLength = sizeof (memstat);
	GlobalMemoryStatusEx (&memstat);
	return (guint64)memstat.ullTotalPhys;
#elif defined (__NetBSD__) || defined (__APPLE__) || defined (__FreeBSD__)
#if defined (__NetBSD__) || defined (__FreeBSD__)
	unsigned long value;
#else
	guint64 value;
#endif
	int mib[2] = {
		CTL_HW,
#ifdef __NetBSD__
		HW_PHYSMEM64
#elif defined (__FreeBSD__)
		HW_PHYSMEM
#else
		HW_MEMSIZE
#endif
	};
	size_t size_sys = sizeof (value);

	sysctl (mib, 2, &value, &size_sys, NULL, 0);
	if (value == 0)
		return _DEFAULT_MEM_SIZE;
	return (guint64)value;
#elif defined (HAVE_SYSCONF)
	guint64 page_size = 0, num_pages = 0, memsize;
	/* sysconf works on most *NIX operating systems, if your system doesn't have it or if it
	 * reports invalid values, please add your OS specific code below. */
#ifdef _SC_PAGESIZE
	page_size = (guint64)sysconf (_SC_PAGESIZE);
#endif
#ifdef _SC_PHYS_PAGES /* non-POSIX should work on: Linux, Solaris2, cygwin */
	num_pages = (guint64)sysconf (_SC_PHYS_PAGES);
#endif
	if (!page_size || !num_pages) {
		g_warning ("Your operating system's sysconf (3) function doesn't correctly report physical memory size!");
		return _DEFAULT_MEM_SIZE;
	}
	/* NOTE(review): when _SC_AVPHYS_PAGES exists this computes *available*
	 * memory, not total, despite the function name -- presumably deliberate
	 * for the container clamping below, but worth confirming. */
#if defined(_SC_AVPHYS_PAGES)
	memsize = sysconf(_SC_AVPHYS_PAGES) * page_size;
#else
	memsize = page_size * num_pages; /* Calculate physical memory size */
#endif
#if HAVE_CGROUP_SUPPORT
	gint64 restricted_limit = mono_get_restricted_memory_limit(); /* Check for any cgroup limit */
	if (restricted_limit != 0) {
		gchar *heapHardLimit = getenv("DOTNET_GCHeapHardLimit"); /* See if user has set a limit */
		if (heapHardLimit == NULL)
			heapHardLimit = getenv("COMPlus_GCHeapHardLimit"); /* Check old envvar name */
		errno = 0;
		if (heapHardLimit != NULL) {
			/* Absolute limit is parsed as hexadecimal bytes. */
			guint64 gcLimit = strtoull(heapHardLimit, NULL, 16);
			if ((errno == 0) && (gcLimit != 0))
				restricted_limit = (restricted_limit < gcLimit ? restricted_limit : (gint64) gcLimit);
		} else {
			gchar *heapHardLimitPct = getenv("DOTNET_GCHeapHardLimitPercent"); /* User % limit? */
			if (heapHardLimitPct == NULL)
				heapHardLimitPct = getenv("COMPlus_GCHeapHardLimitPercent"); /* Check old envvar name */
			if (heapHardLimitPct != NULL) {
				/* NOTE(review): percentage is also parsed base-16 -- confirm
				 * this matches the documented envvar format. */
				int gcLimit = strtoll(heapHardLimitPct, NULL, 16);
				if ((gcLimit > 0) && (gcLimit <= 100))
					restricted_limit = (gcLimit * restricted_limit) / 100;
				else
					restricted_limit = (3 * restricted_limit) / 4; /* Use 75% limit of container */
			} else {
				restricted_limit = (3 * restricted_limit) / 4; /* Use 75% limit of container */
			}
		}
		return (restricted_limit < MINMEMSZ ? MINMEMSZ : /* Use at least 20MB */
			(restricted_limit < memsize ? restricted_limit : memsize));
	}
#endif
	return memsize;
#else
	return _DEFAULT_MEM_SIZE;
#endif
}
/*
 * mono_determine_physical_ram_available_size:
 * Return the amount of physical RAM currently free.  Returns 0 when the
 * macOS query fails, and _DEFAULT_MEM_SIZE when the platform gives no
 * usable answer.
 */
guint64
mono_determine_physical_ram_available_size (void)
{
#if defined (TARGET_WIN32)
	MEMORYSTATUSEX memstat;

	memstat.dwLength = sizeof (memstat);
	GlobalMemoryStatusEx (&memstat);
	return (guint64)memstat.ullAvailPhys;
#elif defined (__NetBSD__) || defined (__FreeBSD__)
	struct vmtotal vm_total;
	guint64 page_size;
	int mib[2];
	size_t len;

	mib[0] = CTL_VM;
	mib[1] = VM_METER;
	len = sizeof (vm_total);
	sysctl (mib, 2, &vm_total, &len, NULL, 0);

	mib[0] = CTL_HW;
	mib[1] = HW_PAGESIZE;
	len = sizeof (page_size);
	sysctl (mib, 2, &page_size, &len, NULL, 0);

	/* NOTE(review): the division by 1024 makes this branch return KB while
	 * every other branch returns bytes -- confirm which unit callers expect. */
	return ((guint64) vm_total.t_free * page_size) / 1024;
#elif defined (__APPLE__)
	mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
	mach_port_t host = mach_host_self ();
	vm_size_t page_size;
	vm_statistics_data_t vmstat;
	kern_return_t ret;
	/* host_statistics() may be interrupted; retry until it settles. */
	do {
		ret = host_statistics (host, HOST_VM_INFO, (host_info_t)&vmstat, &count);
	} while (ret == KERN_ABORTED);
	if (ret != KERN_SUCCESS) {
		g_warning ("Mono was unable to retrieve memory usage!");
		return 0;
	}
	host_page_size (host, &page_size);
	return (guint64) vmstat.free_count * page_size;
#elif HAVE_CGROUP_SUPPORT
	return (mono_get_memory_avail());
#elif defined (HAVE_SYSCONF)
	guint64 page_size = 0, num_pages = 0;
	/* sysconf works on most *NIX operating systems, if your system doesn't have it or if it
	 * reports invalid values, please add your OS specific code below. */
#ifdef _SC_PAGESIZE
	page_size = (guint64)sysconf (_SC_PAGESIZE);
#endif
#ifdef _SC_AVPHYS_PAGES /* non-POSIX should work on: Linux, Solaris2, cygwin */
	num_pages = (guint64)sysconf (_SC_AVPHYS_PAGES);
#endif
	if (!page_size || !num_pages) {
		g_warning ("Your operating system's sysconf (3) function doesn't correctly report physical memory size!");
		return _DEFAULT_MEM_SIZE;
	}
	return page_size * num_pages;
#else
	return _DEFAULT_MEM_SIZE;
#endif
}
|
82499b00603234e09147cf01b5965923ceefd270
|
7e41167bfae6d2c38689b7e0993b308e045cbd05
|
/ssh_keygen/openbsd-compat/rresvport.c
|
1cd61e58dbad7ae6ec27046e2b76d5d4ce7b3454
|
[
"BSD-3-Clause",
"curl",
"GPL-1.0-or-later",
"MIT",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"ISC",
"snprintf",
"SSH-short"
] |
permissive
|
holzschu/ios_system
|
7b18993dbcb33331c353e0257ca54847a5d1b1bb
|
6a83eb1c6c383a562fbe33a7e97677e88d305b51
|
refs/heads/master
| 2023-08-14T09:11:40.627903
| 2023-04-18T15:12:29
| 2023-04-18T15:12:29
| 113,187,304
| 882
| 155
|
BSD-3-Clause
| 2023-08-19T19:15:35
| 2017-12-05T13:42:50
|
C
|
UTF-8
|
C
| false
| false
| 3,044
|
c
|
rresvport.c
|
/* $OpenBSD: rresvport.c,v 1.9 2005/11/10 10:00:17 espie Exp $ */
/*
* Copyright (c) 1995, 1996, 1998 Theo de Raadt. All rights reserved.
* Copyright (c) 1983, 1993, 1994
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* OPENBSD ORIGINAL: lib/libc/net/rresvport.c */
#include "includes.h"
#ifndef HAVE_RRESVPORT_AF
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#if 0
int
rresvport(int *alport)
{
return rresvport_af(alport, AF_INET);
}
#endif
/*
 * rresvport_af:
 * Create a TCP socket bound to a reserved port (< IPPORT_RESERVED) for
 * the given address family (AF_INET or AF_INET6).
 *
 * On entry *alport may name a specific reserved port to try first; on
 * success *alport holds the port actually bound.  Returns the socket
 * descriptor, or -1 with errno set on failure.
 */
int
rresvport_af(int *alport, sa_family_t af)
{
	struct sockaddr_storage ss;
	struct sockaddr *sa;
	u_int16_t *portp;
	int s;
	socklen_t salen;

	memset(&ss, '\0', sizeof ss);
	sa = (struct sockaddr *)&ss;

	/* Point portp at the family-specific port field inside ss. */
	switch (af) {
	case AF_INET:
		salen = sizeof(struct sockaddr_in);
		portp = &((struct sockaddr_in *)sa)->sin_port;
		break;
	case AF_INET6:
		salen = sizeof(struct sockaddr_in6);
		portp = &((struct sockaddr_in6 *)sa)->sin6_port;
		break;
	default:
		errno = EPFNOSUPPORT;
		return (-1);
	}
	sa->sa_family = af;

	s = socket(af, SOCK_STREAM, 0);
	if (s < 0)
		return (-1);

	/* First try the caller-requested port, if it is in the reserved range.
	 * EADDRINUSE is not fatal -- we fall through and pick another port. */
	*portp = htons(*alport);
	if (*alport < IPPORT_RESERVED - 1) {
		if (bind(s, sa, salen) >= 0)
			return (s);
		if (errno != EADDRINUSE) {
			(void)close(s);
			return (-1);
		}
	}

	/* Let bindresvport_sa() pick a free reserved port; it stores the
	 * chosen port back into sa, which we read out below. */
	*portp = 0;
	sa->sa_family = af;
	if (bindresvport_sa(s, sa) == -1) {
		(void)close(s);
		return (-1);
	}
	*alport = ntohs(*portp);
	return (s);
}
#endif /* HAVE_RRESVPORT_AF */
|
ef23883d248dadc24659eb98ed99f569e23faacb
|
7ca8ffcdfb39ab4ffc2d8ff291e46ffabc8db6a2
|
/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/org_apache_hadoop_crypto.h
|
3f377cfb29bd1b1044e2f42924df4a0ec851f8cf
|
[
"Apache-2.0",
"CC-PDDC",
"CC0-1.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"CDDL-1.0",
"GCC-exception-3.1",
"MIT",
"EPL-1.0",
"Classpath-exception-2.0",
"BSD-3-Clause",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-jdom",
"CDDL-1.1",
"BSD-2-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
apache/hadoop
|
ea2a4a370dd00d4a3806dd38df5b3cf6fd5b2c64
|
42b4525f75b828bf58170187f030b08622e238ab
|
refs/heads/trunk
| 2023-08-18T07:29:26.346912
| 2023-08-17T16:56:34
| 2023-08-17T16:56:34
| 23,418,517
| 16,088
| 10,600
|
Apache-2.0
| 2023-09-14T16:59:38
| 2014-08-28T07:00:08
|
Java
|
UTF-8
|
C
| false
| false
| 1,833
|
h
|
org_apache_hadoop_crypto.h
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ORG_APACHE_HADOOP_CRYPTO_H
#define ORG_APACHE_HADOOP_CRYPTO_H
#include "org_apache_hadoop.h"
#ifdef UNIX
#include <dlfcn.h>
#include "config.h"
#endif
#ifdef WINDOWS
#include "winutils.h"
#endif
#include <openssl/aes.h>
#include <openssl/evp.h>
#include <openssl/err.h>
/**
 * A helper macro to convert the java 'context-handle'
 * to a EVP_CIPHER_CTX pointer.
 */
#define CONTEXT(context) ((EVP_CIPHER_CTX*)((ptrdiff_t)(context)))
/**
 * A helper macro to convert the EVP_CIPHER_CTX pointer to the
 * java 'context-handle'.
 */
#define JLONG(context) ((jlong)((ptrdiff_t)(context)))
/**
 * A helper macro to convert long to ENGINE.
 */
#define LONG_TO_ENGINE(engine) ((ENGINE*)((ptrdiff_t)(engine)))
/* Key lengths in bytes: 16 -> 128-bit keys, 32 -> 256-bit keys. */
#define KEY_LENGTH_128 16
#define KEY_LENGTH_256 32
/* Initialization vector length in bytes. */
#define IV_LENGTH 16
/* Cipher direction flags -- presumably fed to OpenSSL EVP's 'enc'
 * parameter, which uses the same 1/0 convention; verify at call sites. */
#define ENCRYPT_MODE 1
#define DECRYPT_MODE 0
/** Currently only support AES/CTR/NoPadding & SM4/CTR/NoPadding. */
#define AES_CTR 0
#define SM4_CTR 1
/* Padding scheme selectors. */
#define NOPADDING 0
#define PKCSPADDING 1
#endif //ORG_APACHE_HADOOP_CRYPTO_H
|
367e17a8b2ccad4d896522d017127290d8d279ff
|
b4995c17067e4c16e287b4d0d454a9437ed38751
|
/src/njs_vmcode.h
|
88e5016432a343919551e77a57f9f52ce1b6b453
|
[
"BSD-2-Clause"
] |
permissive
|
nginx/njs
|
95f44a9128d26f8ed6a052b2e00dd7cff1410efb
|
e694d61a94c711b7c48b0ae23909a4cef7c65700
|
refs/heads/master
| 2023-08-24T18:52:57.529281
| 2023-08-23T17:09:22
| 2023-08-23T17:09:22
| 43,038,779
| 742
| 120
|
BSD-2-Clause
| 2023-07-30T09:23:31
| 2015-09-24T02:03:58
|
C
|
UTF-8
|
C
| false
| false
| 11,530
|
h
|
njs_vmcode.h
|
/*
* Copyright (C) Igor Sysoev
* Copyright (C) NGINX, Inc.
*/
#ifndef _NJS_VMCODE_H_INCLUDED_
#define _NJS_VMCODE_H_INCLUDED_

/*
 * Negative return values handled by nJSVM interpreter as special events.
 * The values must be in range from -1 to -11, because -12 is minimal jump
 * offset on 32-bit platforms.
 *  0 (NJS_OK)   : njs_vmcode_stop() has stopped execution,
 *                 execution successfully finished
 * -1 (NJS_ERROR): error or exception;
 * -2 .. -11: not used.
 */

/* The last return value which preempts execution. */
#define NJS_PREEMPT (-11)

/* Byte offset (possibly negative) applied to the program counter on jumps. */
typedef intptr_t njs_jump_off_t;

/* One-byte opcode; values come from the enum below. */
typedef uint8_t njs_vmcode_operation_t;

#define NJS_VMCODE_3OPERANDS 0
#define NJS_VMCODE_2OPERANDS 1

/*
 * VM opcodes.  NJS_VMCODES (last entry) is the total opcode count.
 * NOTE(review): opcode values are presumably part of the bytecode layout --
 * confirm before reordering entries.
 */
enum {
    NJS_VMCODE_PUT_ARG = 0,
    NJS_VMCODE_STOP,
    NJS_VMCODE_JUMP,
    NJS_VMCODE_PROPERTY_SET,
    NJS_VMCODE_PROPERTY_ACCESSOR,
    NJS_VMCODE_IF_TRUE_JUMP,
    NJS_VMCODE_IF_FALSE_JUMP,
    NJS_VMCODE_IF_EQUAL_JUMP,
    NJS_VMCODE_PROPERTY_INIT,
    NJS_VMCODE_RETURN,
    NJS_VMCODE_FUNCTION_COPY,
    NJS_VMCODE_FUNCTION_FRAME,
    NJS_VMCODE_METHOD_FRAME,
    NJS_VMCODE_FUNCTION_CALL,
    NJS_VMCODE_PROPERTY_NEXT,
    NJS_VMCODE_ARGUMENTS,
    NJS_VMCODE_PROTO_INIT,
    NJS_VMCODE_TO_PROPERTY_KEY,
    NJS_VMCODE_TO_PROPERTY_KEY_CHK,
    NJS_VMCODE_SET_FUNCTION_NAME,
    NJS_VMCODE_IMPORT,
    NJS_VMCODE_AWAIT,
    NJS_VMCODE_TRY_START,
    NJS_VMCODE_THROW,
    NJS_VMCODE_TRY_BREAK,
    NJS_VMCODE_TRY_CONTINUE,
    NJS_VMCODE_TRY_END,
    NJS_VMCODE_CATCH,
    NJS_VMCODE_FINALLY,
    NJS_VMCODE_LET,
    NJS_VMCODE_LET_UPDATE,
    NJS_VMCODE_INITIALIZATION_TEST,
    NJS_VMCODE_NOT_INITIALIZED,
    NJS_VMCODE_ASSIGNMENT_ERROR,
    NJS_VMCODE_ERROR,
    NJS_VMCODE_MOVE,
    NJS_VMCODE_PROPERTY_GET,
    NJS_VMCODE_INCREMENT,
    NJS_VMCODE_POST_INCREMENT,
    NJS_VMCODE_DECREMENT,
    NJS_VMCODE_POST_DECREMENT,
    NJS_VMCODE_TRY_RETURN,
    NJS_VMCODE_GLOBAL_GET,
    NJS_VMCODE_LESS,
    NJS_VMCODE_GREATER,
    NJS_VMCODE_LESS_OR_EQUAL,
    NJS_VMCODE_GREATER_OR_EQUAL,
    NJS_VMCODE_ADDITION,
    NJS_VMCODE_EQUAL,
    NJS_VMCODE_NOT_EQUAL,
    NJS_VMCODE_SUBTRACTION,
    NJS_VMCODE_MULTIPLICATION,
    NJS_VMCODE_EXPONENTIATION,
    NJS_VMCODE_DIVISION,
    NJS_VMCODE_REMAINDER,
    NJS_VMCODE_BITWISE_AND,
    NJS_VMCODE_BITWISE_OR,
    NJS_VMCODE_BITWISE_XOR,
    NJS_VMCODE_LEFT_SHIFT,
    NJS_VMCODE_RIGHT_SHIFT,
    NJS_VMCODE_UNSIGNED_RIGHT_SHIFT,
    NJS_VMCODE_TEMPLATE_LITERAL,
    NJS_VMCODE_PROPERTY_IN,
    NJS_VMCODE_PROPERTY_DELETE,
    NJS_VMCODE_PROPERTY_FOREACH,
    NJS_VMCODE_STRICT_EQUAL,
    NJS_VMCODE_STRICT_NOT_EQUAL,
    NJS_VMCODE_TEST_IF_TRUE,
    NJS_VMCODE_TEST_IF_FALSE,
    NJS_VMCODE_COALESCE,
    NJS_VMCODE_UNARY_PLUS,
    NJS_VMCODE_UNARY_NEGATION,
    NJS_VMCODE_BITWISE_NOT,
    NJS_VMCODE_LOGICAL_NOT,
    NJS_VMCODE_OBJECT,
    NJS_VMCODE_ARRAY,
    NJS_VMCODE_FUNCTION,
    NJS_VMCODE_REGEXP,
    NJS_VMCODE_INSTANCE_OF,
    NJS_VMCODE_TYPEOF,
    NJS_VMCODE_VOID,
    NJS_VMCODE_DELETE,
    NJS_VMCODE_DEBUGGER,
    NJS_VMCODES
};

/* Common instruction header: opcode plus operand-count tag. */
typedef struct {
    njs_vmcode_operation_t operation;
    uint8_t operands; /* 2 bits */
} njs_vmcode_t;

/* Generic 3-operand instruction layout. */
typedef struct {
    njs_vmcode_t code;
    njs_index_t operand1;
    njs_index_t operand2;
    njs_index_t operand3;
} njs_vmcode_generic_t;

/* Fixed 1/2/3-address layouts (operands are njs_index_t register slots). */
typedef struct {
    njs_vmcode_t code;
    njs_index_t index;
} njs_vmcode_1addr_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t dst;
    njs_index_t src;
} njs_vmcode_2addr_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t dst;
    njs_index_t src1;
    njs_index_t src2;
} njs_vmcode_3addr_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t dst;
    njs_index_t src;
} njs_vmcode_move_t;

/* Value-construction instructions. */
typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
} njs_vmcode_object_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t dst;
} njs_vmcode_this_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t dst;
} njs_vmcode_arguments_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
    uintptr_t length;
    uint8_t ctor; /* 1 bit */
} njs_vmcode_array_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
} njs_vmcode_template_literal_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
    njs_function_lambda_t *lambda;
    njs_bool_t async;
} njs_vmcode_function_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
    njs_regexp_pattern_t *pattern;
} njs_vmcode_regexp_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
    njs_index_t object;
} njs_vmcode_object_copy_t;

/* Control-flow instructions carry a pc-relative njs_jump_off_t offset. */
typedef struct {
    njs_vmcode_t code;
    njs_jump_off_t offset;
} njs_vmcode_jump_t;

typedef struct {
    njs_vmcode_t code;
    njs_jump_off_t offset;
    njs_index_t cond;
} njs_vmcode_cond_jump_t;

typedef struct {
    njs_vmcode_t code;
    njs_jump_off_t offset;
    njs_index_t value1;
    njs_index_t value2;
} njs_vmcode_equal_jump_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
    njs_index_t value;
    njs_jump_off_t offset;
} njs_vmcode_test_jump_t;

/* Property access instructions. */
typedef struct {
    njs_vmcode_t code;
    njs_index_t value;
    njs_index_t object;
    njs_index_t property;
} njs_vmcode_prop_get_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t value;
    njs_index_t object;
    njs_index_t property;
} njs_vmcode_prop_set_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t value;
    njs_index_t object;
    njs_index_t property;
    uint8_t type;
} njs_vmcode_prop_accessor_t;

/* for-in iteration support. */
typedef struct {
    njs_vmcode_t code;
    njs_index_t next;
    njs_index_t object;
    njs_jump_off_t offset;
} njs_vmcode_prop_foreach_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
    njs_index_t object;
    njs_index_t next;
    njs_jump_off_t offset;
} njs_vmcode_prop_next_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t value;
    njs_index_t constructor;
    njs_index_t object;
} njs_vmcode_instance_of_t;

/* Function/method call frame setup and invocation. */
typedef struct {
    njs_vmcode_t code;
    njs_index_t nargs;
    njs_index_t name;
    uint8_t ctor; /* 1 bit */
} njs_vmcode_function_frame_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t nargs;
    njs_index_t object;
    njs_index_t method;
    uint8_t ctor; /* 1 bit */
} njs_vmcode_method_frame_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
} njs_vmcode_function_call_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
} njs_vmcode_return_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
} njs_vmcode_stop_t;

/* try/catch/finally support. */
typedef struct {
    njs_vmcode_t code;
    njs_jump_off_t offset;
    njs_index_t exception_value;
    njs_index_t exit_value;
} njs_vmcode_try_start_t;

typedef struct {
    njs_vmcode_t code;
    njs_jump_off_t offset;
    njs_index_t exit_value;
} njs_vmcode_try_trampoline_t;

typedef struct {
    njs_vmcode_t code;
    njs_jump_off_t offset;
    njs_index_t exception;
} njs_vmcode_catch_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
} njs_vmcode_throw_t;

typedef struct {
    njs_vmcode_t code;
    njs_jump_off_t offset;
} njs_vmcode_try_end_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t save;
    njs_index_t retval;
    njs_jump_off_t offset;
} njs_vmcode_try_return_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
    njs_index_t exit_value;
    njs_jump_off_t continue_offset;
    njs_jump_off_t break_offset;
} njs_vmcode_finally_t;

typedef struct {
    njs_vmcode_t code;
    njs_object_type_t type;
    union {
        njs_str_t name;
        njs_str_t message;
    } u;
} njs_vmcode_error_t;

typedef struct {
    njs_vmcode_t code;
    njs_value_t *function;
    njs_index_t retval;
} njs_vmcode_function_copy_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
    njs_mod_t *module;
} njs_vmcode_import_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t dst;
} njs_vmcode_variable_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
} njs_vmcode_debugger_t;

typedef struct {
    njs_vmcode_t code;
    njs_index_t retval;
} njs_vmcode_await_t;

njs_int_t njs_vmcode_interpreter(njs_vm_t *vm, u_char *pc, njs_value_t *retval,
    void *promise_cap, void *async_ctx);

njs_object_t *njs_function_new_object(njs_vm_t *vm, njs_value_t *constructor);

/* Opcode tracing helpers; compiled out unless NJS_DEBUG_OPCODE is defined. */
#ifdef NJS_DEBUG_OPCODE
#define njs_vmcode_debug(vm, pc, prefix) {                                    \
    if (vm->options.opcode_debug) do {                                        \
        njs_vm_code_t *code;                                                  \
                                                                              \
        code = njs_lookup_code(vm, pc);                                       \
                                                                              \
        njs_printf("%s %V\n", prefix,                                         \
                   (code != NULL) ? &code->name : &njs_entry_unknown);        \
    } while (0);                                                              \
}

#define njs_vmcode_debug_opcode()                                             \
    if (vm->options.opcode_debug) {                                           \
        njs_disassemble(pc, NULL, 1, NULL);                                   \
    }
#else
#define njs_vmcode_debug(vm, pc, prefix)
#define njs_vmcode_debug_opcode()
#endif

#endif /* _NJS_VMCODE_H_INCLUDED_ */
|
9e59da2c89b7844738a9bf58785d9fc416b567e5
|
5eff7a36d9a9917dce9111f0c3074375fe6f7656
|
/app/editres/comm.c
|
918dc7a5da64c90190a28d2df3b2a41890b0b16a
|
[
"MIT-open-group"
] |
permissive
|
openbsd/xenocara
|
cb392d02ebba06f6ff7d826fd8a89aa3b8401779
|
a012b5de33ea0b977095d77316a521195b26cc6b
|
refs/heads/master
| 2023-08-25T12:16:58.862008
| 2023-08-12T16:16:25
| 2023-08-12T16:16:25
| 66,967,384
| 177
| 66
| null | 2023-07-22T18:12:37
| 2016-08-30T18:36:01
|
C
|
UTF-8
|
C
| false
| false
| 24,499
|
c
|
comm.c
|
/*
Copyright 1990, 1998 The Open Group
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of The Open Group shall
not be used in advertising or otherwise to promote the sale, use or
other dealings in this Software without prior written authorization
from The Open Group.
*/
/*
* This file contains the code to communicate with the client that is
* being edited.
*/
#include <X11/Intrinsic.h>
#include <X11/StringDefs.h> /* Get standard string definitions. */
#include <X11/Xatom.h>
#include <X11/cursorfont.h> /* For crosshair cursor. */
#include <X11/Xproto.h>
#include <X11/Xos.h> /* for XtNewString */
#include <stdio.h>
#include <X11/Xmu/Error.h>
#include <X11/Xmu/WinUtil.h>
#include "editresP.h"
/*
* static Globals.
*/
static Atom atom_comm, atom_command, atom_resource_editor, atom_client_value;
static Atom atom_editres_protocol;
static void ClientTimedOut ( XtPointer data, XtIntervalId * id );
static void TellUserAboutMessage ( Widget label, ResCommand command );
static Boolean ConvertCommand ( Widget w, Atom * selection, Atom * target,
Atom * type_ret, XtPointer *value_ret,
unsigned long * length_ret, int * format_ret );
static void SelectionDone ( Widget w, Atom *sel, Atom *targ );
static void LoseSelection ( Widget w, Atom * sel );
static void GetClientValue ( Widget w, XtPointer data, Atom *selection,
Atom *type, XtPointer value,
unsigned long *length, int * format );
static void BuildHeader ( CurrentClient * client_data );
static Event * BuildEvent ( ProtocolStream * stream );
static void FreeEvent ( Event * event );
static char * DispatchEvent ( Event * event );
/* Function Name: ClientTimedOut
* Description: Called if the client takes too long to take our selection.
* Arguments: data - The widget that owns the client
* communication selection.
* id - *** UNUSED ***
* Returns: none.
*/
/* ARGSUSED */
static void
ClientTimedOut(XtPointer data, XtIntervalId *id)
{
    Widget owner = (Widget) data;
    char buffer[BUFSIZ];

    /* Forget the pending request and give up the communication selection. */
    global_client.ident = NO_IDENT;
    XtDisownSelection(owner, global_client.atom,
		      XtLastTimestampProcessed(XtDisplay(owner)));

    /* Let the user know the client never responded. */
    snprintf(buffer, sizeof(buffer), res_labels[4], "the Editres Protocol.");
    SetMessage(global_screen_data.info_label, buffer);
}
/* Function Name: GetClientWindow
* Description: Gets the Client's window by asking the user.
* Arguments: w - a widget.
* Returns: a clients window, or None.
*/
Window
GetClientWindow(Widget w, int *x, int *y)
{
    int status;
    Cursor cursor;
    XEvent event;
    int buttons = 0;
    Display * dpy = XtDisplayOfObject(w);
    Window target_win = None, root = RootWindowOfScreen(XtScreenOfObject(w));
    XtAppContext app = XtWidgetToApplicationContext(w);

    /* Make the target cursor */
    cursor = XCreateFontCursor(dpy, XC_crosshair);

    /* Grab the pointer using target cursor, letting it roam all over */
    status = XGrabPointer(dpy, root, False,
			  ButtonPressMask|ButtonReleaseMask, GrabModeSync,
			  GrabModeAsync, root, cursor, CurrentTime);
    if (status != GrabSuccess) {
	SetMessage(global_screen_data.info_label, res_labels[5]);
	return(None);
    }

    /* Let the user select a window...  Loop until a window has been picked
       AND all buttons are back up, so the press/release pair is fully
       consumed before we ungrab. */
    while ((target_win == None) || (buttons != 0)) {
	/* allow one more event */
	XAllowEvents(dpy, SyncPointer, CurrentTime);
	XtAppNextEvent(app, &event);
	switch (event.type) {
	case ButtonPress:
	    /* Presses delivered to other windows are ordinary toolkit events. */
	    if (event.xbutton.window != root) {
		XtDispatchEvent(&event);
		break;
	    }
	    if (target_win == None) {
		target_win = event.xbutton.subwindow; /* window selected */
		/* Optionally report the pick location in root coordinates. */
		if (x != NULL)
		    *x = event.xbutton.x_root;
		if (y != NULL)
		    *y = event.xbutton.y_root;
	    }
	    buttons++;
	    break;
	case ButtonRelease:
	    if (event.xbutton.window != root) {
		XtDispatchEvent(&event);
		break;
	    }
	    if (buttons > 0)	/* There may have been some
				   down before we started */
		buttons--;
	    break;
	default:
	    XtDispatchEvent(&event);
	    break;
	}
    }

    XUngrabPointer(dpy, CurrentTime);	/* Done with pointer */

    /* Resolve the picked subwindow to the actual client's top-level window. */
    return(XmuClientWindow(dpy, target_win));
}
/* Function Name: SetCommand
* Description: Causes this widget to own the resource editor's
* command selection.
* Arguments: w - the widget that will own the selection.
* command - command to send to client.
* msg - message to prompt the user to select a client.
* Returns: none.
*/
/* ARGSUSED */
void
SetCommand(Widget w, ResCommand command, String msg)
{
    XClientMessageEvent client_event;
    Display * dpy = XtDisplay(w);

    if (msg == NULL)
        msg = res_labels[6];

    SetMessage(global_screen_data.info_label, msg);

    /* No client chosen yet: ask the user to click on one. */
    if (global_client.window == None)
        if ( (global_client.window = GetClientWindow(w, NULL, NULL)) == None)
            return;

    global_client.ident = GetNewIdent();

    global_client.command = command;
    global_client.atom = atom_comm;

    /* Serialize the request header into global_client.stream. */
    BuildHeader(&(global_client));

    /* Own the communication selection; the client will fetch the request
       by converting it (see ConvertCommand). */
    if (!XtOwnSelection(w, global_client.atom, CurrentTime, ConvertCommand,
                        LoseSelection, SelectionDone))
        SetMessage(global_screen_data.info_label,
                   res_labels[7]);

    /* Tell the client (via ClientMessage) which selection atom, ident and
       protocol version to use when it picks up the request. */
    client_event.window = global_client.window;
    client_event.type = ClientMessage;
    client_event.message_type = atom_resource_editor;
    client_event.format = EDITRES_SEND_EVENT_FORMAT;
    client_event.data.l[0] = XtLastTimestampProcessed(dpy);
    client_event.data.l[1] = global_client.atom;
    client_event.data.l[2] = (long) global_client.ident;
    client_event.data.l[3] = global_effective_protocol_version;

    /* Trap X errors around XSendEvent so a destroyed client window is
       detected instead of killing editres. */
    global_error_code = NO_ERROR;                 /* Reset Error code. */
    global_old_error_handler = XSetErrorHandler(HandleXErrors);
    global_serial_num = NextRequest(dpy);

    XSendEvent(dpy, global_client.window, FALSE, (long) 0,
               (XEvent *) &client_event);

    XSync(dpy, FALSE);
    XSetErrorHandler(global_old_error_handler);

    if (global_error_code == NO_WINDOW) {
        char error_buf[BUFSIZ] =
            "The communication window with the"
            " application is no longer available\n"
            "Please select a new widget tree.";

        global_error_code = NO_ERROR;   /* Reset Error code. */
        global_client.window = None;
        /* Start over: prompt for a new client and request its tree. */
        SetCommand(w, LocalSendWidgetTree, error_buf);
        return;
    }

    TellUserAboutMessage(global_screen_data.info_label, command);
    /* If the client never takes the selection, ClientTimedOut cleans up. */
    global_client.timeout = XtAppAddTimeOut(XtWidgetToApplicationContext(w),
                                            CLIENT_TIME_OUT,
                                            ClientTimedOut, (XtPointer) w);
}
/* Function Name: TellUserAboutMessage
* Description: Informs the user that we have sent a message to the client
* Arguments: label - the info label.
* command - command that we have executed.
* Returns: none.
*/
static void
TellUserAboutMessage(Widget label, ResCommand command)
{
    const char *detail;
    char buf[BUFSIZ];

    /* Map the command to a human-readable description of the request. */
    switch (command) {
    case LocalSendWidgetTree:
        detail = " asking for widget tree";
        break;
    case LocalSetValues:
        detail = " asking it to perform SetValues()";
        break;
    case LocalFlashWidget:
    case LocalGetGeometry:
        detail = " asking it to perform GetGeometry()";
        break;
    case LocalGetResources:
        detail = " asking it to get a widget's resource list";
        break;
    case LocalFindChild:
        detail = " asking it to find the child Widget.";
        break;
    default:
        detail = "";
        break;
    }

    snprintf(buf, sizeof(buf), res_labels[8], detail);
    SetMessage(label, buf);
}
/* Function Name: ConvertCommand
* Description: Converts the command string into a selection that can
* be sent to the client.
* Arguments: (see Xt)
* Returns: TRUE if we could convert the selection and target asked for.
*/
/* ARGSUSED */
static Boolean
ConvertCommand(Widget w, Atom *selection, Atom *target, Atom *type_ret,
               XtPointer *value_ret, unsigned long *length_ret, int *format_ret)
{
    /* Only our communication selection, converted to the command target,
       is supported. */
    if (*selection != atom_comm)
        return(FALSE);
    if (*target != atom_command)
        return(FALSE);

    /* Hand back the request stream built by BuildHeader/SetCommand,
       header included. */
    *type_ret = atom_editres_protocol;
    *value_ret = (XtPointer) global_client.stream.real_top;
    *length_ret = global_client.stream.size + HEADER_SIZE;
    *format_ret = EDITRES_FORMAT;

    return(TRUE);
}
/* Function Name: SelectionDone
* Description: done with the selection.
* Arguments: *** UNUSED ***
* Returns: none.
*/
/* ARGSUSED */
static void
SelectionDone(Widget w, Atom *sel, Atom *targ)
{
    /* Intentionally empty.  Registering a done-proc tells the toolkit NOT
       to free the converted selection value automatically; the value is
       global_client.stream.real_top (see ConvertCommand) and is managed
       by the stream, not the toolkit. */
}
/* Function Name: LoseSelection
* Description: Called when we have lost the selection, asks client
* for the selection value.
* Arguments: w - the widget that just lost the selection.
* sel - the selection.
* Returns: none.
*/
static void
LoseSelection(Widget w, Atom *sel)
{
    XtIntervalId pending = global_client.timeout;

    /* The client has taken the selection, so the watchdog is no longer
       needed. */
    if (pending != 0) {
        global_client.timeout = 0;
        XtRemoveTimeOut(pending);
    }

    /* Ask the client for its reply value. */
    XtGetSelectionValue(w, *sel, atom_client_value, GetClientValue,
                        NULL, XtLastTimestampProcessed(XtDisplay(w)));
}
/* Function Name: GetClientValue
* Description: Gets the value out of the client, and does good things
* to it.
* Arguments: w - the widget that asked for the selection.
* data - client_data *** UNUSED ***.
* sel - the selection.
* type - the type of the selection.
* value - the selection's value.
* length - the length of the selection's value.
* format - the format of the selection.
* Returns: none.
*/
/* When True (the default), BuildHeader resets the protocol version to
 * CURRENT_PROTOCOL_VERSION on the next SendWidgetTree request; cleared once
 * after a ProtocolMismatch so the retry keeps the lowered version. */
static Boolean reset_protocol_level = True;

/* ARGSUSED */
static void
GetClientValue(Widget w, XtPointer data, Atom *selection, Atom *type,
               XtPointer value, unsigned long *length, int *format)
{
    Event * event;
    ProtocolStream alloc_stream, *stream;
    unsigned char ident, error_code;
    char * error_str = NULL, msg[BUFSIZ];

    if (*length == 0)
        return;

    stream = &alloc_stream;     /* easier to think of it this way... */

    stream->current = stream->top = (unsigned char *) value;
    stream->size = HEADER_SIZE;         /* size of header. */

    /*
     * Retrieve the Header.
     */

    if (*length < HEADER_SIZE) {
        SetMessage(global_screen_data.info_label,
                   res_labels[9]);
        return;
    }

    (void) _XEditResGet8(stream, &ident);
    if (global_client.ident != ident) {
        /* Stale reply from an earlier request: reclaim the selection and
           keep waiting for the matching one. */
#ifdef DEBUG
        if (global_resources.debug)
            printf("Incorrect ident from client.\n");
#endif
        if (!XtOwnSelection(w, *selection, CurrentTime, ConvertCommand,
                            LoseSelection, SelectionDone))
            SetMessage(global_screen_data.info_label,
                       res_labels[10]);
        return;
    }

    (void) _XEditResGet8(stream, &error_code);
    (void) _XEditResGet32(stream, &(stream->size));
    stream->top = stream->current;      /* reset stream to top of value.*/

    switch ((int) error_code) {
    case PartialSuccess:
/*****
        if (global_client.command == LocalSendWidgetTree &&
            global_effective_protocol_version < CURRENT_PROTOCOL_VERSION)
            ++global_effective_protocol_version;
*****/
        /* Unpack the payload and hand it to the command-specific handler. */
        if ((event = BuildEvent(stream)) != NULL) {
            error_str = DispatchEvent(event);
            FreeEvent(event);
        }
        else {
            snprintf(msg, sizeof(msg), "Unable to unpack protocol request.");
            error_str = XtNewString(msg);
        }
        break;
    case Failure:
        error_str = GetFailureMessage(stream);
        break;
    case ProtocolMismatch:
        error_str = ProtocolFailure(stream);
        /* Drop one protocol version and retry the widget tree request. */
        --global_effective_protocol_version;
        /* normally protocol version is reset to current during a SendWidgetTree
         * request, however, after a protocol failure this is skipped once for
         * a retry.
         */
        reset_protocol_level = False;
        SetCommand(w, LocalSendWidgetTree, NULL);
        break;
    default:
        snprintf(msg, sizeof(msg), res_labels[11], (int) error_code);
        SetMessage(global_screen_data.info_label, msg);
        break;
    }

    if (error_str == NULL) {
        WNode * top;

        if (global_tree_info == NULL)
            return;
        /* Success: report which application's tree is now displayed. */
        top = global_tree_info->top_node;
        snprintf(msg, sizeof(msg), res_labels[12], top->name, top->class);
        SetMessage(global_screen_data.info_label, msg);
        return;
    }
    SetMessage(global_screen_data.info_label, error_str);
    XtFree(error_str);
}
/* Function Name: BuildHeader
* Description: Puts the header into the message.
* Arguments: client_data - the client data.
* Returns: none.
*/
static void
BuildHeader(CurrentClient *client_data)
{
    unsigned long old_alloc, old_size;
    unsigned char * old_current;
    EditresCommand command;
    ProtocolStream * stream = &(client_data->stream);

    /*
     * We have cleverly kept enough space at the top of the header
     * for the return protocol stream, so all we have to do is
     * fill in the space.
     */

    /*
     * Fool the insert routines into putting the header in the right
     * place while being damn sure not to realloc (that would be very bad).
     */

    /* Save the stream state and rewind to the reserved header space. */
    old_current = stream->current;
    old_alloc = stream->alloc;
    old_size = stream->size;

    stream->current = stream->real_top;
    stream->alloc = stream->size + (2 * HEADER_SIZE);

    /* Header layout: ident (1 byte), opcode (1 byte), payload size (4). */
    _XEditResPut8(stream, client_data->ident);
    switch(client_data->command) {
    case LocalSendWidgetTree:
        /* A fresh widget-tree request normally renegotiates at the current
           protocol version, unless a ProtocolMismatch retry is pending
           (reset_protocol_level cleared by GetClientValue). */
        if (reset_protocol_level) global_effective_protocol_version =
                                      CURRENT_PROTOCOL_VERSION;
        reset_protocol_level = True;
        command = SendWidgetTree;
        break;
    case LocalSetValues:
        command = SetValues;
        break;
    case LocalFlashWidget:
        command = GetGeometry;
        break;
    case LocalGetResources:
        command = GetResources;
        break;
    case LocalFindChild:
        command = FindChild;
        break;
    case LocalGetValues:
        command = GetValues;
        break;
    default:
        command = SendWidgetTree;
        break;
    }
    _XEditResPut8(stream, (unsigned char) command);
    _XEditResPut32(stream, old_size);

    /* Restore the stream so the payload pointers are untouched. */
    stream->alloc = old_alloc;
    stream->current = old_current;
    stream->size = old_size;
}
/* Function Name: BuildEvent
* Description: Builds the event structure from the
* Arguments: stream - the protocol data stream.
* Returns: event - the event.
*/
static Event *
BuildEvent(ProtocolStream *stream)
{
    int i;
    /* Event is a union-like type; the reply is decoded according to the
       request we sent (global_client.command), not a type tag in the data. */
    Event * event = (Event *) XtCalloc(sizeof(Event), 1);

    /*
     * The return value will be different depending upon the
     * request sent out.
     */
    switch(global_client.command) {
    case LocalSendWidgetTree:
        {
            SendWidgetTreeEvent * send_event = (SendWidgetTreeEvent *) event;

            send_event->type = SendWidgetTree;

            if (!_XEditResGet16(stream, &(send_event->num_entries)))
                goto done;

            send_event->info = (WidgetTreeInfo *)
                               XtCalloc(sizeof(WidgetTreeInfo),
                                        send_event->num_entries);

            for (i = 0; i < (int)send_event->num_entries; i++) {
                WidgetTreeInfo * info = send_event->info + i;
                if (!(_XEditResGetWidgetInfo(stream, &(info->widgets)) &&
                      _XEditResGetString8(stream, &(info->name)) &&
                      _XEditResGetString8(stream, &(info->class)) &&
                      _XEditResGet32(stream, &(info->window))))
                {
                    goto done;
                }
            }

            if (global_effective_protocol_version ==
                CURRENT_PROTOCOL_VERSION) {
                /* get toolkit type and reset if necessary */
                if (!_XEditResGetString8(stream, &(send_event->toolkit)))
                    goto done;
            }

            /* set the command menu entries sensitive */
            SetEntriesSensitive(&CM_entries[CM_OFFSET], CM_NUM, True);
            /* set the tree menu entries sensitive */
            SetEntriesSensitive(TM_entries, TM_NUM, True);

            if (global_effective_protocol_version ==
                CURRENT_PROTOCOL_VERSION) {
                if (!strcmp(send_event->toolkit, "InterViews"))
                    RebuildMenusAndLabel("iv");
            }
            else
                RebuildMenusAndLabel("xt");
        }
        break;
    case LocalSetValues:
        {
            SetValuesEvent * sv_event = (SetValuesEvent *) event;

            sv_event->type = SetValues;

            if (!_XEditResGet16(stream, &(sv_event->num_entries)))
                goto done;

            sv_event->info = (SetValuesInfo *) XtCalloc(sizeof(SetValuesInfo),
                                                        sv_event->num_entries);

            for (i = 0; i < (int)sv_event->num_entries; i++) {
                SetValuesInfo * info = sv_event->info + i;
                if (!(_XEditResGetWidgetInfo(stream, &(info->widgets)) &&
                      _XEditResGetString8(stream, &(info->message))))
                {
                    goto done;
                }
            }
        }
        break;
    case LocalGetResources:
        {
            GetResourcesEvent * res_event = (GetResourcesEvent *) event;

            res_event->type = GetGeometry;

            if (!_XEditResGet16(stream, &(res_event->num_entries)))
                goto done;

            res_event->info = (GetResourcesInfo *)
                              XtCalloc(sizeof(GetResourcesInfo),
                                       res_event->num_entries);

            for (i = 0; i < (int)res_event->num_entries; i++) {
                GetResourcesInfo * res_info = res_event->info + i;
                if (!(_XEditResGetWidgetInfo(stream, &(res_info->widgets)) &&
                      _XEditResGetBoolean(stream, &(res_info->error))))
                {
                    goto done;
                }
                /* Per widget: either an error message or a resource list. */
                if (res_info->error) {
                    if (!_XEditResGetString8(stream, &(res_info->message)))
                        goto done;
                }
                else {
                    unsigned int j;

                    if (!_XEditResGet16(stream, &(res_info->num_resources)))
                        goto done;

                    res_info->res_info = (ResourceInfo *)
                                         XtCalloc(sizeof(ResourceInfo),
                                                  res_info->num_resources);

                    for (j = 0; j < res_info->num_resources; j++) {
                        unsigned char temp;
                        ResourceInfo * info = res_info->res_info + j;
                        if (!(_XEditResGetResType(stream, &(temp)) &&
                              _XEditResGetString8(stream, &(info->name)) &&
                              _XEditResGetString8(stream, &(info->class)) &&
                              _XEditResGetString8(stream, &(info->type))))
                        {
                            goto done;
                        }
                        else
                            info->res_type = (ResourceType) temp;
                    } /* for */
                } /* else */
            } /* for */
        }
        break;
    case LocalFlashWidget:
    case LocalGetGeometry:
        {
            GetGeomEvent * geom_event = (GetGeomEvent *) event;

            geom_event->type = GetGeometry;

            if (!_XEditResGet16(stream, &(geom_event->num_entries)))
                goto done;

            geom_event->info = (GetGeomInfo *) XtCalloc(sizeof(GetGeomInfo),
                                                        geom_event->num_entries);

            for (i = 0; i < (int)geom_event->num_entries; i++) {
                GetGeomInfo * info = geom_event->info + i;
                if (!(_XEditResGetWidgetInfo(stream, &(info->widgets)) &&
                      _XEditResGetBoolean(stream, &(info->error))))
                {
                    goto done;
                }
                if (info->error) {
                    if (!_XEditResGetString8(stream, &(info->message)))
                        goto done;
                }
                else {
                    if (!(_XEditResGetBoolean(stream, &(info->visible)) &&
                          _XEditResGetSigned16(stream, &(info->x)) &&
                          _XEditResGetSigned16(stream, &(info->y)) &&
                          _XEditResGet16(stream, &(info->width)) &&
                          _XEditResGet16(stream, &(info->height)) &&
                          _XEditResGet16(stream, &(info->border_width))))
                    {
                        goto done;
                    }
                }
            }
        }
        break;
    case LocalFindChild:
        {
            FindChildEvent * find_event = (FindChildEvent *) event;

            find_event->type = FindChild;

            if (!_XEditResGetWidgetInfo(stream, &(find_event->widgets)))
                goto done;
        }
        break;
    case LocalGetValues: /* This is for REPLY... */
        {
            Arg args[1];
            GetValuesEvent * gv_event = (GetValuesEvent *) event;

            gv_event->type = GetValues;

            if (!_XEditResGet16(stream, &(gv_event->num_entries)))
                goto done;

            gv_event->info = (GetValuesInfo*)XtCalloc(sizeof(GetValuesInfo),1);
            {
                GetValuesInfo * info = gv_event->info;
                if (!(_XEditResGetString8(stream, &(info->value))))
                {
                    goto done;
                }
                /* set the string value of the asciitext widget. note that only
                 * one active node is dealt with here. This is ok because only
                 * one node can be active when the resource box is up.
                 */
                XtSetArg (args[0], XtNstring, info->value);
                XtSetValues(
                    global_tree_info->active_nodes[0]->resources->res_box->value_wid,
                    args, 1
                );
            }
        }
        break;
    default:
        goto done;
    }
    return(event);

  done:
    /* Truncated/corrupt stream: release whatever was unpacked so far. */
    FreeEvent(event);
    return(NULL);
}
/* Function Name: FreeEvent
* Description: Frees all memory associated with the event.
* Arguments: event - the event.
* Returns: none.
*
* NOTE: XtFree() returns w/o freeing if ptr is NULL.
*/
/* Frees all heap data hanging off the event, keyed by the event's type tag.
 * NOTE(review): the Event struct itself is never freed here, and callers
 * (GetClientValue, BuildEvent's error path) do not free it either — looks
 * like a leak of sizeof(Event) per reply; confirm before changing. */
static void
FreeEvent(Event *event)
{
    unsigned int i;

    switch(event->any_event.type) {
    case SendWidgetTree:
        {
            SendWidgetTreeEvent * send_event = (SendWidgetTreeEvent *) event;
            WidgetTreeInfo * info = send_event->info;

            if (info != NULL) {
                for (i = 0; i < send_event->num_entries; i++, info++) {
                    XtFree((char *)info->widgets.ids);
                    XtFree(info->name);
                    XtFree(info->class);
                }
                XtFree((char *)send_event->info);
            }
        }
        break;
    case SetValues:
        {
            SetValuesEvent * sv_event = (SetValuesEvent *) event;
            SetValuesInfo * info = sv_event->info;

            if (info != NULL) {
                for (i = 0; i < sv_event->num_entries; i++, info++) {
                    XtFree((char *)info->widgets.ids);
                    XtFree(info->message);
                }
                XtFree((char *)sv_event->info);
            }
        }
        break;
    case GetResources:
        {
            GetResourcesEvent * get_event = (GetResourcesEvent *) event;
            GetResourcesInfo * info = get_event->info;

            if (info != NULL) {
                for (i = 0; i < get_event->num_entries; i++, info++) {
                    XtFree((char *)info->widgets.ids);
                    /* Each entry holds either an error message or a
                       resource list — mirror BuildEvent's unpacking. */
                    if (info->error)
                        XtFree(info->message);
                    else {
                        unsigned int j;
                        ResourceInfo * res_info = info->res_info;

                        if (res_info != NULL) {
                            for (j = 0;
                                 j < info->num_resources; j++, res_info++)
                            {
                                XtFree(res_info->name);
                                XtFree(res_info->class);
                                XtFree(res_info->type);
                            }
                            XtFree((char *)info->res_info);
                        }
                    }
                }
                XtFree((char *)get_event->info);
            }
        }
        break;
    case GetGeometry:
        {
            GetGeomEvent * geom_event = (GetGeomEvent *) event;
            GetGeomInfo * info = geom_event->info;

            if (info != NULL) {
                for (i = 0; i < geom_event->num_entries; i++, info++) {
                    XtFree((char *)info->widgets.ids);
                    if (info->error)
                        XtFree(info->message);
                }
                XtFree((char *)geom_event->info);
            }
        }
        break;
    case FindChild:
        {
            FindChildEvent * find_event = (FindChildEvent *) event;

            XtFree((char *)find_event->widgets.ids);
        }
        break;
    default:
        break;
    }
}
/* Function Name: DispatchEvent
* Description: Handles the event, calling the proper function.
* Arguments: event - the event.
* Returns: one.
*/
static char *
DispatchEvent(Event *event)
{
    char *error_str = NULL;

    /* Route the decoded reply to the handler for the request we sent. */
    switch (global_client.command) {
    case LocalSendWidgetTree:
        BuildVisualTree(global_tree_parent, event);
        break;
    case LocalSetValues:
        error_str = PrintSetValuesError(event);
        break;
    case LocalFlashWidget:
        error_str = HandleFlashWidget(event);
        break;
    case LocalGetResources:
        error_str = HandleGetResources(event);
        break;
    case LocalFindChild:
        DisplayChild(event);
        break;
    case LocalGetValues:
        /* Reply already applied in BuildEvent; nothing further to do. */
        break;
    default:
        {
            char buf[BUFSIZ];

            snprintf(buf, sizeof(buf), "Internal error: Unknown command %d.",
                     global_client.command);
            error_str = XtNewString(buf);
        }
        break;
    }
    return(error_str);
}
/* Function Name: InternAtoms
* Description: interns all static atoms.
* Arguments: display - the current display.
* Returns: none.
*/
void
InternAtoms(Display * dpy)
{
    /* Cache the protocol atoms once per display connection.  The interning
       order does not matter; each call is independent. */
    atom_editres_protocol = XInternAtom(dpy, EDITRES_PROTOCOL_ATOM, False);
    atom_client_value     = XInternAtom(dpy, EDITRES_CLIENT_VALUE, False);
    atom_resource_editor  = XInternAtom(dpy, EDITRES_NAME, False);
    atom_command          = XInternAtom(dpy, EDITRES_COMMAND_ATOM, False);
    atom_comm             = XInternAtom(dpy, EDITRES_COMM_ATOM, False);
}
ResIdent
GetNewIdent(void)
{
    /* Hand out sequentially increasing request identifiers so stale
       replies can be recognized and discarded. */
    static ResIdent next_ident = 1;
    ResIdent current = next_ident;

    next_ident++;
    return(current);
}
|
cade81498d76e168dc3359d3f16b270594bd5b75
|
b0f08154e3eebc7d8465efc57597e52d08d69c18
|
/src/xasl/compile_context.h
|
2eedc890ad4f30f0491087551db29cd5a2ca4f6b
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
CUBRID/cubrid
|
8f71a0010243b72e43ba887d229210650f4e901e
|
3b952af33230839a1b561a78ecd4b773374b66f8
|
refs/heads/develop
| 2023-08-18T19:16:30.987583
| 2023-08-18T08:18:05
| 2023-08-18T08:18:05
| 52,080,367
| 287
| 294
|
NOASSERTION
| 2023-09-14T21:29:09
| 2016-02-19T10:25:32
|
C
|
UTF-8
|
C
| false
| false
| 1,639
|
h
|
compile_context.h
|
/*
* Copyright 2008 Search Solution Corporation
* Copyright 2016 CUBRID Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
//
// compile_context.h - client/server common context used for prepare phase
//
#ifndef _COMPILE_CONTEXT_H_
#define _COMPILE_CONTEXT_H_
// forward definitions
struct xasl_node;
// note - file should be compatible to C language
#include "sha1.h"
/*
* COMPILE_CONTEXT cover from user input query string to generated xasl
*/
/* Compile context: carries a query from user SQL text through to its
 * generated XASL, shared by client and server during the prepare phase. */
typedef struct compile_context COMPILE_CONTEXT;
struct compile_context
{
  struct xasl_node *xasl;	/* XASL tree generated for the query */

  char *sql_user_text;		/* original query statement that user input */
  int sql_user_text_len;	/* length of sql_user_text */

  char *sql_hash_text;		/* rewritten query string which is used as hash key */

  char *sql_plan_text;		/* plans for this query */
  int sql_plan_alloc_size;	/* query_plan alloc size */
  bool is_xasl_pinned_reference;	/* to pin xasl cache entry */
  bool recompile_xasl_pinned;	/* whether recompile again after xasl cache entry has been pinned */
  bool recompile_xasl;		/* request recompilation of the cached XASL */
  SHA1Hash sha1;		/* SHA-1 digest; presumably computed over sql_hash_text for cache lookup — confirm */
};
#endif // _COMPILE_CONTEXT_H_
|
77d02f9ecc5661dbae85d9fa3a8eeb6f92f9db3f
|
04789ed7fb179dcd450e60e2f83b31dd53c8faed
|
/experimental/old_barevm/include/vm.h
|
4964dacf5c02be5e09eeed15d960dd13a9472145
|
[
"Unlicense"
] |
permissive
|
eliben/bobscheme
|
62d105f3647f3324ba046ceddbcba5de5467a0eb
|
aca6d4dfa0c211cc44502111a4a885b82c99ecf4
|
refs/heads/master
| 2023-08-27T20:51:24.494479
| 2023-02-03T16:44:57
| 2023-02-03T16:44:57
| 10,336,841
| 150
| 30
|
Unlicense
| 2023-02-03T16:41:26
| 2013-05-28T13:57:07
|
C++
|
UTF-8
|
C
| false
| false
| 428
|
h
|
vm.h
|
/******************************************************************************
** bob: The main VM implementation
**
** Eli Bendersky (eliben@gmail.com)
** This code is in the public domain
******************************************************************************/
#ifndef VM_H
#define VM_H
struct BobCodeObject;
void init_vm(FILE* output_stream);
void vm_run_code(struct BobCodeObject* codeobj);
#endif /* VM_H */
|
05cad2b4144bdff0482968999ae78a35aa4cd323
|
ef2d4ed65259b3f614426664939e9fb938715299
|
/darknet/src/logistic_layer.h
|
9c25bee3c2a6eb1013ed43ce0c4aeaa63b7a293f
|
[
"MIT",
"LicenseRef-scancode-yolo-1.0",
"WTFPL",
"LicenseRef-scancode-yolo-2.0",
"GPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
guanshuicheng/invoice
|
292ab7bd20ab07d4bdf9ca0f60ae2cf0e3bb06a4
|
bd201ed8e977421277775075a107a475bce0490d
|
refs/heads/master
| 2023-06-24T05:20:37.849072
| 2023-06-16T02:56:09
| 2023-06-16T02:56:09
| 200,155,602
| 1,399
| 379
|
MIT
| 2023-03-25T01:35:03
| 2019-08-02T03:06:41
|
C
|
UTF-8
|
C
| false
| false
| 406
|
h
|
logistic_layer.h
|
#ifndef LOGISTIC_LAYER_H
#define LOGISTIC_LAYER_H
#include "layer.h"
#include "network.h"
layer make_logistic_layer(int batch, int inputs);
void forward_logistic_layer(const layer l, network net);
void backward_logistic_layer(const layer l, network net);
#ifdef GPU
void forward_logistic_layer_gpu(const layer l, network net);
void backward_logistic_layer_gpu(const layer l, network net);
#endif
#endif
|
4a064f5a8fe40ca499a51f08eefcf748e8e3d7bd
|
3092a58c12d3f7a887e072c4b98c93c6474ae233
|
/src/decomp/engine/geo_layout.c
|
ae40125c559f44a4041bf768ded23263a60fc718
|
[
"CC0-1.0"
] |
permissive
|
libsm64/libsm64
|
92bb6d90b42250dd54a7e01d77a3ae717e3c949b
|
9726929fc6ac75abaedbd168f41b16b7d289e544
|
refs/heads/master
| 2023-08-16T11:01:34.534480
| 2023-08-12T19:11:02
| 2023-08-12T19:11:02
| 302,426,793
| 491
| 35
|
CC0-1.0
| 2023-08-12T19:11:03
| 2020-10-08T18:20:09
|
C
|
UTF-8
|
C
| false
| false
| 24,139
|
c
|
geo_layout.c
|
#include "../include/sm64.h"
#include "geo_layout.h"
#include "math_util.h"
#include "../memory.h"
#include "graph_node.h"
#include "../shim.h"
static Vec3s gVec3sZero = { 0, 0, 0 };
/* A geo layout command handler: reads its operands from gGeoLayoutCommand
 * and advances it past the command. */
typedef void (*GeoLayoutCommandProc)(void);

/* Dispatch table for geo layout script commands.  NOTE(review): ordering
 * suggests it is indexed by the command's opcode byte (0x00 branch-and-link
 * through 0x20 culling radius); the interpreter loop is not visible in this
 * chunk — confirm there. */
GeoLayoutCommandProc GeoLayoutJumpTable[] = {
    geo_layout_cmd_branch_and_link,
    geo_layout_cmd_end,
    geo_layout_cmd_branch,
    geo_layout_cmd_return,
    geo_layout_cmd_open_node,
    geo_layout_cmd_close_node,
    geo_layout_cmd_assign_as_view,
    geo_layout_cmd_update_node_flags,
    geo_layout_cmd_node_root,
    geo_layout_cmd_node_ortho_projection,
    geo_layout_cmd_node_perspective,
    geo_layout_cmd_node_start,
    geo_layout_cmd_node_master_list,
    geo_layout_cmd_node_level_of_detail,
    geo_layout_cmd_node_switch_case,
    geo_layout_cmd_node_camera,
    geo_layout_cmd_node_translation_rotation,
    geo_layout_cmd_node_translation,
    geo_layout_cmd_node_rotation,
    geo_layout_cmd_node_animated_part,
    geo_layout_cmd_node_billboard,
    geo_layout_cmd_node_display_list,
    geo_layout_cmd_node_shadow,
    geo_layout_cmd_node_object_parent,
    geo_layout_cmd_node_generated,
    geo_layout_cmd_node_background,
    geo_layout_cmd_nop,
    geo_layout_cmd_copy_view,
    geo_layout_cmd_node_held_obj,
    geo_layout_cmd_node_scale,
    geo_layout_cmd_nop2,
    geo_layout_cmd_nop3,
    geo_layout_cmd_node_culling_radius,
};
struct GraphNode gObjParentGraphNode;
struct AllocOnlyPool *gGraphNodePool;
struct GraphNode *gCurRootGraphNode;
UNUSED s32 D_8038BCA8;
/* The gGeoViews array is a mysterious one. Some background:
*
* If there are e.g. multiple Goombas, the multiple Goomba objects share one
* Geo node tree describing the goomba 3D model. Since every node has a single
* parent field and not a parent array, the parent is dynamically rebinded to
* each goomba instance just before rendering and set to null afterwards.
* The same happens for ObjectParentNode, which has as his sharedChild a group
* of all 240 object nodes. Why does the ObjectParentNode exist at all, if its
* only purpose is to temporarily bind the actual group with objects? This might
* be another remnant to Luigi.
*
* When creating a root node, room for (2 + cmd+0x02) pointers is allocated in
* gGeoViews. Except for the title screen, cmd+0x02 is 10. The 2 default ones
* might be for Mario and Luigi, and the other 10 could be different cameras for
* different rooms / boss fights. An area might be structured like this:
*
* geo_camera mode_player //Mario cam
* geo_open_node
* geo_render_obj
* geo_assign_as_view 1 // currently unused geo command
* geo_close_node
*
* geo_camera mode_player //Luigi cam
* geo_open_node
* geo_render_obj
* geo_copy_view 1 // currently unused geo command
* geo_assign_as_view 2
* geo_close_node
*
* geo_camera mode_boss //boss fight cam
* geo_assign_as_view 3
* ...
*
* There might also be specific geo nodes for Mario or Luigi only. Or a fixed camera
* might not have display list nodes of parts of the level that are out of view.
* In the end Luigi got scrapped and the multiple-camera design did not pan out,
* so everything was reduced to a single ObjectParent with a single group, and
* camera switching was all done in one node. End of speculation.
*/
struct GraphNode **gGeoViews;
u16 gGeoNumViews; // length of gGeoViews array
uintptr_t gGeoLayoutStack[16];
struct GraphNode *gCurGraphNodeList[32];
s16 gCurGraphNodeIndex;
s16 gGeoLayoutStackIndex; // similar to SP register in MIPS
UNUSED s16 D_8038BD7C;
s16 gGeoLayoutReturnIndex; // similar to RA register in MIPS
u8 *gGeoLayoutCommand;
u32 unused_8038B894[3] = { 0 };
/*
0x00: Branch and store return address
cmd+0x04: void *branchTarget
*/
void geo_layout_cmd_branch_and_link(void) {
    /* Push the address of the next command so 0x01 (end) can return here. */
    gGeoLayoutStack[gGeoLayoutStackIndex++] = (uintptr_t) (gGeoLayoutCommand + CMD_PROCESS_OFFSET(8));
    /* Pack the current graph-node depth (high 16 bits) with the previous
       return index (low 16 bits); 0x01 restores both from this entry. */
    gGeoLayoutStack[gGeoLayoutStackIndex++] = (gCurGraphNodeIndex << 16) + gGeoLayoutReturnIndex;
    gGeoLayoutReturnIndex = gGeoLayoutStackIndex;
    /* Jump to the branch target (segmented pointer at cmd+0x04). */
    gGeoLayoutCommand = segmented_to_virtual(cur_geo_cmd_ptr(0x04));
}
// 0x01: Terminate geo layout
// 0x01: Return from a 0x00 branch-and-link (or terminate the layout).
void geo_layout_cmd_end(void) {
    /* Unwind to the frame saved by branch-and-link: restore the previous
       return index and node depth from the packed stack entry, then resume
       at the saved command address. */
    gGeoLayoutStackIndex = gGeoLayoutReturnIndex;
    gGeoLayoutReturnIndex = gGeoLayoutStack[--gGeoLayoutStackIndex] & 0xFFFF;
    gCurGraphNodeIndex = gGeoLayoutStack[gGeoLayoutStackIndex] >> 16;
    gGeoLayoutCommand = (u8 *) gGeoLayoutStack[--gGeoLayoutStackIndex];
}
/*
0x02: Branch
cmd+0x04: void *branchTarget
*/
void geo_layout_cmd_branch(void) {
    /* Type 1 pushes a return address so a later 0x03 (return) command can
       come back; type 0 is an unconditional jump. */
    if (cur_geo_cmd_u8(0x01) == 1) {
        gGeoLayoutStack[gGeoLayoutStackIndex++] =
            (uintptr_t) (gGeoLayoutCommand + CMD_PROCESS_OFFSET(8));
    }

    gGeoLayoutCommand = segmented_to_virtual(cur_geo_cmd_ptr(0x04));
}
// 0x03: Return from branch
void geo_layout_cmd_return(void) {
gGeoLayoutCommand = (u8 *) gGeoLayoutStack[--gGeoLayoutStackIndex];
}
// 0x04: Open node
void geo_layout_cmd_open_node(void) {
    /* Descend one level: the new level starts out pointing at the same
       node as the current one. */
    struct GraphNode *current = gCurGraphNodeList[gCurGraphNodeIndex];

    gCurGraphNodeIndex++;
    gCurGraphNodeList[gCurGraphNodeIndex] = current;

    gGeoLayoutCommand += 0x04 << CMD_SIZE_SHIFT;
}
// 0x05: Close node
void geo_layout_cmd_close_node(void) {
    /* Pop back to the parent scene graph level opened by 0x04. */
    --gCurGraphNodeIndex;

    gGeoLayoutCommand += 0x04 << CMD_SIZE_SHIFT;
}
/*
0x06: Register the current node as a view
cmd+0x02: index
Register the current node in the gGeoViews array at the given index
*/
void geo_layout_cmd_assign_as_view(void) {
u16 index = cur_geo_cmd_s16(0x02);
if (index < gGeoNumViews) {
gGeoViews[index] = gCurGraphNodeList[gCurGraphNodeIndex];
}
gGeoLayoutCommand += 0x04 << CMD_SIZE_SHIFT;
}
/*
0x07: Update current scene graph node flags
cmd+0x01: u8 operation (0 = reset, 1 = set, 2 = clear)
cmd+0x02: s16 bits
*/
void geo_layout_cmd_update_node_flags(void) {
u16 operation = cur_geo_cmd_u8(0x01);
u16 flagBits = cur_geo_cmd_s16(0x02);
switch (operation) {
case GEO_CMD_FLAGS_RESET:
gCurGraphNodeList[gCurGraphNodeIndex]->flags = flagBits;
break;
case GEO_CMD_FLAGS_SET:
gCurGraphNodeList[gCurGraphNodeIndex]->flags |= flagBits;
break;
case GEO_CMD_FLAGS_CLEAR:
gCurGraphNodeList[gCurGraphNodeIndex]->flags &= ~flagBits;
break;
}
gGeoLayoutCommand += 0x04 << CMD_SIZE_SHIFT;
}
/*
0x08: Create a scene graph root node that specifies the viewport
cmd+0x02: s16 num entries (+2) to allocate for gGeoViews
cmd+0x04: s16 x
cmd+0x06: s16 y
cmd+0x08: s16 width
cmd+0x0A: s16 height
*/
/*
  0x08: Create a scene graph root node that specifies the viewport.
    cmd+0x02: s16 num entries (+2) to allocate for gGeoViews
    cmd+0x04..0x0A: s16 x, y, width, height
*/
void geo_layout_cmd_node_root(void) {
    s32 i;
    struct GraphNodeRoot *graphNode;
    s16 x = cur_geo_cmd_s16(0x04);
    s16 y = cur_geo_cmd_s16(0x06);
    s16 width = cur_geo_cmd_s16(0x08);
    s16 height = cur_geo_cmd_s16(0x0A);

    // number of entries to allocate for gGeoViews array
    // at least 2 are allocated by default
    // cmd+0x02 = 0x00: Mario face, 0x0A: all other levels
    gGeoNumViews = cur_geo_cmd_s16(0x02) + 2;

    graphNode = init_graph_node_root(gGraphNodePool, NULL, 0, x, y, width, height);

    // gGeoViews is unused in libsm64
    gGeoViews = NULL; // alloc_only_pool_alloc(gGraphNodePool, gGeoNumViews * sizeof(struct GraphNode *));

    graphNode->views = gGeoViews;
    graphNode->numViews = gGeoNumViews;

    // BUGFIX: the original cleared gGeoViews[i] unconditionally even though
    // the array is never allocated above, dereferencing NULL for every root
    // node (gGeoNumViews is always >= 2). Only clear when allocated.
    if (gGeoViews != NULL) {
        for (i = 0; i < gGeoNumViews; i++) {
            gGeoViews[i] = NULL;
        }
    }

    register_scene_graph_node(&graphNode->node);

    gGeoLayoutCommand += 0x0C << CMD_SIZE_SHIFT;
}
/*
0x09: Create orthographic projection scene graph node
cmd+0x02: s16 scale as a percentage (usually it's 100)
*/
void geo_layout_cmd_node_ortho_projection(void) {
struct GraphNodeOrthoProjection *graphNode;
f32 scale = (f32) cur_geo_cmd_s16(0x02) / 100.0f;
graphNode = init_graph_node_ortho_projection(gGraphNodePool, NULL, scale);
register_scene_graph_node(&graphNode->node);
gGeoLayoutCommand += 0x04 << CMD_SIZE_SHIFT;
}
/*
0x0A: Create camera frustum scene graph node
cmd+0x01: u8 if nonzero, enable frustumFunc field
cmd+0x02: s16 field of view
cmd+0x04: s16 near
cmd+0x06: s16 far
[cmd+0x08: GraphNodeFunc frustumFunc]
*/
void geo_layout_cmd_node_perspective(void) {
    struct GraphNodePerspective *graphNode;
    GraphNodeFunc frustumFunc = NULL;
    s16 fov = cur_geo_cmd_s16(0x02);
    s16 near = cur_geo_cmd_s16(0x04);
    s16 far = cur_geo_cmd_s16(0x06);

    if (cur_geo_cmd_u8(0x01) != 0) {
        // optional asm function: the command carries an extra pointer at
        // cmd+0x08, making it 4 bytes longer — advance past that extra part
        // here (the fixed 0x08 advance below still happens unconditionally).
        frustumFunc = (GraphNodeFunc) cur_geo_cmd_ptr(0x08);
        gGeoLayoutCommand += 4 << CMD_SIZE_SHIFT;
    }

    graphNode = init_graph_node_perspective(gGraphNodePool, NULL, (f32) fov, near, far, frustumFunc, 0);

    register_scene_graph_node(&graphNode->fnNode.node);

    gGeoLayoutCommand += 0x08 << CMD_SIZE_SHIFT;
}
/*
0x0B: Create a scene graph node that groups other nodes without any
additional functionality
*/
void geo_layout_cmd_node_start(void) {
struct GraphNodeStart *graphNode;
graphNode = init_graph_node_start(gGraphNodePool, NULL);
register_scene_graph_node(&graphNode->node);
gGeoLayoutCommand += 0x04 << CMD_SIZE_SHIFT;
}
// 0x1F: No operation
void geo_layout_cmd_nop3(void) {
    /* 16-byte command with no effect beyond advancing the script cursor. */
    gGeoLayoutCommand += 0x10 << CMD_SIZE_SHIFT;
}
/*
0x0C: Create zbuffer-toggling scene graph node
cmd+0x01: u8 enableZBuffer (1 = on, 0 = off)
*/
void geo_layout_cmd_node_master_list(void) {
    struct GraphNodeMasterList *graphNode;
    // The byte at cmd+0x01 toggles z-buffering for everything under this node.
    graphNode = init_graph_node_master_list(gGraphNodePool, NULL, cur_geo_cmd_u8(0x01));
    register_scene_graph_node(&graphNode->node);
    gGeoLayoutCommand += 0x04 << CMD_SIZE_SHIFT;
}
/*
0x0D: Create a level of detail graph node, which only renders at a certain
distance interval from the camera.
cmd+0x04: s16 minDistance
cmd+0x06: s16 maxDistance
*/
void geo_layout_cmd_node_level_of_detail(void) {
struct GraphNodeLevelOfDetail *graphNode;
s16 minDistance = cur_geo_cmd_s16(0x04);
s16 maxDistance = cur_geo_cmd_s16(0x06);
graphNode = init_graph_node_render_range(gGraphNodePool, NULL, minDistance, maxDistance);
register_scene_graph_node(&graphNode->node);
gGeoLayoutCommand += 0x08 << CMD_SIZE_SHIFT;
}
/*
0x0E: Create switch-case scene graph node
cmd+0x02: s16 initialSelectedCase
cmd+0x04: GraphNodeFunc caseSelectorFunc
caseSelectorFunc returns an index which is used to select the child node to render.
Used for animating coins, blinking, color selection, etc.
*/
void geo_layout_cmd_node_switch_case(void) {
    struct GraphNodeSwitchCase *graphNode;
    graphNode =
        init_graph_node_switch_case(gGraphNodePool, NULL,
                                    cur_geo_cmd_s16(0x02), // case which is initially selected
                                    0,
                                    (GraphNodeFunc) cur_geo_cmd_ptr(0x04), // case update function
                                    0);
    register_scene_graph_node(&graphNode->fnNode.node);
    gGeoLayoutCommand += 0x08 << CMD_SIZE_SHIFT;
}
/*
0x0F: Create a camera scene graph node (GraphNodeCamera). The focus sets the Camera's areaCen position.
cmd+0x02: s16 camera type (changes from course to course)
cmd+0x04: s16 posX
cmd+0x06: s16 posY
cmd+0x08: s16 posZ
cmd+0x0A: s16 focusX
cmd+0x0C: s16 focusY
cmd+0x0E: s16 focusZ
cmd+0x10: GraphNodeFunc func
*/
void geo_layout_cmd_node_camera(void) {
    struct GraphNodeCamera *graphNode;
    // Position and focus are packed as six consecutive s16s starting at cmd+0x04.
    s16 *cmdPos = (s16 *) &gGeoLayoutCommand[4];
    Vec3f pos, focus;
    cmdPos = read_vec3s_to_vec3f(pos, cmdPos);
    cmdPos = read_vec3s_to_vec3f(focus, cmdPos);
    graphNode = init_graph_node_camera(gGraphNodePool, NULL, pos, focus,
                                       (GraphNodeFunc) cur_geo_cmd_ptr(0x10), cur_geo_cmd_s16(0x02));
    register_scene_graph_node(&graphNode->fnNode.node);
    // The camera node always occupies view slot 0 (see geo_layout_cmd_copy_view).
    gGeoViews[0] = &graphNode->fnNode.node;
    gGeoLayoutCommand += 0x14 << CMD_SIZE_SHIFT;
}
/*
0x10: Create translation & rotation scene graph node with optional display list
cmd+0x01: u8 params
(params & 0x80): if set, enable displayList field and drawingLayer
((params & 0x70)>>4): fieldLayout
(params & 0x0F): drawingLayer
fieldLayout == 0:
cmd+0x04: s16 xTranslation
cmd+0x06: s16 yTranslation
cmd+0x08: s16 zTranslation
cmd+0x0A: s16 xRotation
cmd+0x0C: s16 yRotation
cmd+0x0E: s16 zRotation
fieldLayout == 1:
cmd+0x02: s16 xTranslation
cmd+0x04: s16 yTranslation
cmd+0x06: s16 zTranslation
(rotation gets copied from gVec3sZero)
fieldLayout == 2:
cmd+0x02: s16 xRotation
cmd+0x04: s16 yRotation
cmd+0x06: s16 zRotation
(translation gets copied from gVec3sZero)
fieldLayout == 3:
cmd+0x02: s16 yRotation
(translation gets copied from gVec3sZero)
(x and z translation are set to 0)
[cmd+var: void *displayList]
*/
void geo_layout_cmd_node_translation_rotation(void) {
    struct GraphNodeTranslationRotation *graphNode;
    Vec3s translation, rotation;
    void *displayList = NULL;
    s16 drawingLayer = 0;
    s16 params = cur_geo_cmd_u8(0x01);
    s16 *cmdPos = (s16 *) gGeoLayoutCommand;
    // Bits 4-6 of params pick the field layout (see the format comment above);
    // the read_* helpers advance cmdPos past the fields they consume.
    switch ((params & 0x70) >> 4) {
        case 0: // full translation + rotation
            cmdPos = read_vec3s(translation, &cmdPos[2]);
            cmdPos = read_vec3s_angle(rotation, cmdPos);
            break;
        case 1: // translation only; rotation zeroed
            cmdPos = read_vec3s(translation, &cmdPos[1]);
            vec3s_copy(rotation, gVec3sZero);
            break;
        case 2: // rotation only; translation zeroed
            cmdPos = read_vec3s_angle(rotation, &cmdPos[1]);
            vec3s_copy(translation, gVec3sZero);
            break;
        case 3: // yaw only: (x << 15) / 180 maps 180 to 0x8000, i.e. degrees to s16 angle units
            vec3s_copy(translation, gVec3sZero);
            vec3s_set(rotation, 0, (cmdPos[1] << 15) / 180, 0);
            cmdPos += 2 << CMD_SIZE_SHIFT;
            break;
    }
    // High bit of params: an optional display list pointer follows the fields.
    if (params & 0x80) {
        displayList = *(void **) &cmdPos[0];
        drawingLayer = params & 0x0F;
        cmdPos += 2 << CMD_SIZE_SHIFT;
    }
    graphNode = init_graph_node_translation_rotation(gGraphNodePool, NULL, drawingLayer, displayList,
                                                     translation, rotation);
    register_scene_graph_node(&graphNode->node);
    // Command length is variable, so resynchronize the interpreter from the parse cursor.
    gGeoLayoutCommand = (u8 *) cmdPos;
}
/*
0x11: Create translation scene graph node with optional display list
cmd+0x01: u8 params
(params & 0x80): if set, enable displayList field and drawingLayer
(params & 0x0F): drawingLayer
cmd+0x02: s16 xTranslation
cmd+0x04: s16 yTranslation
cmd+0x06: s16 zTranslation
[cmd+0x08: void *displayList]
*/
void geo_layout_cmd_node_translation(void) {
    struct GraphNodeTranslation *graphNode;
    Vec3s translation;
    s16 drawingLayer = 0;
    s16 params = cur_geo_cmd_u8(0x01);
    s16 *cmdPos = (s16 *) gGeoLayoutCommand;
    void *displayList = NULL;
    // Three s16s starting at cmd+0x02; read_vec3s advances the cursor past them.
    cmdPos = read_vec3s(translation, &cmdPos[1]);
    // High bit of params: an optional display list pointer follows the translation.
    if (params & 0x80) {
        displayList = *(void **) &cmdPos[0];
        drawingLayer = params & 0x0F;
        cmdPos += 2 << CMD_SIZE_SHIFT;
    }
    graphNode =
        init_graph_node_translation(gGraphNodePool, NULL, drawingLayer, displayList, translation);
    register_scene_graph_node(&graphNode->node);
    // Variable-length command: resynchronize from the parse cursor.
    gGeoLayoutCommand = (u8 *) cmdPos;
}
/*
0x12: Create ? scene graph node
cmd+0x01: u8 params
(params & 0x80): if set, enable displayList field and drawingLayer
(params & 0x0F): drawingLayer
cmd+0x02: s16 unkX
cmd+0x04: s16 unkY
cmd+0x06: s16 unkZ
[cmd+0x08: void *displayList]
*/
void geo_layout_cmd_node_rotation(void) {
    struct GraphNodeRotation *graphNode;
    Vec3s sp2c; // rotation angles; name is a decompiler-generated stack-slot label
    s16 drawingLayer = 0;
    s16 params = cur_geo_cmd_u8(0x01);
    s16 *cmdPos = (s16 *) gGeoLayoutCommand;
    void *displayList = NULL;
    // Three angle s16s starting at cmd+0x02; read_vec3s_angle advances the cursor.
    cmdPos = read_vec3s_angle(sp2c, &cmdPos[1]);
    // High bit of params: an optional display list pointer follows the angles.
    if (params & 0x80) {
        displayList = *(void **) &cmdPos[0];
        drawingLayer = params & 0x0F;
        cmdPos += 2 << CMD_SIZE_SHIFT;
    }
    graphNode = init_graph_node_rotation(gGraphNodePool, NULL, drawingLayer, displayList, sp2c);
    register_scene_graph_node(&graphNode->node);
    // Variable-length command: resynchronize from the parse cursor.
    gGeoLayoutCommand = (u8 *) cmdPos;
}
/*
0x1D: Create scale scene graph node with optional display list
cmd+0x01: u8 params
(params & 0x80): if set, enable displayList field and drawingLayer
(params & 0x0F): drawingLayer
cmd+0x04: u32 scale (0x10000 = 1.0)
[cmd+0x08: void *displayList]
*/
void geo_layout_cmd_node_scale(void) {
    struct GraphNodeScale *graphNode;
    s16 drawingLayer = 0;
    s16 params = cur_geo_cmd_u8(0x01);
    // Scale is 16.16 fixed point: 0x10000 == 1.0.
    f32 scale = cur_geo_cmd_u32(0x04) / 65536.0f;
    void *displayList = NULL;
    if (params & 0x80) {
        // Optional display list pointer extends the command by 4 bytes.
        displayList = cur_geo_cmd_ptr(0x08);
        drawingLayer = params & 0x0F;
        gGeoLayoutCommand += 4 << CMD_SIZE_SHIFT;
    }
    graphNode = init_graph_node_scale(gGraphNodePool, NULL, drawingLayer, displayList, scale);
    register_scene_graph_node(&graphNode->node);
    gGeoLayoutCommand += 0x08 << CMD_SIZE_SHIFT;
}
// 0x1E: No operation
void geo_layout_cmd_nop2(void) {
    // No operation; only skips this 8-byte command.
    gGeoLayoutCommand += 0x08 << CMD_SIZE_SHIFT;
}
/*
0x13: Create a scene graph node that is rotated by the object's animation.
cmd+0x01: u8 drawingLayer
cmd+0x02: s16 xTranslation
cmd+0x04: s16 yTranslation
cmd+0x06: s16 zTranslation
cmd+0x08: void *displayList
*/
void geo_layout_cmd_node_animated_part(void) {
    struct GraphNodeAnimatedPart *graphNode;
    Vec3s translation;
    s32 drawingLayer = cur_geo_cmd_u8(0x01);
    void *displayList = cur_geo_cmd_ptr(0x08);
    s16 *cmdPos = (s16 *) gGeoLayoutCommand;
    // Translation is three s16s at cmd+0x02; the returned cursor is unused
    // because this command has a fixed 12-byte length.
    read_vec3s(translation, &cmdPos[1]);
    graphNode =
        init_graph_node_animated_part(gGraphNodePool, NULL, drawingLayer, displayList, translation);
    register_scene_graph_node(&graphNode->node);
    gGeoLayoutCommand += 0x0C << CMD_SIZE_SHIFT;
}
/*
0x14: Create billboarding node with optional display list
cmd+0x01: u8 params
(params & 0x80): if set, enable displayList field and drawingLayer
(params & 0x0F): drawingLayer
cmd+0x02: s16 xTranslation
cmd+0x04: s16 yTranslation
cmd+0x06: s16 zTranslation
[cmd+0x08: void *displayList]
*/
void geo_layout_cmd_node_billboard(void) {
    struct GraphNodeBillboard *graphNode;
    Vec3s translation;
    s16 drawingLayer = 0;
    s16 params = cur_geo_cmd_u8(0x01);
    s16 *cmdPos = (s16 *) gGeoLayoutCommand;
    void *displayList = NULL;
    // Three s16s starting at cmd+0x02; read_vec3s advances the cursor past them.
    cmdPos = read_vec3s(translation, &cmdPos[1]);
    // High bit of params: an optional display list pointer follows the translation.
    if (params & 0x80) {
        displayList = *(void **) &cmdPos[0];
        drawingLayer = params & 0x0F;
        cmdPos += 2 << CMD_SIZE_SHIFT;
    }
    graphNode = init_graph_node_billboard(gGraphNodePool, NULL, drawingLayer, displayList, translation);
    register_scene_graph_node(&graphNode->node);
    // Variable-length command: resynchronize from the parse cursor.
    gGeoLayoutCommand = (u8 *) cmdPos;
}
/*
0x15: Create plain display list scene graph node
cmd+0x01: u8 drawingLayer
cmd+0x04: void *displayList
*/
void geo_layout_cmd_node_display_list(void) {
    struct GraphNodeDisplayList *graphNode;
    s32 drawingLayer = cur_geo_cmd_u8(0x01);
    void *displayList = cur_geo_cmd_ptr(0x04);
    graphNode = init_graph_node_display_list(gGraphNodePool, NULL, drawingLayer, displayList);
    register_scene_graph_node(&graphNode->node);
    gGeoLayoutCommand += 0x08 << CMD_SIZE_SHIFT;
}
/*
0x16: Create shadow scene graph node
cmd+0x02: s16 shadowType
cmd+0x04: s16 shadowSolidity
cmd+0x06: s16 shadowScale
*/
void geo_layout_cmd_node_shadow(void) {
    struct GraphNodeShadow *graphNode;
    // NOTE(review): the s16 command fields are truncated to u8 here, so values
    // outside 0-255 would wrap -- presumably intentional to match the node's
    // field widths; confirm against init_graph_node_shadow's signature.
    u8 shadowType = cur_geo_cmd_s16(0x02);
    u8 shadowSolidity = cur_geo_cmd_s16(0x04);
    s16 shadowScale = cur_geo_cmd_s16(0x06);
    graphNode = init_graph_node_shadow(gGraphNodePool, NULL, shadowScale, shadowSolidity, shadowType);
    register_scene_graph_node(&graphNode->node);
    gGeoLayoutCommand += 0x08 << CMD_SIZE_SHIFT;
}
// 0x17: Create scene graph node that manages the group of all object nodes
void geo_layout_cmd_node_object_parent(void) {
    struct GraphNodeObjectParent *graphNode;
    // Attaches the global object-parent group (gObjParentGraphNode) under this node.
    graphNode = init_graph_node_object_parent(gGraphNodePool, NULL, &gObjParentGraphNode);
    register_scene_graph_node(&graphNode->node);
    gGeoLayoutCommand += 0x04 << CMD_SIZE_SHIFT;
}
/*
0x18: Create dynamically generated displaylist scene graph node
cmd+0x02: s16 parameter
cmd+0x04: GraphNodeFunc func
*/
void geo_layout_cmd_node_generated(void) {
    struct GraphNodeGenerated *graphNode;
    graphNode = init_graph_node_generated(gGraphNodePool, NULL,
                                          (GraphNodeFunc) cur_geo_cmd_ptr(0x04), // asm function
                                          cur_geo_cmd_s16(0x02)); // parameter
    register_scene_graph_node(&graphNode->fnNode.node);
    gGeoLayoutCommand += 0x08 << CMD_SIZE_SHIFT;
}
/*
0x19: Create background scene graph node
cmd+0x02: s16 background // background ID, or RGBA5551 color if backgroundFunc is null
cmd+0x04: GraphNodeFunc backgroundFunc
*/
void geo_layout_cmd_node_background(void) {
    struct GraphNodeBackground *graphNode;
    graphNode = init_graph_node_background(
        gGraphNodePool, NULL,
        cur_geo_cmd_s16(0x02), // background ID, or RGBA5551 color if asm function is null
        (GraphNodeFunc) cur_geo_cmd_ptr(0x04), // asm function
        0);
    register_scene_graph_node(&graphNode->fnNode.node);
    gGeoLayoutCommand += 0x08 << CMD_SIZE_SHIFT;
}
// 0x1A: No operation
void geo_layout_cmd_nop(void) {
    // No operation; only skips this 8-byte command.
    gGeoLayoutCommand += 0x08 << CMD_SIZE_SHIFT;
}
/*
0x1B: Copy the shared children from the object parent from a specific view
to a newly created object parent node.
cmd+0x02: s16 index (of gGeoViews)
*/
/* Copy the shared children of the object parent stored in view slot `index`
   into a newly created object-parent node. If the slot is negative, unfilled,
   or not an object parent, the new node gets no shared child (NULL). */
void geo_layout_cmd_copy_view(void) {
    struct GraphNodeObjectParent *graphNode;
    struct GraphNode *node = NULL;
    s16 index = cur_geo_cmd_s16(0x02);
    if (index >= 0) {
        node = gGeoViews[index];
        // Guard against a view slot that was never filled in (e.g. no camera
        // or root command populated this index); the original code dereferenced
        // node->type unconditionally and would crash on a NULL slot.
        if (node != NULL && node->type == GRAPH_NODE_TYPE_OBJECT_PARENT) {
            node = ((struct GraphNodeObjectParent *) node)->sharedChild;
        } else {
            node = NULL;
        }
    }
    graphNode = init_graph_node_object_parent(gGraphNodePool, NULL, node);
    register_scene_graph_node(&graphNode->node);
    gGeoLayoutCommand += 0x04 << CMD_SIZE_SHIFT;
}
/*
0x1C: Create a held object scene graph node
cmd+0x01: u8 unused
cmd+0x02: s16 offsetX
cmd+0x04: s16 offsetY
cmd+0x06: s16 offsetZ
cmd+0x08: GraphNodeFunc nodeFunc
*/
void geo_layout_cmd_node_held_obj(void) {
    struct GraphNodeHeldObject *graphNode;
    Vec3s offset;
    // Offset is three s16s at cmd+0x02; the byte at cmd+0x01 is passed through
    // to init_graph_node_held_object (documented as "unused" in the format
    // comment above -- confirm against that function's signature).
    read_vec3s(offset, (s16 *) &gGeoLayoutCommand[0x02]);
    graphNode = init_graph_node_held_object(
        gGraphNodePool, NULL, NULL, offset, (GraphNodeFunc) cur_geo_cmd_ptr(0x08), cur_geo_cmd_u8(0x01));
    register_scene_graph_node(&graphNode->fnNode.node);
    gGeoLayoutCommand += 0x0C << CMD_SIZE_SHIFT;
}
/*
0x20: Create a scene graph node that specifies for an object the radius that
is used for frustum culling.
cmd+0x02: s16 cullingRadius
*/
void geo_layout_cmd_node_culling_radius(void) {
    struct GraphNodeCullingRadius *graphNode;
    graphNode = init_graph_node_culling_radius(gGraphNodePool, NULL, cur_geo_cmd_s16(0x02));
    register_scene_graph_node(&graphNode->node);
    gGeoLayoutCommand += 0x04 << CMD_SIZE_SHIFT;
}
/* Interpret the geo layout script at segptr, building a scene graph whose
   nodes are allocated from pool. Returns the root node of the built graph. */
struct GraphNode *process_geo_layout(struct AllocOnlyPool *pool, void *segptr) {
    // set by register_scene_graph_node when gCurGraphNodeIndex is 0
    // and gCurRootGraphNode is NULL
    gCurRootGraphNode = NULL;
    gGeoNumViews = 0; // number of entries in gGeoViews
    gCurGraphNodeList[0] = 0;
    gCurGraphNodeIndex = 0; // incremented by cmd_open_node, decremented by cmd_close_node
    gGeoLayoutStackIndex = 2;
    gGeoLayoutReturnIndex = 2; // stack index is often copied here?
    gGeoLayoutCommand = segmented_to_virtual(segptr);
    gGraphNodePool = pool;
    gGeoLayoutStack[0] = 0;
    gGeoLayoutStack[1] = 0;
    // Dispatch on the command byte until a handler clears the command pointer
    // (presumably the end/return command -- its handler is outside this chunk).
    while (gGeoLayoutCommand != NULL) {
        GeoLayoutJumpTable[gGeoLayoutCommand[0x00]]();
    }
    return gCurRootGraphNode;
}
|
7ba15ca639cb148871a5232c015990b1fd73c9a0
|
eecd5e4c50d8b78a769bcc2675250576bed34066
|
/src/ts/tutorials/ex52.c
|
2a223ed48a56da8e5363c4fd530141f1cbfbc84a
|
[
"BSD-2-Clause"
] |
permissive
|
petsc/petsc
|
3b1a04fea71858e0292f9fd4d04ea11618c50969
|
9c5460f9064ca60dd71a234a1f6faf93e7a6b0c9
|
refs/heads/main
| 2023-08-17T20:51:16.507070
| 2023-08-17T16:08:06
| 2023-08-17T16:08:06
| 8,691,401
| 341
| 169
|
NOASSERTION
| 2023-03-29T11:02:58
| 2013-03-10T20:55:21
|
C
|
UTF-8
|
C
| false
| false
| 14,100
|
c
|
ex52.c
|
static char help[] = "Simple Advection-diffusion equation solved using FVM in DMPLEX\n";
/*
Solves the simple advection equation given by
q_t + u (q_x) + v (q_y) - D (q_xx + q_yy) = 0 using FVM and First Order Upwind discretization.
with a user defined initial condition.
with dirichlet/neumann conditions on the four boundaries of the domain.
User can define the mesh parameters either in the command line or inside
the ProcessOptions() routine.
Contributed by: Mukkund Sunjii, Domenico Lahaye
*/
#include <petscdmplex.h>
#include <petscts.h>
#include <petscblaslapack.h>
#if defined(PETSC_HAVE_CGNS)
#undef I
#include <cgnslib.h>
#endif
/*
User-defined routines
*/
extern PetscErrorCode FormFunction(TS, PetscReal, Vec, Vec, void *), FormInitialSolution(DM, Vec);
extern PetscErrorCode MyTSMonitor(TS, PetscInt, PetscReal, Vec, void *);
extern PetscErrorCode MySNESMonitor(SNES, PetscInt, PetscReal, PetscViewerAndFormat *);
/* Defining the usr defined context */
typedef struct {
PetscScalar diffusion;
PetscReal u, v;
PetscScalar delta_x, delta_y;
} AppCtx;
/* Options for the scenario */
/* Read the convective components (-u, -v) and diffusion coefficient (-diffus)
   from the options database. Defaults: u = 2.5, v = 0, diffusion = 0
   (pure advection in x). */
static PetscErrorCode ProcessOptions(MPI_Comm comm, AppCtx *options)
{
  PetscFunctionBeginUser;
  options->u         = 2.5;
  options->v         = 0.0;
  options->diffusion = 0.0;
  PetscOptionsBegin(comm, "", "Meshing Problem Options", "DMPLEX");
  PetscCall(PetscOptionsReal("-u", "The x component of the convective coefficient", "advection_DMPLEX.c", options->u, &options->u, NULL));
  PetscCall(PetscOptionsReal("-v", "The y component of the convective coefficient", "advection_DMPLEX.c", options->v, &options->v, NULL));
  PetscCall(PetscOptionsScalar("-diffus", "The diffusive coefficient", "advection_DMPLEX.c", options->diffusion, &options->diffusion, NULL));
  PetscOptionsEnd();
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*
User can provide the file containing the mesh.
Or can generate the mesh using DMPlexCreateBoxMesh with the specified options.
*/
/* Build the DMPLEX mesh from command-line options (or a user-supplied file)
   and complete the "boundary" label so faces, edges, and vertices on the
   boundary are all marked. */
static PetscErrorCode CreateMesh(MPI_Comm comm, AppCtx *user, DM *dm)
{
  PetscFunctionBeginUser;
  PetscCall(DMCreate(comm, dm));
  PetscCall(DMSetType(*dm, DMPLEX));
  PetscCall(DMSetFromOptions(*dm));
  PetscCall(DMViewFromOptions(*dm, NULL, "-dm_view"));
  {
    DMLabel label;

    PetscCall(DMGetLabel(*dm, "boundary", &label));
    /* Propagate the boundary marking from faces down to edges and vertices. */
    PetscCall(DMPlexLabelComplete(*dm, label));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* This routine is responsible for defining the local solution vector x
with a given initial solution.
The initial solution can be modified accordingly inside the loops.
No need for a local vector because there is exchange of information
across the processors. Unlike for FormFunction which depends on the neighbours */
/* Set the initial condition: a square pulse of height 2.0 on cells whose
   centroid lies in (0.9, 0.95) x (0.9, 0.95); zero everywhere else.
   Works directly on the vector U since no neighbor information is needed. */
PetscErrorCode FormInitialSolution(DM da, Vec U)
{
  PetscScalar *u;
  PetscInt     cell, cStart, cEnd;
  PetscReal    cellvol, centroid[3], normal[3];

  PetscFunctionBeginUser;
  /* Get pointers to vector data */
  PetscCall(VecGetArray(U, &u));
  /* Get local grid boundaries */
  PetscCall(DMPlexGetHeightStratum(da, 0, &cStart, &cEnd));
  /* Assigning the values at the cell centers based on x and y directions */
  PetscCall(DMGetCoordinatesLocalSetUp(da));
  for (cell = cStart; cell < cEnd; cell++) {
    PetscCall(DMPlexComputeCellGeometryFVM(da, cell, &cellvol, centroid, normal));
    /* The original nested-if left cells with x in the band but y outside it
       unassigned; assign an explicit value to every cell instead. */
    if (centroid[0] > 0.9 && centroid[0] < 0.95 && centroid[1] > 0.9 && centroid[1] < 0.95) u[cell] = 2.0;
    else u[cell] = 0.0;
  }
  PetscCall(VecRestoreArray(U, &u));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* TS monitor: print the step number, simulation time, and 2-norm of the
   current solution on the solver's communicator. */
PetscErrorCode MyTSMonitor(TS ts, PetscInt step, PetscReal ptime, Vec v, void *ctx)
{
  MPI_Comm  comm;
  PetscReal solNorm;

  PetscFunctionBeginUser;
  /* A step of -1 indicates an interpolated solution; skip it. */
  if (step < 0) PetscFunctionReturn(PETSC_SUCCESS);
  PetscCall(PetscObjectGetComm((PetscObject)ts, &comm));
  PetscCall(VecNorm(v, NORM_2, &solNorm));
  PetscCall(PetscPrintf(comm, "timestep %" PetscInt_FMT " time %g norm %g\n", step, (double)ptime, (double)solNorm));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*
MySNESMonitor - illustrate how to set user-defined monitoring routine for SNES.
Input Parameters:
snes - the SNES context
its - iteration number
fnorm - 2-norm function value (may be estimated)
ctx - optional user-defined context for private data for the
monitor routine, as set by SNESMonitorSet()
*/
/* Thin wrapper around the default short SNES monitor; exists only to show how
   a user-defined monitor is installed via SNESMonitorSet(). */
PetscErrorCode MySNESMonitor(SNES snes, PetscInt its, PetscReal fnorm, PetscViewerAndFormat *vf)
{
  PetscFunctionBeginUser;
  PetscCall(SNESMonitorDefaultShort(snes, its, fnorm, vf));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*
FormFunction - Evaluates nonlinear function, F(x).
Input Parameters:
. ts - the TS context
. X - input vector
. ctx - optional user-defined context, as set by SNESSetFunction()
Output Parameter:
. F - function vector
*/
/* Evaluate the RHS F(t, X) of dq/dt = F for the advection-diffusion equation
   using first-order upwind FVM on a structured quad DMPLEX mesh.
   For each cell, the four face fluxes (upwinded by the sign of u and v) and
   the cell's own contribution are combined into f[cell].
   Assumes each quad cell's cone is ordered south, east, north, west --
   TODO confirm this ordering holds for the mesh generator in use. */
PetscErrorCode FormFunction(TS ts, PetscReal ftime, Vec X, Vec F, void *ctx)
{
  AppCtx            *user = (AppCtx *)ctx;
  DM                 da;
  PetscScalar       *x, *f;
  Vec                localX;
  PetscInt           fStart, fEnd, nF;
  PetscInt           cell, cStart, cEnd, nC;
  DM                 dmFace;             /* DMPLEX for face geometry */
  PetscFV            fvm;                /* specify type of FVM discretization */
  Vec                cellGeom, faceGeom; /* vector of structs related to cell/face geometry*/
  const PetscScalar *fgeom;              /* values stored in the vector facegeom */
  PetscFVFaceGeom   *fgA;                /* struct with face geometry information */
  const PetscInt    *cellcone, *cellsupport;
  PetscScalar        flux_east, flux_west, flux_north, flux_south, flux_centre;
  PetscScalar        centroid_x[2], centroid_y[2], boundary = 0.0;
  PetscScalar        boundary_left = 0.0;
  PetscReal          u_plus, u_minus, v_plus, v_minus, zero = 0.0;
  PetscScalar        delta_x, delta_y;

  PetscFunctionBeginUser;
  PetscCall(TSGetDM(ts, &da));
  PetscCall(DMGetLocalVector(da, &localX));
  /* Scatter ghost points to local vector, using the 2-step process
     DMGlobalToLocalBegin(), DMGlobalToLocalEnd(). */
  PetscCall(DMGlobalToLocalBegin(da, X, INSERT_VALUES, localX));
  PetscCall(DMGlobalToLocalEnd(da, X, INSERT_VALUES, localX));
  /* Get pointers to vector data. */
  PetscCall(VecGetArray(localX, &x));
  PetscCall(VecGetArray(F, &f));
  /* Obtaining local cell and face ownership */
  PetscCall(DMPlexGetHeightStratum(da, 0, &cStart, &cEnd));
  PetscCall(DMPlexGetHeightStratum(da, 1, &fStart, &fEnd));
  /* Creating the PetscFV object to obtain face and cell geometry.
     Later to be used to compute face centroid to find cell widths. */
  PetscCall(PetscFVCreate(PETSC_COMM_WORLD, &fvm));
  PetscCall(PetscFVSetType(fvm, PETSCFVUPWIND));
  /*....Retrieve precomputed cell geometry....*/
  PetscCall(DMPlexGetDataFVM(da, fvm, &cellGeom, &faceGeom, NULL));
  PetscCall(VecGetDM(faceGeom, &dmFace));
  PetscCall(VecGetArrayRead(faceGeom, &fgeom));
  /* Split each velocity component into its positive and negative parts
     for the upwind discretization. */
  u_plus  = PetscMax(user->u, zero);
  u_minus = PetscMin(user->u, zero);
  v_plus  = PetscMax(user->v, zero);
  v_minus = PetscMin(user->v, zero);
  for (cell = cStart; cell < cEnd; cell++) {
    /* Obtaining the faces of the cell */
    PetscCall(DMPlexGetConeSize(da, cell, &nF));
    PetscCall(DMPlexGetCone(da, cell, &cellcone));
    /* south */
    PetscCall(DMPlexPointLocalRead(dmFace, cellcone[0], fgeom, &fgA));
    centroid_y[0] = fgA->centroid[1];
    /* North */
    PetscCall(DMPlexPointLocalRead(dmFace, cellcone[2], fgeom, &fgA));
    centroid_y[1] = fgA->centroid[1];
    /* West */
    PetscCall(DMPlexPointLocalRead(dmFace, cellcone[3], fgeom, &fgA));
    centroid_x[0] = fgA->centroid[0];
    /* East */
    PetscCall(DMPlexPointLocalRead(dmFace, cellcone[1], fgeom, &fgA));
    centroid_x[1] = fgA->centroid[0];
    /* Computing the cell widths in the x and y direction */
    delta_x = centroid_x[1] - centroid_x[0];
    delta_y = centroid_y[1] - centroid_y[0];
    /* Getting the neighbors of each face; boundary faces (support size 1)
       use the fixed boundary values instead of a neighbor cell. */
    /* cellcone[0] - south */
    PetscCall(DMPlexGetSupportSize(da, cellcone[0], &nC));
    PetscCall(DMPlexGetSupport(da, cellcone[0], &cellsupport));
    if (nC == 2) flux_south = (x[cellsupport[0]] * (-v_plus - user->diffusion * delta_x)) / delta_y;
    else flux_south = (boundary * (-v_plus - user->diffusion * delta_x)) / delta_y;
    /* cellcone[1] - east */
    PetscCall(DMPlexGetSupportSize(da, cellcone[1], &nC));
    PetscCall(DMPlexGetSupport(da, cellcone[1], &cellsupport));
    if (nC == 2) flux_east = (x[cellsupport[1]] * (u_minus - user->diffusion * delta_y)) / delta_x;
    else flux_east = (boundary * (u_minus - user->diffusion * delta_y)) / delta_x;
    /* cellcone[2] - north */
    PetscCall(DMPlexGetSupportSize(da, cellcone[2], &nC));
    PetscCall(DMPlexGetSupport(da, cellcone[2], &cellsupport));
    if (nC == 2) flux_north = (x[cellsupport[1]] * (v_minus - user->diffusion * delta_x)) / delta_y;
    else flux_north = (boundary * (v_minus - user->diffusion * delta_x)) / delta_y;
    /* cellcone[3] - west */
    PetscCall(DMPlexGetSupportSize(da, cellcone[3], &nC));
    PetscCall(DMPlexGetSupport(da, cellcone[3], &cellsupport));
    if (nC == 2) flux_west = (x[cellsupport[0]] * (-u_plus - user->diffusion * delta_y)) / delta_x;
    else flux_west = (boundary_left * (-u_plus - user->diffusion * delta_y)) / delta_x;
    /* Contribution by the cell to the fluxes */
    flux_centre = x[cell] * ((u_plus - u_minus + 2 * user->diffusion * delta_y) / delta_x + (v_plus - v_minus + 2 * user->diffusion * delta_x) / delta_y);
    /* Calculating the net flux for each cell
       and computing the RHS time derivative f[.] */
    f[cell] = -(flux_centre + flux_east + flux_west + flux_north + flux_south);
  }
  /* The read-access on faceGeom was never restored in the original; every
     VecGetArrayRead() must be paired with VecRestoreArrayRead(). */
  PetscCall(VecRestoreArrayRead(faceGeom, &fgeom));
  PetscCall(PetscFVDestroy(&fvm));
  PetscCall(VecRestoreArray(localX, &x));
  PetscCall(VecRestoreArray(F, &f));
  PetscCall(DMRestoreLocalVector(da, &localX));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Driver: build the mesh, lay out one cell-centered scalar field u with a
   Dirichlet marker on the boundary, set the initial pulse, and advance in
   time with an explicit TS (Euler by default, overridable from options). */
int main(int argc, char **argv)
{
  TS                    ts; /* time integrator */
  SNES                  snes;
  Vec                   x, r; /* solution, residual vectors */
  DM                    da;
  PetscMPIInt           rank;
  PetscViewerAndFormat *vf;
  AppCtx                user; /* mesh context */
  PetscInt              dim, numFields = 1, numBC, i;
  PetscInt              numComp[1];
  PetscInt              numDof[12];
  PetscInt              bcField[1];
  PetscSection          section;
  IS                    bcPointIS[1];

  /* Initialize program */
  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, (char *)0, help));
  PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
  /* Create distributed array (DMPLEX) to manage parallel grid and vectors */
  PetscCall(ProcessOptions(PETSC_COMM_WORLD, &user));
  PetscCall(CreateMesh(PETSC_COMM_WORLD, &user, &da));
  PetscCall(DMGetDimension(da, &dim));
  /* Specifying the fields and dof for the formula through PETSc Section
     Create a scalar field u with 1 component on cells, faces and edges.
     Alternatively, the field information could be added through a PETSCFV object
     using DMAddField(...). */
  numComp[0] = 1;
  for (i = 0; i < numFields * (dim + 1); ++i) numDof[i] = 0;
  numDof[0 * (dim + 1)]           = 1; /* vertices */
  numDof[0 * (dim + 1) + dim - 1] = 1; /* faces */
  numDof[0 * (dim + 1) + dim]     = 1; /* cells */
  /* Setup boundary conditions */
  numBC = 1;
  /* Prescribe a Dirichlet condition on u on the boundary
     Label "marker" is made by the mesh creation routine */
  bcField[0] = 0;
  PetscCall(DMGetStratumIS(da, "marker", 1, &bcPointIS[0]));
  /* Create a PetscSection with this data layout */
  PetscCall(DMSetNumFields(da, numFields));
  /* Note: the two &section arguments below were garbled ("§ion") in the
     extracted copy; restored to the correct address-of expressions. */
  PetscCall(DMPlexCreateSection(da, NULL, numComp, numDof, numBC, bcField, NULL, bcPointIS, NULL, &section));
  /* Name the Field variables */
  PetscCall(PetscSectionSetFieldName(section, 0, "u"));
  /* Tell the DM to use this section (with the specified fields and dof) */
  PetscCall(DMSetLocalSection(da, section));
  /* Extract global vectors from DMDA; then duplicate for remaining
     vectors that are the same types */
  /* Create a Vec with this layout and view it */
  PetscCall(DMGetGlobalVector(da, &x));
  PetscCall(VecDuplicate(x, &r));
  /* Create timestepping solver context */
  PetscCall(TSCreate(PETSC_COMM_WORLD, &ts));
  PetscCall(TSSetProblemType(ts, TS_NONLINEAR));
  PetscCall(TSSetRHSFunction(ts, NULL, FormFunction, &user));
  PetscCall(TSSetMaxTime(ts, 1.0));
  PetscCall(TSSetExactFinalTime(ts, TS_EXACTFINALTIME_STEPOVER));
  PetscCall(TSMonitorSet(ts, MyTSMonitor, PETSC_VIEWER_STDOUT_WORLD, NULL));
  PetscCall(TSSetDM(ts, da));
  /* Customize nonlinear solver */
  PetscCall(TSSetType(ts, TSEULER));
  PetscCall(TSGetSNES(ts, &snes));
  PetscCall(PetscViewerAndFormatCreate(PETSC_VIEWER_STDOUT_WORLD, PETSC_VIEWER_DEFAULT, &vf));
  PetscCall(SNESMonitorSet(snes, (PetscErrorCode(*)(SNES, PetscInt, PetscReal, void *))MySNESMonitor, vf, (PetscErrorCode(*)(void **))PetscViewerAndFormatDestroy));
  /* Set initial conditions */
  PetscCall(FormInitialSolution(da, x));
  PetscCall(TSSetTimeStep(ts, .0001));
  PetscCall(TSSetSolution(ts, x));
  /* Set runtime options */
  PetscCall(TSSetFromOptions(ts));
  /* Solve nonlinear system */
  PetscCall(TSSolve(ts, x));
  /* Clean up routine */
  PetscCall(DMRestoreGlobalVector(da, &x));
  PetscCall(ISDestroy(&bcPointIS[0]));
  PetscCall(PetscSectionDestroy(&section));
  PetscCall(VecDestroy(&r));
  PetscCall(TSDestroy(&ts));
  PetscCall(DMDestroy(&da));
  PetscCall(PetscFinalize());
  return 0;
}
/*TEST
test:
suffix: 0
args: -dm_plex_simplex 0 -dm_plex_box_faces 20,20 -dm_plex_boundary_label boundary -ts_max_steps 5 -ts_type rk
requires: !single !complex triangle ctetgen
TEST*/
|
a5a67011613a95295e42d8a5080263aee7626af0
|
aa3befea459382dc5c01c925653d54f435b3fb0f
|
/net/socket/net_close.c
|
4ffc56b5c5000009f01f7c027e750450a65c9e68
|
[
"MIT-open-group",
"BSD-3-Clause",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"LicenseRef-scancode-warranty-disclaimer",
"MIT-0",
"LicenseRef-scancode-bsd-atmel",
"LicenseRef-scancode-gary-s-brown",
"LicenseRef-scancode-proprietary-license",
"SunPro",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-other-permissive",
"HPND",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"GPL-1.0-or-later",
"CC-BY-2.0",
"CC-BY-4.0"
] |
permissive
|
apache/nuttx
|
14519a7bff4a87935d94fb8fb2b19edb501c7cec
|
606b6d9310fb25c7d92c6f95bf61737e3c79fa0f
|
refs/heads/master
| 2023-08-25T06:55:45.822534
| 2023-08-23T16:03:31
| 2023-08-24T21:25:47
| 228,103,273
| 407
| 241
|
Apache-2.0
| 2023-09-14T18:26:05
| 2019-12-14T23:27:55
|
C
|
UTF-8
|
C
| false
| false
| 3,583
|
c
|
net_close.c
|
/****************************************************************************
* net/socket/net_close.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <debug.h>
#include <assert.h>
#include <nuttx/net/net.h>
#include "socket/socket.h"
#ifdef CONFIG_NET
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: psock_close
*
* Description:
* Performs the close operation on a socket instance
*
* Input Parameters:
* psock Socket instance
*
* Returned Value:
* Returns zero (OK) on success. On failure, it returns a negated errno
* value to indicate the nature of the error.
*
* Assumptions:
*
****************************************************************************/
int psock_close(FAR struct socket *psock)
{
  int ret;

  /* Verify that the sockfd corresponds to valid, allocated socket */

  if (psock == NULL)
    {
      return -EBADF;
    }

  /* We perform the close operation only if this is the last count on
   * the socket. (actually, I think the socket crefs only takes the values
   * 0 and 1 right now).
   *
   * It is possible for a psock to have no connection, e.g. a TCP socket
   * waiting in accept.
   */

  if (psock->s_conn != NULL)
    {
      FAR struct socket_conn_s *conn = psock->s_conn;

      /* Assume that the socket close operation will be successful.  Save
       * the current flags and mark the socket uninitialized.  This avoids
       * race conditions in the SMP case.  We save the flags as a type
       * unsigned int in case the size of s_flags changes in the future
       * (currently uint8_t).
       */

      unsigned int saveflags = conn->s_flags;
      conn->s_flags &= ~_SF_INITD;

      /* Let the address family's close() method handle the operation */

      DEBUGASSERT(psock->s_sockif != NULL &&
                  psock->s_sockif->si_close != NULL);

      ret = psock->s_sockif->si_close(psock);

      /* Was the close successful */

      if (ret < 0)
        {
          /* No.. restore the socket flags and report the error; the socket
           * remains open and initialized in this case.
           */

          conn->s_flags = saveflags;
          return ret;
        }
    }

  /* The socket will not persist... reset it */

  memset(psock, 0, sizeof(*psock));
  return OK;
}
#endif /* CONFIG_NET */
|
266a70e5bafcd431a4477df0e5e2d750f7c9bce0
|
e1cddfd754d952134e72dfd03522c5ea4fb6008e
|
/src/plugins/wireguard/wireguard_cli.c
|
5fa620507d6b4438f717c7f0065a7f10231de50c
|
[
"Apache-2.0"
] |
permissive
|
FDio/vpp
|
0ad30fa1bec2975ffa6b66b45c9f4f32163123b6
|
f234b0d4626d7e686422cc9dfd25958584f4931e
|
refs/heads/master
| 2023-08-31T16:09:04.068646
| 2022-03-14T09:49:15
| 2023-08-31T09:50:00
| 96,556,718
| 1,048
| 630
|
Apache-2.0
| 2023-06-21T05:39:17
| 2017-07-07T16:29:40
|
C
|
UTF-8
|
C
| false
| false
| 10,749
|
c
|
wireguard_cli.c
|
/*
* Copyright (c) 2020 Cisco and/or its affiliates.
* Copyright (c) 2020 Doc.ai and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <wireguard/wireguard.h>
#include <wireguard/wireguard_key.h>
#include <wireguard/wireguard_peer.h>
#include <wireguard/wireguard_if.h>
/**
 * CLI handler: "wireguard create ...".
 *
 * Parses instance number, listen port, source IP and a base64 private key
 * (or generates one), then creates a wireguard interface.
 *
 * Fix: the base64 key string parsed with unformat "%s" is a heap vector
 * that was never released — freed below after parsing.
 */
static clib_error_t *
wg_if_create_cli (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  wg_main_t *wmp = &wg_main;
  unformat_input_t _line_input, *line_input = &_line_input;
  u8 private_key[NOISE_PUBLIC_KEY_LEN + 1];
  u32 instance, sw_if_index;
  ip_address_t src_ip;
  clib_error_t *error;
  u8 *private_key_64;
  u32 port, generate_key = 0;
  int rv;

  error = NULL;
  instance = sw_if_index = ~0;
  private_key_64 = 0;
  port = 0;

  wg_feature_init (wmp);

  if (unformat_user (input, unformat_line_input, line_input))
    {
      while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
	{
	  if (unformat (line_input, "instance %d", &instance))
	    ;
	  else if (unformat (line_input, "private-key %s", &private_key_64))
	    {
	      if (!(key_from_base64 (private_key_64,
				     NOISE_KEY_LEN_BASE64, private_key)))
		{
		  error = clib_error_return (0, "Error parsing private key");
		  break;
		}
	    }
	  else if (unformat (line_input, "listen-port %d", &port))
	    ;
	  else if (unformat (line_input, "port %d", &port))
	    ;
	  else if (unformat (line_input, "generate-key"))
	    generate_key = 1;
	  else
	    if (unformat (line_input, "src %U", unformat_ip_address, &src_ip))
	    ;
	  else
	    {
	      error = clib_error_return (0, "unknown input: %U",
					 format_unformat_error, line_input);
	      break;
	    }
	}
      unformat_free (line_input);

      /* Fix: release the "%s"-allocated key string (was leaked on every
       * invocation).  vec_free is a no-op on a NULL vector. */
      vec_free (private_key_64);

      if (error)
	return error;
    }

  if (generate_key)
    curve25519_gen_secret (private_key);

  /* NOTE(review): if neither "private-key" nor "generate-key" was supplied,
   * private_key holds uninitialized stack data here — behavior preserved,
   * but callers should supply one of the two options. */
  rv = wg_if_create (instance, private_key, port, &src_ip, &sw_if_index);

  if (rv)
    return clib_error_return (0, "wireguard interface create failed");

  vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
		   sw_if_index);
  return 0;
}

/*?
 * Create a Wireguard interface.
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_if_create_command, static) = {
  .path = "wireguard create",
  .short_help = "wireguard create listen-port <port> "
      "private-key <key> src <IP> [generate-key]",
  .function = wg_if_create_cli,
};
/* *INDENT-ON* */
/**
 * CLI handler: "wireguard delete <interface>".
 *
 * Resolves the named software interface and deletes the corresponding
 * wireguard interface; errors out if no valid interface was given.
 */
static clib_error_t *
wg_if_delete_cli (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  wg_main_t *wmp = &wg_main;
  vnet_main_t *vnm;
  u32 sw_if_index = ~0;
  int rv;

  wg_feature_init (wmp);
  vnm = vnet_get_main ();

  /* Consume tokens until the interface name is parsed (or input ends). */
  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (!unformat
	  (input, "%U", unformat_vnet_sw_interface, vnm, &sw_if_index))
	break;
    }

  /* Guard clause: bail out early when no interface was recognized. */
  if (~0 == sw_if_index)
    return clib_error_return (0, "no such interface: %U",
			      format_unformat_error, input);

  rv = wg_if_delete (sw_if_index);
  if (rv)
    return clib_error_return (0, "wireguard interface delete failed");

  return 0;
}

/*?
 * Delete a Wireguard interface.
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_if_delete_command, static) = {
  .path = "wireguard delete",
  .short_help = "wireguard delete <interface>",
  .function = wg_if_delete_cli,
};
/* *INDENT-ON* */
/**
 * CLI handler: "wireguard peer add ...".
 *
 * Parses the peer's public key, endpoint, allowed-ip prefixes, FIB table,
 * destination port, keepalive interval and the wireguard tunnel interface,
 * then registers the peer via wg_peer_add().
 *
 * Fix: the parse-failure message for the *public* key wrongly said
 * "private key" (copy-paste from wg_if_create_cli).
 */
static clib_error_t *
wg_peer_add_command_fn (vlib_main_t * vm,
			unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vnet_main_t *vnm = vnet_get_main ();
  wg_main_t *wmp = &wg_main;
  clib_error_t *error = NULL;
  unformat_input_t _line_input, *line_input = &_line_input;
  u8 *public_key_64 = 0;
  u8 public_key[NOISE_PUBLIC_KEY_LEN + 1];
  fib_prefix_t allowed_ip, *allowed_ips = NULL;
  ip_prefix_t pfx;
  ip_address_t ip = ip_address_initializer;
  u32 portDst = 0, table_id = 0;
  u32 persistent_keepalive = 0;
  u32 tun_sw_if_index = ~0;
  u32 peer_index;
  int rv;

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  wg_feature_init (wmp);

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "public-key %s", &public_key_64))
	{
	  if (!(key_from_base64 (public_key_64,
				 NOISE_KEY_LEN_BASE64, public_key)))
	    {
	      /* Fix: this parses the peer's PUBLIC key. */
	      error = clib_error_return (0, "Error parsing public key");
	      goto done;
	    }
	}
      else if (unformat (line_input, "endpoint %U", unformat_ip_address, &ip))
	;
      else if (unformat (line_input, "table-id %d", &table_id))
	;
      else if (unformat (line_input, "dst-port %d", &portDst))
	;
      else if (unformat (line_input, "persistent-keepalive %d",
			 &persistent_keepalive))
	;
      else if (unformat (line_input, "allowed-ip %U",
			 unformat_ip_prefix, &pfx))
	{
	  /* Collect every allowed-ip prefix into a vector. */
	  ip_prefix_to_fib_prefix (&pfx, &allowed_ip);
	  vec_add1 (allowed_ips, allowed_ip);
	}
      else if (unformat (line_input, "%U",
			 unformat_vnet_sw_interface, vnm, &tun_sw_if_index))
	;
      else
	{
	  error = clib_error_return (0, "Input error");
	  goto done;
	}
    }

  if (0 == vec_len (allowed_ips))
    {
      error = clib_error_return (0, "Allowed IPs are not specified");
      goto done;
    }

  rv = wg_peer_add (tun_sw_if_index, public_key, table_id, &ip_addr_46 (&ip),
		    allowed_ips, portDst, persistent_keepalive, &peer_index);

  /* Map wg_peer_add() error codes onto user-facing messages. */
  switch (rv)
    {
    case VNET_API_ERROR_KEY_LENGTH:
      error = clib_error_return (0, "Error parsing public key");
      break;
    case VNET_API_ERROR_ENTRY_ALREADY_EXISTS:
      error = clib_error_return (0, "Peer already exist");
      break;
    case VNET_API_ERROR_INVALID_SW_IF_INDEX:
      error = clib_error_return (0, "Tunnel is not specified");
      break;
    case VNET_API_ERROR_LIMIT_EXCEEDED:
      error = clib_error_return (0, "Max peers limit");
      break;
    case VNET_API_ERROR_INIT_FAILED:
      error = clib_error_return (0, "wireguard device parameters is not set");
      break;
    case VNET_API_ERROR_INVALID_PROTOCOL:
      error = clib_error_return (0, "ipv6 not supported yet");
      break;
    }

done:
  /* Single cleanup point for all exits (goto-based cleanup). */
  vec_free (public_key_64);
  vec_free (allowed_ips);
  unformat_free (line_input);
  return error;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_peer_add_command, static) = {
  .path = "wireguard peer add",
  .short_help =
    "wireguard peer add <wg_int> public-key <pub_key_other> "
    "endpoint <ip4_dst> allowed-ip <prefix> "
    "dst-port [port_dst] persistent-keepalive [keepalive_interval]",
  .function = wg_peer_add_command_fn,
};
/* *INDENT-ON* */
/**
 * CLI handler: "wireguard peer remove <index>".
 *
 * Parses a numeric peer index and removes the matching peer.
 */
static clib_error_t *
wg_peer_remove_command_fn (vlib_main_t * vm,
			   unformat_input_t * input, vlib_cli_command_t * cmd)
{
  wg_main_t *wmp = &wg_main;
  unformat_input_t _line_input, *line_input = &_line_input;
  clib_error_t *error = NULL;
  u32 peer_index;
  int rv;

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  wg_feature_init (wmp);

  /* Guard clause: a decimal peer index is the only accepted token. */
  if (!unformat (line_input, "%d", &peer_index))
    {
      error = clib_error_return (0, "Input error");
      goto done;
    }

  rv = wg_peer_remove (peer_index);
  if (rv == VNET_API_ERROR_KEY_LENGTH)
    error = clib_error_return (0, "Error parsing public key");

done:
  unformat_free (line_input);
  return error;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_peer_remove_command, static) =
{
  .path = "wireguard peer remove",
  .short_help = "wireguard peer remove <index>",
  .function = wg_peer_remove_command_fn,
};
/* *INDENT-ON* */
/* Walk callback: print one peer; arg is the vlib_main_t for CLI output. */
static walk_rc_t
wg_peer_show_one (index_t peeri, void *arg)
{
  vlib_main_t *vm = arg;

  vlib_cli_output (vm, "%U", format_wg_peer, peeri);
  return (WALK_CONTINUE);
}

/* CLI handler: "show wireguard peer" — dump every configured peer. */
static clib_error_t *
wg_show_peer_command_fn (vlib_main_t * vm,
			 unformat_input_t * input, vlib_cli_command_t * cmd)
{
  wg_peer_walk (wg_peer_show_one, vm);
  return NULL;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_show_peers_command, static) =
{
  .path = "show wireguard peer",
  .short_help = "show wireguard peer",
  .function = wg_show_peer_command_fn,
};
/* *INDENT-ON* */
/* Walk callback: print one wireguard interface; arg is the vlib_main_t. */
static walk_rc_t
wg_if_show_one (index_t itfi, void *arg)
{
  vlib_main_t *vm = arg;

  vlib_cli_output (vm, "%U", format_wg_if, itfi);
  return (WALK_CONTINUE);
}

/* CLI handler: "show wireguard interface" — dump every wg interface. */
static clib_error_t *
wg_show_if_command_fn (vlib_main_t * vm,
		       unformat_input_t * input, vlib_cli_command_t * cmd)
{
  wg_main_t *wmp = &wg_main;

  wg_feature_init (wmp);
  wg_if_walk (wg_if_show_one, vm);
  return NULL;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (wg_show_itfs_command, static) =
{
  .path = "show wireguard interface",
  .short_help = "show wireguard",
  .function = wg_show_if_command_fn,
};
/**
 * CLI handler: "set wireguard async mode on|off".
 *
 * Fix: the original returned straight out of the parse loop on unknown
 * input without calling unformat_free(), leaking the line input buffer.
 * All exits now funnel through a single cleanup point.
 */
static clib_error_t *
wg_set_async_mode_command_fn (vlib_main_t *vm, unformat_input_t *input,
			      vlib_cli_command_t *cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  clib_error_t *error = NULL;
  int async_enable = 0;

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "on"))
	async_enable = 1;
      else if (unformat (line_input, "off"))
	async_enable = 0;
      else
	{
	  /* Build the error first (it snapshots line_input), then fall
	   * through to the common cleanup below. */
	  error = clib_error_return (0, "unknown input '%U'",
				     format_unformat_error, line_input);
	  break;
	}
    }

  /* Only apply the mode when parsing succeeded (original behavior). */
  if (!error)
    wg_set_async_mode (async_enable);

  unformat_free (line_input);
  return error;
}

VLIB_CLI_COMMAND (wg_set_async_mode_command, static) = {
  .path = "set wireguard async mode",
  .short_help = "set wireguard async mode on|off",
  .function = wg_set_async_mode_command_fn,
};
/* CLI handler: "show wireguard mode" — report, for every operation-mode
 * flag, whether it is currently enabled. */
static clib_error_t *
wg_show_mode_command_fn (vlib_main_t *vm, unformat_input_t *input,
			 vlib_cli_command_t *cmd)
{
  vlib_cli_output (vm, "Wireguard mode");

  /* Expand one output line per entry of foreach_wg_op_mode_flags; the
   * macro arguments are (value, flag-name-suffix, display-string) and
   * wg_op_mode_is_set_<f>() tests the corresponding flag. */
#define _(v, f, s)                                                            \
  vlib_cli_output (vm, "\t%s: %s", s,                                         \
		   (wg_op_mode_is_set_##f () ? "enabled" : "disabled"));
  foreach_wg_op_mode_flags
#undef _
  return (NULL);
}

VLIB_CLI_COMMAND (wg_show_modemode_command, static) = {
  .path = "show wireguard mode",
  .short_help = "show wireguard mode",
  .function = wg_show_mode_command_fn,
};
/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
|
a3cf54559be8244757f9931e2c7a9ede39e01a02
|
aa3befea459382dc5c01c925653d54f435b3fb0f
|
/drivers/sensors/hc_sr04.c
|
859f36a16b6cedbca570783748f8e82afe86df6b
|
[
"MIT-open-group",
"BSD-3-Clause",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"LicenseRef-scancode-warranty-disclaimer",
"MIT-0",
"LicenseRef-scancode-bsd-atmel",
"LicenseRef-scancode-gary-s-brown",
"LicenseRef-scancode-proprietary-license",
"SunPro",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-other-permissive",
"HPND",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"GPL-1.0-or-later",
"CC-BY-2.0",
"CC-BY-4.0"
] |
permissive
|
apache/nuttx
|
14519a7bff4a87935d94fb8fb2b19edb501c7cec
|
606b6d9310fb25c7d92c6f95bf61737e3c79fa0f
|
refs/heads/master
| 2023-08-25T06:55:45.822534
| 2023-08-23T16:03:31
| 2023-08-24T21:25:47
| 228,103,273
| 407
| 241
|
Apache-2.0
| 2023-09-14T18:26:05
| 2019-12-14T23:27:55
|
C
|
UTF-8
|
C
| false
| false
| 10,925
|
c
|
hc_sr04.c
|
/****************************************************************************
* drivers/sensors/hc_sr04.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <assert.h>
#include <debug.h>
#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <errno.h>
#include <nuttx/arch.h>
#include <nuttx/irq.h>
#include <nuttx/kmalloc.h>
#include <nuttx/mutex.h>
#include <nuttx/signal.h>
#include <nuttx/random.h>
#include <nuttx/sensors/hc_sr04.h>
/****************************************************************************
* Pre-Processor Definitions
****************************************************************************/
#ifdef CONFIG_HCSR04_DEBUG
# define hcsr04_dbg(x, ...) _info(x, ##__VA_ARGS__)
#else
# define hcsr04_dbg(x, ...) sninfo(x, ##__VA_ARGS__)
#endif
/****************************************************************************
* Private Function Prototypes
****************************************************************************/
static int hcsr04_open(FAR struct file *filep);
static int hcsr04_close(FAR struct file *filep);
static ssize_t hcsr04_read(FAR struct file *filep, FAR char *buffer,
size_t buflen);
static ssize_t hcsr04_write(FAR struct file *filep, FAR const char *buffer,
size_t buflen);
static int hcsr04_ioctl(FAR struct file *filep, int cmd, unsigned long arg);
static int hcsr04_poll(FAR struct file *filep, FAR struct pollfd *fds,
bool setup);
/****************************************************************************
* Private Types
****************************************************************************/
/* Per-device state for one HC-SR04 ultrasonic distance sensor. */

struct hcsr04_dev_s
{
  FAR struct hcsr04_config_s *config; /* Board hooks: trigger, irq, clock */
  mutex_t devlock;                    /* Serializes driver entry points */
  sem_t conv_donesem;                 /* Posted by ISR when pulse measured */
  int time_start_pulse;               /* Clock ticks at echo rising edge */
  int time_finish_pulse;              /* Clock ticks at echo falling edge */
  volatile bool rising;               /* True while awaiting the rising edge */
  struct pollfd *fds[CONFIG_HCSR04_NPOLLWAITERS]; /* Registered poll waiters */
};
/****************************************************************************
* Private Data
****************************************************************************/
/* Character-driver entry points handed to register_driver(). */

static const struct file_operations g_hcsr04ops =
{
  hcsr04_open,   /* open */
  hcsr04_close,  /* close */
  hcsr04_read,   /* read */
  hcsr04_write,  /* write */
  NULL,          /* seek */
  hcsr04_ioctl,  /* ioctl */
  NULL,          /* mmap */
  NULL,          /* truncate */
  hcsr04_poll    /* poll */
};
/****************************************************************************
* Private Functions
****************************************************************************/
/* Return the last measured echo-pulse width in clock ticks, or -EAGAIN
 * when no completed conversion is pending (the done-semaphore count is
 * nonzero only after the ISR has posted it). */

static int hcsr04_read_distance(FAR struct hcsr04_dev_s *priv)
{
  int semcount;

  nxsem_get_value(&priv->conv_donesem, &semcount);

  /* Guard clause: no finished conversion available yet. */

  if (semcount != 0)
    {
      return -EAGAIN;
    }

  return priv->time_finish_pulse - priv->time_start_pulse;
}
/* Arm the echo-edge interrupt (rising edge first) and emit the 10 us
 * trigger pulse that starts one HC-SR04 measurement.  The IRQ must be
 * configured before the trigger fires so the rising edge is not missed.
 * Always returns 0. */

static int hcsr04_start_measuring(FAR struct hcsr04_dev_s *priv)
{
  /* Configure the interruption */

  priv->rising = true;
  priv->config->irq_setmode(priv->config, priv->rising);
  priv->config->irq_enable(priv->config, true);

  /* Send to 10uS trigger pulse */

  priv->config->set_trigger(priv->config, true);
  nxsig_usleep(10);
  priv->config->set_trigger(priv->config, false);
  return 0;
}
/* open() entry point: no per-open state, but take and release the device
 * lock so an open cannot complete while another operation is in flight. */

static int hcsr04_open(FAR struct file *filep)
{
  FAR struct inode *inode = filep->f_inode;
  FAR struct hcsr04_dev_s *priv = inode->i_private;
  int ret = nxmutex_lock(&priv->devlock);

  if (ret < 0)
    {
      return ret;
    }

  nxmutex_unlock(&priv->devlock);
  hcsr04_dbg("OPENED\n");
  return OK;
}
/* close() entry point: mirror of hcsr04_open — nothing to tear down, the
 * lock round-trip only serializes against concurrent operations. */

static int hcsr04_close(FAR struct file *filep)
{
  FAR struct inode *inode = filep->f_inode;
  FAR struct hcsr04_dev_s *priv = inode->i_private;
  int ret = nxmutex_lock(&priv->devlock);

  if (ret < 0)
    {
      return ret;
    }

  nxmutex_unlock(&priv->devlock);
  hcsr04_dbg("CLOSED\n");
  return OK;
}
/* read() entry point: trigger one measurement, block until the ISR posts
 * the done-semaphore, then format the pulse width (clock ticks) as a
 * decimal line into the user buffer.  Returns the number of bytes
 * written, 0 if the distance could not be read, or a negated errno.
 *
 * Fix: the original returned without releasing devlock when
 * nxsem_wait_uninterruptible() failed, permanently deadlocking every
 * subsequent open/read/ioctl/poll on the device. */

static ssize_t hcsr04_read(FAR struct file *filep, FAR char *buffer,
                           size_t buflen)
{
  FAR struct inode *inode = filep->f_inode;
  FAR struct hcsr04_dev_s *priv = inode->i_private;
  int distance = 0;
  ssize_t length = 0;
  int ret;

  /* Get exclusive access */

  ret = nxmutex_lock(&priv->devlock);
  if (ret < 0)
    {
      return (ssize_t)ret;
    }

  /* Setup and send a pulse to start measuring */

  hcsr04_start_measuring(priv);

  /* Wait for the conversion to finish */

  ret = nxsem_wait_uninterruptible(&priv->conv_donesem);
  if (ret < 0)
    {
      /* Fix: release the device lock on this error path. */

      nxmutex_unlock(&priv->devlock);
      return (ssize_t)ret;
    }

  distance = hcsr04_read_distance(priv);
  if (distance < 0)
    {
      hcsr04_dbg("failed to read the distance\n");
    }
  else
    {
      /* This interface is mainly intended for easy debugging in nsh. */

      length = snprintf(buffer, buflen, "%d\n", distance);
      if (length > buflen)
        {
          /* snprintf reports the would-be length; cap at the buffer size */

          length = buflen;
        }
    }

  nxmutex_unlock(&priv->devlock);
  return length;
}
/* write() entry point: the sensor is read-only, so writes are accepted
 * and discarded — always reports zero bytes written. */

static ssize_t hcsr04_write(FAR struct file *filep, FAR const char *buffer,
                            size_t buflen)
{
  return 0;
}
/* ioctl() entry point.  Supported commands:
 *   SNIOC_START_CONVERSION - fire a trigger pulse and arm the echo IRQ
 *   SNIOC_READ_RAW_DATA    - accepted, currently a no-op
 *   SNIOC_DUMP_REGS        - debug register dump (CONFIG_HCSR04_DEBUG only)
 * Anything else yields -ENOTTY. */

static int hcsr04_ioctl(FAR struct file *filep, int cmd, unsigned long arg)
{
  FAR struct inode *inode = filep->f_inode;
  FAR struct hcsr04_dev_s *priv = inode->i_private;
  int ret;

  /* Serialize against other driver entry points */

  ret = nxmutex_lock(&priv->devlock);
  if (ret < 0)
    {
      return ret;
    }

  ret = OK;
  switch (cmd)
    {
      case SNIOC_START_CONVERSION:
        ret = hcsr04_start_measuring(priv);
        break;

      case SNIOC_READ_RAW_DATA:
        break;

#ifdef CONFIG_HCSR04_DEBUG
      case SNIOC_DUMP_REGS:
        ret = hcsr04_dump_registers(priv);
        break;
#endif

      default:
        ret = -ENOTTY;
        break;
    }

  nxmutex_unlock(&priv->devlock);
  return ret;
}
/* True when a completed conversion is waiting to be read (the done
 * semaphore count is exactly zero after the ISR's post was consumed). */

static bool hcsr04_sample(FAR struct hcsr04_dev_s *priv)
{
  int semcount;

  nxsem_get_value(&priv->conv_donesem, &semcount);
  return semcount == 0;
}
/* poll() entry point: register (setup == true) or remove (setup == false)
 * a poll waiter.  Only POLLIN is supported; at most
 * CONFIG_HCSR04_NPOLLWAITERS waiters may be registered at once.  If a
 * sample is already available at setup time the waiter is notified
 * immediately. */

static int hcsr04_poll(FAR struct file *filep, FAR struct pollfd *fds,
                       bool setup)
{
  FAR struct inode *inode;
  FAR struct hcsr04_dev_s *priv;
  uint32_t flags;
  int ret = OK;
  int i;

  DEBUGASSERT(filep && fds);
  inode = filep->f_inode;

  DEBUGASSERT(inode && inode->i_private);
  priv = (FAR struct hcsr04_dev_s *)inode->i_private;

  /* Get exclusive access */

  ret = nxmutex_lock(&priv->devlock);
  if (ret < 0)
    {
      return ret;
    }

  if (setup)
    {
      /* Ignore waits that do not include POLLIN */

      if ((fds->events & POLLIN) == 0)
        {
          ret = -EDEADLK;
          goto out;
        }

      /* This is a request to set up the poll. Find an available slot for
       * the poll structure reference.
       */

      for (i = 0; i < CONFIG_HCSR04_NPOLLWAITERS; i++)
        {
          /* Find an available slot */

          if (!priv->fds[i])
            {
              /* Bind the poll structure and this slot */

              priv->fds[i] = fds;
              fds->priv = &priv->fds[i];
              break;
            }
        }

      if (i >= CONFIG_HCSR04_NPOLLWAITERS)
        {
          /* All waiter slots are occupied */

          fds->priv = NULL;
          ret = -EBUSY;
          goto out;
        }

      /* Check with interrupts masked so a completing conversion cannot
       * race the "data already available" test. */

      flags = enter_critical_section();
      if (hcsr04_sample(priv))
        {
          poll_notify(priv->fds, CONFIG_HCSR04_NPOLLWAITERS, POLLIN);
        }

      leave_critical_section(flags);
    }
  else if (fds->priv)
    {
      /* This is a request to tear down the poll. */

      struct pollfd **slot = (struct pollfd **)fds->priv;
      DEBUGASSERT(slot != NULL);

      /* Remove all memory of the poll setup */

      *slot = NULL;
      fds->priv = NULL;
    }

out:
  nxmutex_unlock(&priv->devlock);
  return ret;
}
/* Echo-pin edge interrupt handler.  Two-phase state machine driven by
 * priv->rising: on the rising edge it timestamps the pulse start and
 * re-arms for the falling edge; on the falling edge it timestamps the
 * pulse end, disables the IRQ and posts conv_donesem to wake the reader.
 * The pulse width (finish - start) encodes the measured distance. */

static int hcsr04_int_handler(int irq, FAR void *context, FAR void *arg)
{
  FAR struct hcsr04_dev_s *priv = (FAR struct hcsr04_dev_s *)arg;

  DEBUGASSERT(priv != NULL);

  /* Is this the start of the pulse used to encode the distance ? */

  if (priv->rising)
    {
      /* Get the clock ticks from the free running timer */

      priv->time_start_pulse = priv->config->get_clock(priv->config);

      /* Now we need to wait for the falling edge interruption */

      priv->rising = false;
      priv->config->irq_setmode(priv->config, priv->rising);
      priv->config->irq_enable(priv->config, true);
    }
  else
    {
      /* Get the clock ticks from the free running timer */

      priv->time_finish_pulse = priv->config->get_clock(priv->config);

      /* Disable interruptions */

      priv->config->irq_enable(priv->config, false);

      /* Conversion is done */

      nxsem_post(&priv->conv_donesem);
    }

  hcsr04_dbg("HC-SR04 interrupt\n");

  /* Wake any poll() waiters on both edges */

  poll_notify(priv->fds, CONFIG_HCSR04_NPOLLWAITERS, POLLIN);
  return OK;
}
/****************************************************************************
* Public Functions
****************************************************************************/
/* Register an HC-SR04 driver instance at devpath (e.g. "/dev/dist0").
 *
 * config supplies the board-specific hooks (trigger GPIO, echo IRQ,
 * free-running clock) and must outlive the driver.  Allocates and owns
 * the device state; on register_driver() failure all resources are
 * released.  Returns OK or a negated errno. */

int hcsr04_register(FAR const char *devpath,
                    FAR struct hcsr04_config_s *config)
{
  int ret = 0;
  FAR struct hcsr04_dev_s *priv;

  priv = (struct hcsr04_dev_s *)kmm_zalloc(sizeof(struct hcsr04_dev_s));
  if (!priv)
    {
      hcsr04_dbg("Memory cannot be allocated for HC-SR04 sensor");
      return -ENOMEM;
    }

  priv->config = config;
  nxmutex_init(&priv->devlock);

  /* Done-semaphore starts at 0: posted by the ISR per conversion */

  nxsem_init(&priv->conv_donesem, 0, 0);

  ret = register_driver(devpath, &g_hcsr04ops, 0666, priv);
  if (ret < 0)
    {
      /* Unwind everything allocated above */

      nxmutex_destroy(&priv->devlock);
      nxsem_destroy(&priv->conv_donesem);
      kmm_free(priv);
      hcsr04_dbg("Error occurred during the driver registering = %d\n", ret);
      return ret;
    }

  if (priv->config->irq_clear)
    {
      priv->config->irq_clear(priv->config);
    }

  /* Attach the echo-edge handler but leave the IRQ disabled until a
   * measurement is started. */

  priv->config->irq_attach(priv->config, hcsr04_int_handler, priv);
  priv->config->irq_enable(priv->config, false);
  return OK;
}
|
605184eb2e2d331164451f5a380d3798164ac107
|
3bd385b466cb035fecd2b0c11ae054d42bf44fc2
|
/src/extended/diagonalbandalign_affinegapcost.c
|
9f6091214f90b6953cdf64ca8e08ab92d0b40bda
|
[
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"BSD-2-Clause",
"LicenseRef-scancode-mit-old-style",
"Zlib",
"MIT",
"BSD-3-Clause",
"bzip2-1.0.6"
] |
permissive
|
genometools/genometools
|
c366dff04f6baa887f6b3be3ec55bce824b2bae1
|
df1df94b8c05a9c9bf848ffc6755c87b58573da5
|
refs/heads/master
| 2023-04-13T13:57:18.748796
| 2023-04-09T21:29:53
| 2023-04-09T21:29:53
| 11,177,980
| 237
| 63
|
NOASSERTION
| 2023-04-09T21:29:54
| 2013-07-04T13:39:38
|
C
|
UTF-8
|
C
| false
| false
| 60,993
|
c
|
diagonalbandalign_affinegapcost.c
|
/*
Copyright (c) 2015 Annika Seidel <annika.seidel@studium.uni-hamburg.de>
Copyright (c) 2015 Center for Bioinformatics, University of Hamburg
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <ctype.h>
#include <string.h>
#include "core/array2dim_api.h"
#include "core/minmax_api.h"
#include "core/types_api.h"
#include "core/divmodmul_api.h"
#include "core/ma_api.h"
#include "extended/affinealign.h"
#include "extended/diagonalbandalign.h"
#include "extended/diagonalbandalign_affinegapcost.h"
#include "extended/linearalign_affinegapcost.h"
#include "extended/linspace_management.h"
#include "extended/reconstructalignment.h"
#define LINEAR_EDIST_GAP ((GtUchar) UCHAR_MAX)
/* Fill the affine-gap-cost DP table Atabcolumn restricted to the diagonal
 * band [left_dist, right_dist] for sequences useq[ustart..ustart+ulen) and
 * vseq[vstart..vstart+vlen).  Each cell holds three values (R = replace,
 * D = delete, I = insert) plus backtrace edges; cells outside the band
 * are set to GT_WORD_MAX.  from_edge/edge seed the first cell so partial
 * tables can be stitched together by the divide-and-conquer driver. */
static void diagonalband_fillDPtab_affine(GtAffinealignDPentry **Atabcolumn,
                                          const GtUchar *useq,
                                          GtUword ustart,
                                          GtUword ulen,
                                          const GtUchar *vseq,
                                          GtUword vstart,
                                          GtUword vlen,
                                          GtWord left_dist,
                                          GtWord right_dist,
                                          GtAffineAlignEdge from_edge,
                                          GtAffineAlignEdge edge,
                                          const GtScoreHandler *scorehandler)
{
  GtUword i,j, low_row, high_row, gap_opening, gap_extension;
  GtWord rcost, r_dist, d_dist, i_dist, minvalue;

  gt_assert(Atabcolumn && scorehandler);
  /* The band must contain the main diagonal endpoint (vlen - ulen). */
  if ((left_dist > GT_MIN(0, (GtWord)vlen-(GtWord)ulen))||
      (right_dist < GT_MAX(0, (GtWord)vlen-(GtWord)ulen)))
  {
    gt_assert(false);
  }
  gap_opening = gt_scorehandler_get_gap_opening(scorehandler);
  gap_extension = gt_scorehandler_get_gapscore(scorehandler);
  /* low_row/high_row track the band's row window in the current column */
  low_row = 0;
  high_row = -left_dist;

  /* first entry: seed cell (0,0) according to the requested entry edge */
  switch (edge) {
    case Affine_R:
      Atabcolumn[0][0].Rvalue = 0;
      Atabcolumn[0][0].Redge = from_edge;
      Atabcolumn[0][0].Dvalue = GT_WORD_MAX;
      Atabcolumn[0][0].Ivalue = GT_WORD_MAX;
      break;
    case Affine_D:
      Atabcolumn[0][0].Rvalue = GT_WORD_MAX;
      Atabcolumn[0][0].Dvalue = 0;
      Atabcolumn[0][0].Dedge = from_edge;
      Atabcolumn[0][0].Ivalue = GT_WORD_MAX;
      break;
    case Affine_I:
      Atabcolumn[0][0].Rvalue = GT_WORD_MAX;
      Atabcolumn[0][0].Dvalue = GT_WORD_MAX;
      Atabcolumn[0][0].Ivalue = 0;
      Atabcolumn[0][0].Iedge = from_edge;
      break;
    default:
      Atabcolumn[0][0].Rvalue = 0;
      Atabcolumn[0][0].Dvalue = gap_opening;
      Atabcolumn[0][0].Ivalue = gap_opening;
  }

  /* first column: only deletions are possible inside the band */
  for (i = 1; i <= high_row; i++)
  {
    Atabcolumn[i][0].Rvalue = GT_WORD_MAX;
    /* add_safe_max clamps additions involving the GT_WORD_MAX sentinel */
    r_dist = add_safe_max(Atabcolumn[i-1][0].Rvalue,
                          gap_opening + gap_extension);
    d_dist = add_safe_max(Atabcolumn[i-1][0].Dvalue, gap_extension);
    i_dist = add_safe_max(Atabcolumn[i-1][0].Ivalue,
                          gap_opening + gap_extension);
    Atabcolumn[i][0].Dvalue = GT_MIN3(r_dist, d_dist, i_dist);
    Atabcolumn[i][0].Ivalue = GT_WORD_MAX;
    Atabcolumn[i][0].Redge = Affine_X;
    Atabcolumn[i][0].Dedge = gt_linearalign_affinegapcost_set_edge(r_dist,
                                                                   d_dist,
                                                                   i_dist);
    Atabcolumn[i][0].Iedge = Affine_X;
  }
  for (; i <= ulen; i++)
  {
    /* invalid values */
    Atabcolumn[i][0].Rvalue = GT_WORD_MAX;
    Atabcolumn[i][0].Dvalue = GT_WORD_MAX;
    Atabcolumn[i][0].Ivalue = GT_WORD_MAX;
  }

  /* next columns */
  for (j = 1; j <= vlen; j++)
  {
    /* below diagonal band*/
    for (i = 0; i <= low_row; i++)
    {
      if (j <= right_dist)
      {
        /* only insertions reach cells on the band's lower boundary */
        Atabcolumn[i][j].Redge = Affine_X;
        Atabcolumn[i][j].Dedge = Affine_X;

        r_dist = add_safe_max(Atabcolumn[i][j-1].Rvalue,
                             gap_extension + gap_opening);
        d_dist = add_safe_max(Atabcolumn[i][j-1].Dvalue,
                             gap_extension + gap_opening);
        i_dist = add_safe_max(Atabcolumn[i][j-1].Ivalue,gap_extension);

        minvalue = GT_MIN3(r_dist, d_dist, i_dist);
        Atabcolumn[i][j].Ivalue = minvalue;
        Atabcolumn[i][j].Rvalue = GT_WORD_MAX;
        Atabcolumn[i][j].Dvalue = GT_WORD_MAX;

        Atabcolumn[i][j].Iedge = gt_linearalign_affinegapcost_set_edge(r_dist,
                                                                       d_dist,
                                                                       i_dist);
      }
      else{
        /* outside the band: mark invalid */
        Atabcolumn[i][j].Rvalue = GT_WORD_MAX;
        Atabcolumn[i][j].Dvalue = GT_WORD_MAX;
        Atabcolumn[i][j].Ivalue = GT_WORD_MAX;
        Atabcolumn[i][j].Iedge = Affine_X;
      }
    }
    /* slide the band window down one row per column once past right_dist */
    if ( j > right_dist)
      low_row++;
    if (high_row < ulen)
      high_row ++;

    /* diagonalband */
    for (; i <= high_row; i++)
    {
      /* compute A_affine(i,j,I) */
      r_dist=add_safe_max(Atabcolumn[i][j-1].Rvalue,gap_extension+gap_opening);
      d_dist=add_safe_max(Atabcolumn[i][j-1].Dvalue,gap_extension+gap_opening);
      i_dist=add_safe_max(Atabcolumn[i][j-1].Ivalue,gap_extension);

      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[i][j].Ivalue = minvalue;
      Atabcolumn[i][j].Iedge = gt_linearalign_affinegapcost_set_edge(
                                                       r_dist, d_dist, i_dist);

      /* compute A_affine(i,j,R) */
      rcost = gt_scorehandler_get_replacement(scorehandler,
                                      useq[ustart+i-1], vseq[vstart+j-1]);
      r_dist = add_safe_max(Atabcolumn[i-1][j-1].Rvalue, rcost);
      d_dist = add_safe_max(Atabcolumn[i-1][j-1].Dvalue, rcost);
      i_dist = add_safe_max(Atabcolumn[i-1][j-1].Ivalue, rcost);

      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[i][j].Rvalue = minvalue;
      Atabcolumn[i][j].Redge = gt_linearalign_affinegapcost_set_edge(
                                                       r_dist, d_dist, i_dist);

      /* compute A_affine(i,j,D) */
      r_dist = add_safe_max(Atabcolumn[i-1][j].Rvalue,
                           gap_extension+gap_opening);
      d_dist = add_safe_max(Atabcolumn[i-1][j].Dvalue,gap_extension);
      i_dist = add_safe_max(Atabcolumn[i-1][j].Ivalue,
                           gap_extension+gap_opening);

      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[i][j].Dvalue = minvalue;
      Atabcolumn[i][j].Dedge = gt_linearalign_affinegapcost_set_edge(
                                                       r_dist, d_dist, i_dist);
    }
    /* above diagonal band */
    for (; i <= ulen; i++)
    {
      Atabcolumn[i][j].Rvalue = GT_WORD_MAX;
      Atabcolumn[i][j].Dvalue = GT_WORD_MAX;
      Atabcolumn[i][j].Ivalue = GT_WORD_MAX;
    }
  }
}
/* calculate alignment with diagonalband in square space with
* affine gapcosts */
/* calculate alignment with diagonalband in square space with
 * affine gapcosts.
 *
 * Fills the full (ulen+1) x (vlen+1) DP table inside the band, then
 * reconstructs the alignment into align by traceback.  If space is NULL
 * the table is heap-allocated (and freed) here; otherwise the caller's
 * linspace-management buffers are reused.  Returns the edit distance. */
GtWord gt_diagonalbandalign_affinegapcost_in_square_space_generic(
                                           GtLinspaceManagement *space,
                                           const GtScoreHandler *scorehandler,
                                           GtAlignment *align,
                                           const GtUchar *useq,
                                           GtUword ustart,
                                           GtUword ulen,
                                           const GtUchar *vseq,
                                           GtUword vstart,
                                           GtUword vlen,
                                           GtWord left_dist,
                                           GtWord right_dist)
{
  GtWord distance;
  GtUword idx;
  GtAffinealignDPentry **Atabcolumn;

  gt_assert(align && scorehandler);
  if (space == NULL)
  {
    gt_array2dim_malloc(Atabcolumn, (ulen+1), (vlen+1));
  }
  else
  {
    /* reuse the pre-allocated linear-space buffers as a 2-dim table:
     * row pointers are laid out over one contiguous value area */
    gt_assert((ulen+1)*(vlen+1)*sizeof(**Atabcolumn) <=
              gt_linspace_management_get_valueTabsize(space));
    Atabcolumn = gt_linspace_management_get_rTabspace(space);
    *Atabcolumn = gt_linspace_management_get_valueTabspace(space);
    for (idx=1; idx<ulen+1; idx++)
      Atabcolumn[idx]=Atabcolumn[idx-1]+vlen+1;
  }
  diagonalband_fillDPtab_affine(Atabcolumn, useq, ustart, ulen, vseq, vstart,
                                vlen, left_dist, right_dist,
                                Affine_X, Affine_X, scorehandler);

  /* the distance is the best of the three affine states in the last cell */
  distance = GT_MIN3(Atabcolumn[ulen][vlen].Rvalue,
                     Atabcolumn[ulen][vlen].Dvalue,
                     Atabcolumn[ulen][vlen].Ivalue);
  /* reconstruct alignment from 2dimarray Atabcolumn */
  gt_affinealign_traceback(align, Atabcolumn, ulen, vlen);

  if (space == NULL)
  {
    gt_array2dim_delete(Atabcolumn);
  }
  return distance;
}
/* calculate alignment with diagonalband in square space with
* affine gapcosts */
/* Convenience wrapper around the _generic variant: builds a temporary
 * score handler from the four explicit cost parameters, computes the
 * banded alignment in square space, and releases the handler again. */
GtWord gt_diagonalbandalign_affinegapcost_in_square_space(
                                                  GtLinspaceManagement *space,
                                                  GtAlignment *align,
                                                  const GtUchar *useq,
                                                  GtUword ustart,
                                                  GtUword ulen,
                                                  const GtUchar *vseq,
                                                  GtUword vstart,
                                                  GtUword vlen,
                                                  GtWord left_dist,
                                                  GtWord right_dist,
                                                  GtUword matchcost,
                                                  GtUword mismatchcost,
                                                  GtUword gap_opening,
                                                  GtUword gap_extension)
{
  GtScoreHandler *costs;
  GtWord dist;

  costs = gt_scorehandler_new(matchcost, mismatchcost,
                              gap_opening, gap_extension);
  dist = gt_diagonalbandalign_affinegapcost_in_square_space_generic(space,
                               costs, align, useq, ustart, ulen,
                               vseq, vstart, vlen, left_dist, right_dist);
  gt_scorehandler_delete(costs);

  return dist;
}
/* calculate only distance with diagonalband in square space with
* affine gapcosts */
/* calculate only distance with diagonalband in square space with
 * affine gapcosts.  Returns GT_WORD_MAX when the band does not contain
 * the endpoint diagonal (vlen - ulen).
 *
 * Fix: distance was declared GtUword although it stores GT_MIN3 of
 * signed GtWord cell values and is returned as GtWord (the GT_WORD_MAX
 * sentinel included) — now typed GtWord, consistent with the sibling
 * functions above. */
GtWord gt_diagonalbandalign_affinegapcost_square_space_distance_only(
                                                 const GtUchar *useq,
                                                 GtUword ustart,
                                                 GtUword ulen,
                                                 const GtUchar *vseq,
                                                 GtUword vstart,
                                                 GtUword vlen,
                                                 GtWord left_dist,
                                                 GtWord right_dist,
                                                 const GtScoreHandler
                                                                *scorehandler)
{
  GtWord distance;
  GtAffinealignDPentry **Atabcolumn;

  gt_assert(scorehandler);
  /* band must span the diagonal reaching cell (ulen, vlen) */
  if ((left_dist > GT_MIN(0, (GtWord)vlen-(GtWord)ulen))||
      (right_dist < GT_MAX(0, (GtWord)vlen-(GtWord)ulen)))
  {
    return GT_WORD_MAX;
  }
  gt_array2dim_malloc(Atabcolumn, (ulen+1), (vlen+1));
  diagonalband_fillDPtab_affine(Atabcolumn, useq, ustart, ulen, vseq, vstart,
                                vlen, left_dist, right_dist,
                                Affine_X, Affine_X, scorehandler);

  /* best of the three affine states in the final cell */
  distance = GT_MIN3(Atabcolumn[ulen][vlen].Rvalue,
                     Atabcolumn[ulen][vlen].Dvalue,
                     Atabcolumn[ulen][vlen].Ivalue);

  gt_array2dim_delete(Atabcolumn);
  return distance;
}
/* Walk the filled 2-dim DP table backwards from cell (ulen, vlen) and
 * record, for every column j, the row index at which the optimal path
 * crosses it (per affine state R/D/I) into Dtab.  rowoffset translates
 * local rows into global rows; from_edge/edge describe how this partial
 * table connects to its neighbours.  Returns the edge/column of the last
 * crosspoint so the caller can continue stitching. */
static GtAffineAlignRnode evaluate_affineDBcrosspoints_from_2dimtab(
                                              GtAffineDiagAlignentry *Dtab,
                                              GtAffinealignDPentry **Atabcolumn,
                                              GtUword ulen, GtUword vlen,
                                              GtUword gap_opening,
                                              GtUword rowoffset,
                                              GtAffineAlignEdge from_edge,
                                              GtAffineAlignEdge edge)
{
  GtUword i, j;
  GtAffineAlignRnode rnode;
  GtDiagAlignentry *tempnode;

  gt_assert(Atabcolumn != NULL);
  i = ulen;
  j = vlen;

  /* pick the cheapest state to leave the last cell through */
  edge = gt_linearalign_affinegapcost_minAdditionalCosts(&Atabcolumn[i][j],
                                                         edge, gap_opening);

  switch (edge)
  {
    case Affine_I:
      tempnode = &Dtab[vlen].val_I;
      rnode = (GtAffineAlignRnode) {vlen, Affine_I};
      break;
    case Affine_D:
      tempnode = &Dtab[vlen].val_D;
      rnode = (GtAffineAlignRnode) {vlen, Affine_D};
      break;
    default:
      tempnode = &Dtab[vlen].val_R;
      rnode = (GtAffineAlignRnode) {vlen, Affine_R};
  }

  /* traceback: follow the recorded edges to cell (0,0); tempnode always
   * points at the crosspoint entry filled one step later, so its
   * last_type links consecutive crosspoints together */
  while (i > 0 || j > 0) {
    if (j == vlen)
      rnode.edge = edge;
    switch (edge) {
      case Affine_R:
        gt_assert(Atabcolumn[i][j].Rvalue != GT_WORD_MAX);
        Dtab[j].val_R.currentrowindex = i + rowoffset;
        edge = Atabcolumn[i][j].Redge;
        tempnode->last_type = Affine_R;
        tempnode = &Dtab[j].val_R;
        gt_assert(i > 0 && j > 0);
        i--;
        j--;
        break;
      case Affine_D:
        /* deletion: moves up a row, no column crosspoint to record */
        edge = Atabcolumn[i][j].Dedge;
        gt_assert(i);
        i--;
        break;
      case Affine_I:
        Dtab[j].val_I.currentrowindex = i + rowoffset;
        edge = Atabcolumn[i][j].Iedge;
        tempnode->last_type = Affine_I;
        tempnode = &Dtab[j].val_I;
        gt_assert(j);
        j--;
        break;
      default:
        gt_assert(false);
    }
  }
  tempnode->last_type = edge;
  /* special case for first crosspoint */
  Dtab[0].val_R = (GtDiagAlignentry) {GT_UWORD_MAX, rowoffset, from_edge};
  Dtab[0].val_D = (GtDiagAlignentry) {GT_UWORD_MAX, rowoffset, from_edge};
  Dtab[0].val_I = (GtDiagAlignentry) {GT_UWORD_MAX, rowoffset, from_edge};

  return rnode;
}
/* create affine DBcrosspointtab to combine square calculating with linear
* calculating. from_edge describes type of crosspoint node, edge describes the
* incoming way to next unkonown crosspoint and to_edge describes type of
* previous crosspoint.
* Returns edge and index of lastcrosspoint in matrix.
*/
/* create affine DBcrosspointtab to combine square calculating with linear
 * calculating. from_edge describes type of crosspoint node, edge describes the
 * incoming way to next unkonown crosspoint and to_edge describes type of
 * previous crosspoint.
 * Returns edge and index of lastcrosspoint in matrix.
 */
static GtAffineAlignRnode affineDtab_in_square_space(
                                                GtLinspaceManagement *space,
                                                GtAffineDiagAlignentry *Dtab,
                                                const GtUchar *useq,
                                                GtUword ustart,
                                                GtUword ulen,
                                                const GtUchar *vseq,
                                                GtUword vstart,
                                                GtUword vlen,
                                                GtWord left_dist,
                                                GtWord right_dist,
                                                GtUword rowoffset,
                                                GtAffineAlignEdge from_edge,
                                                GtAffineAlignEdge edge,
                                                GtAffineAlignEdge to_edge,
                                                const GtScoreHandler
                                                                  *scorehandler)
{
  GtAffinealignDPentry **Atabcolumn;
  GtUword idx, gap_opening;

  gt_assert(Dtab && space && scorehandler);
  /* reuse the caller's linear-space buffers as a 2-dim DP table */
  gt_assert((ulen+1)*(vlen+1)*sizeof(**Atabcolumn) <=
            gt_linspace_management_get_valueTabsize(space));
  Atabcolumn = gt_linspace_management_get_rTabspace(space);
  *Atabcolumn = gt_linspace_management_get_valueTabspace(space);

  /* row pointers laid out over one contiguous value area */
  for (idx=1;idx<ulen+1;idx++)
    Atabcolumn[idx]=Atabcolumn[idx-1]+vlen+1;

  diagonalband_fillDPtab_affine(Atabcolumn, useq, ustart, ulen, vseq, vstart,
                                vlen, left_dist, right_dist,
                                from_edge, edge, scorehandler);

  gap_opening = gt_scorehandler_get_gap_opening(scorehandler);
  /* traceback converts the full table into column crosspoints in Dtab */
  GtAffineAlignRnode rnode = evaluate_affineDBcrosspoints_from_2dimtab(Dtab,
                                       Atabcolumn, ulen, vlen, gap_opening,
                                       rowoffset, from_edge, to_edge);
  return rnode;
}
/* Calculate only the distance (no traceback) within a diagonal band in
 * linear space O(band width) with affine gap costs. The band
 * [left_dist, right_dist] is given relative to the main diagonal and must
 * contain the diagonal of the lower-right corner, otherwise no global
 * alignment exists and an assertion fails. */
static GtWord diagonalband_linear_affine(const GtUchar *useq,
                                         GtUword ustart,
                                         GtUword ulen,
                                         const GtUchar *vseq,
                                         GtUword vstart,
                                         GtUword vlen,
                                         GtWord left_dist,
                                         GtWord right_dist,
                                         const GtScoreHandler *scorehandler)
{
  GtUword colindex, rowindex, low_row, high_row, width,
          gap_opening, gap_extension;
  GtWord distance, rcost, r_dist, d_dist, i_dist, minvalue;
  /* Atabcolumn holds one column of the band; northwest/west cache the
     neighbouring entries of the previous column */
  GtAffinealignDPentry *Atabcolumn, northwestAffinealignDPentry,
                       westAffinealignDPentry;
  bool last_row = false;

  distance = GT_WORD_MAX;
  /* band must cover the diagonal of the end point (global alignment) */
  if ((left_dist > GT_MIN(0, (GtWord)vlen-(GtWord)ulen))||
      (right_dist < GT_MAX(0, (GtWord)vlen-(GtWord)ulen)))
  {
    gt_assert(false);
  }
  gt_assert(scorehandler);
  gap_opening = gt_scorehandler_get_gap_opening(scorehandler);
  gap_extension = gt_scorehandler_get_gapscore(scorehandler);

  /* number of rows inside the band */
  width = right_dist - left_dist + 1;
  Atabcolumn = gt_malloc(sizeof(*Atabcolumn) * width);

  /* first column: rows 0 .. -left_dist (left_dist <= 0 after the check
     above, so high_row is non-negative) */
  low_row = 0;
  high_row = -left_dist;
  Atabcolumn[low_row].Rvalue = 0;
  Atabcolumn[low_row].Dvalue = gap_opening;
  Atabcolumn[low_row].Ivalue = gap_opening;
  for (rowindex = low_row+1; rowindex <= high_row; rowindex ++)
  {
    /* only deletions are possible in column 0 */
    Atabcolumn[rowindex-low_row].Rvalue = GT_WORD_MAX;
    Atabcolumn[rowindex-low_row].Dvalue = add_safe_max(
                                         Atabcolumn[rowindex-low_row-1].Dvalue,
                                         gap_extension);
    Atabcolumn[rowindex-low_row].Ivalue = GT_WORD_MAX;
  }
  if (high_row == ulen)
    last_row = true;

  /* remaining columns */
  for (colindex = 1; colindex <= vlen; colindex++)
  {
    northwestAffinealignDPentry = Atabcolumn[0];
    if (colindex > right_dist)
    {
      /* band shifts downwards by one row */
      if (low_row != high_row)
        westAffinealignDPentry = Atabcolumn[1];
      low_row++;
    }
    else
      westAffinealignDPentry = Atabcolumn[0];
    if (high_row < ulen)
      high_row ++;
    /* NOTE(review): rowindex here still holds its value from the previous
       column's inner loop; the analogous column routine
       (evaluateallaffineDBcolumns) tests low_row == high_row at the
       corresponding place -- confirm this stale use is intended.
       Also, westAffinealignDPentry may be read below without having been
       assigned on the very first iteration that takes neither branch;
       verify the band geometry excludes this. */
    if (!last_row && rowindex == high_row)
    {
      westAffinealignDPentry.Rvalue = GT_WORD_MAX;
      westAffinealignDPentry.Dvalue = GT_WORD_MAX;
      westAffinealignDPentry.Ivalue = GT_WORD_MAX;
    }
    /* insertion for the topmost entry of the band */
    r_dist = add_safe_max(westAffinealignDPentry.Rvalue,
                          gap_extension+gap_opening);
    d_dist = add_safe_max(westAffinealignDPentry.Dvalue,
                          gap_extension+gap_opening);
    i_dist = add_safe_max(westAffinealignDPentry.Ivalue,gap_extension);
    minvalue = GT_MIN3(r_dist, d_dist, i_dist);
    Atabcolumn[0].Ivalue = minvalue;
    Atabcolumn[0].Rvalue = GT_WORD_MAX;
    Atabcolumn[0].Dvalue = GT_WORD_MAX;
    /* replacement possible for the topmost entry once the band has moved
       below row 0 */
    if (low_row > 0 )
    {
      /* NOTE(review): indexes useq with the stale rowindex from the
         previous inner loop; the sibling function uses low_row here --
         confirm against upstream */
      rcost = gt_scorehandler_get_replacement(scorehandler,
                              useq[ustart+rowindex-1],vseq[vstart+colindex-1]);
      r_dist = add_safe_max(northwestAffinealignDPentry.Rvalue, rcost);
      d_dist = add_safe_max(northwestAffinealignDPentry.Dvalue, rcost);
      i_dist = add_safe_max(northwestAffinealignDPentry.Ivalue, rcost);
      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[0].Rvalue = minvalue;
    }
    /* remaining entries of the current column */
    for (rowindex = low_row + 1; rowindex <= high_row; rowindex++)
    {
      northwestAffinealignDPentry = westAffinealignDPentry;
      if (!last_row && rowindex == high_row)
      {
        /* previous entry lies outside of the diagonal band */
        westAffinealignDPentry.Rvalue = GT_WORD_MAX;
        westAffinealignDPentry.Dvalue = GT_WORD_MAX;
        westAffinealignDPentry.Ivalue = GT_WORD_MAX;
      }
      else if (low_row > 0)
        /* shifted band: west neighbour is one slot further down */
        westAffinealignDPentry = Atabcolumn[rowindex-low_row+1];
      else
        westAffinealignDPentry = Atabcolumn[rowindex-low_row];
      if (rowindex == ulen)
        last_row = true;
      /* insertion */
      r_dist = add_safe_max(westAffinealignDPentry.Rvalue,
                            gap_extension+gap_opening);
      d_dist = add_safe_max(westAffinealignDPentry.Dvalue,
                            gap_extension+gap_opening);
      i_dist = add_safe_max(westAffinealignDPentry.Ivalue,gap_extension);
      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[rowindex-low_row].Ivalue = minvalue;
      /* replacement */
      rcost = gt_scorehandler_get_replacement(scorehandler,
                             useq[ustart+rowindex-1], vseq[vstart+colindex-1]);
      r_dist = add_safe_max(northwestAffinealignDPentry.Rvalue, rcost);
      d_dist = add_safe_max(northwestAffinealignDPentry.Dvalue, rcost);
      i_dist = add_safe_max(northwestAffinealignDPentry.Ivalue, rcost);
      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[rowindex-low_row].Rvalue = minvalue;
      /* deletion */
      r_dist = add_safe_max(Atabcolumn[rowindex-low_row-1].Rvalue,
                            gap_extension+gap_opening);
      d_dist = add_safe_max(Atabcolumn[rowindex-low_row-1].Dvalue,
                            gap_extension);
      i_dist = add_safe_max(Atabcolumn[rowindex-low_row-1].Ivalue,
                            gap_extension+gap_opening);
      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[rowindex-low_row].Dvalue = minvalue;
    }
  }
  /* result: minimum over the three affine states in the last band entry */
  distance = GT_MIN3(Atabcolumn[high_row-low_row].Rvalue,
                     Atabcolumn[high_row-low_row].Dvalue,
                     Atabcolumn[high_row-low_row].Ivalue);
  gt_free(Atabcolumn);
  return distance;
}
/* helpfunctions */
/* Mark *node as an unset/invalid crosspoint entry. */
static inline void set_invalid_Diagentry(GtDiagAlignentry *node)
{
  gt_assert(node != NULL);
  node->lastcpoint = GT_UWORD_MAX;
  node->currentrowindex = GT_UWORD_MAX;
  node->last_type = Affine_X;
}
/* Copy predecessor type and index from the state of entry_from that
 * achieved minvalue (ties resolved in the fixed order R, I, D) into
 * node_to. If minvalue matches none of the three, node_to is untouched. */
static inline void set_valid_Diagentry(GtDiagAlignentry *node_to,
                                       const GtAffineAlignRtabentry
                                                                  *entry_from,
                                       GtWord minvalue, GtWord r_dist,
                                       GtWord i_dist, GtWord d_dist)
{
  const GtAffineAlignRnode *src = NULL;

  gt_assert(node_to != NULL && entry_from != NULL);
  if (minvalue == r_dist)
    src = &entry_from->val_R;
  else if (minvalue == i_dist)
    src = &entry_from->val_I;
  else if (minvalue == d_dist)
    src = &entry_from->val_D;
  if (src != NULL)
  {
    node_to->last_type = src->edge;
    node_to->lastcpoint = src->idx;
  }
}
/* Mark *node as an unset/invalid R-table node. */
static inline void set_invalid_Rnode(GtAffineAlignRnode *node)
{
  gt_assert(node != NULL);
  node->edge = Affine_X;
  node->idx = GT_UWORD_MAX;
}
/* Assign to *node_to the R-table node of the state in entry_from that
 * achieved minvalue (ties resolved in the fixed order R, I, D). If
 * minvalue matches none of the three, node_to is left unchanged. */
static inline void set_valid_Rnode(GtAffineAlignRnode *node_to,
                                   GtAffineAlignRtabentry *entry_from,
                                   GtWord minvalue, GtWord r_dist,
                                   GtWord i_dist, GtWord d_dist)
{
  GtAffineAlignRnode *chosen = NULL;

  gt_assert(node_to != NULL && entry_from != NULL);
  if (minvalue == r_dist)
    chosen = &entry_from->val_R;
  else if (minvalue == i_dist)
    chosen = &entry_from->val_I;
  else if (minvalue == d_dist)
    chosen = &entry_from->val_D;
  if (chosen != NULL)
    *node_to = *chosen;
}
/* Fill the first column (column index 0) of the banded affine DP:
 * initialises the value column (Atabcolumn), the crosspoint links
 * (Rtabcolumn) and -- where the middle diagonal of the band crosses
 * column 0 -- the crosspoint entries themselves (Diagcolumn).
 * edge selects in which affine state (R/D/I; Affine_X = all three) the
 * alignment enters this subproblem; from_edge is recorded as predecessor
 * type; offset is the global row offset of this subproblem. */
static void firstaffineDBtabcolumn(GtAffinealignDPentry *Atabcolumn,
                                   GtAffineAlignRtabentry *Rtabcolumn,
                                   GtAffineDiagAlignentry *Diagcolumn,
                                   GtAffineAlignEdge edge,
                                   GtAffineAlignEdge from_edge,
                                   GtUword offset,
                                   GtWord left_dist,
                                   GtWord right_dist,
                                   GtUword gap_opening,
                                   GtUword gap_extension)
{
  GtUword rowindex, low_row, high_row;
  GtWord diag;

  /* middle diagonal of the band: crosspoints are collected along it */
  diag = GT_DIV2(left_dist + right_dist);
  low_row = 0;
  high_row = -left_dist;
  Atabcolumn[low_row].Rvalue = GT_WORD_MAX;
  Atabcolumn[low_row].Dvalue = GT_WORD_MAX;
  Atabcolumn[low_row].Ivalue = GT_WORD_MAX;

  set_invalid_Diagentry(&Diagcolumn[0].val_R);
  set_invalid_Diagentry(&Diagcolumn[0].val_D);
  set_invalid_Diagentry(&Diagcolumn[0].val_I);
  set_invalid_Rnode(&Rtabcolumn[0].val_R);
  set_invalid_Rnode(&Rtabcolumn[0].val_D);
  set_invalid_Rnode(&Rtabcolumn[0].val_I);

  /* the entry state given by edge starts with cost 0; the default case
     (Affine_X) opens all three states */
  switch (edge) {
  case Affine_R:
    Atabcolumn[low_row].Rvalue = 0;
    Rtabcolumn[0].val_R.edge = from_edge;
    if (diag == 0)
    {
      /* middle diagonal passes through (0,0): record a crosspoint */
      Diagcolumn[0].val_R.currentrowindex = 0 + offset;
      Diagcolumn[0].val_R.last_type = from_edge;
      Rtabcolumn[0].val_R.idx = 0;
      Rtabcolumn[0].val_R.edge = Affine_R;
    }
    break;
  case Affine_D:
    Atabcolumn[low_row].Dvalue = 0;
    Rtabcolumn[0].val_D.edge = from_edge;
    if (diag == 0)
    {
      Diagcolumn[0].val_D.currentrowindex = 0 + offset;
      Diagcolumn[0].val_D.last_type = from_edge;
      Rtabcolumn[0].val_D.idx = 0;
      Rtabcolumn[0].val_D.edge = Affine_D;
    }
    break;
  case Affine_I:
    Atabcolumn[low_row].Ivalue = 0;
    Rtabcolumn[0].val_I.edge = from_edge;
    if (diag == 0)
    {
      Diagcolumn[0].val_I.currentrowindex = 0 + offset;
      Diagcolumn[0].val_I.last_type = from_edge;
      Rtabcolumn[0].val_I.idx = 0;
      Rtabcolumn[0].val_I.edge = Affine_I;
    }
    break;
  default:
    /* Affine_X: R starts free, D and I pay the gap opening */
    Atabcolumn[low_row].Rvalue = 0;
    Atabcolumn[low_row].Dvalue = gap_opening;
    Atabcolumn[low_row].Ivalue = gap_opening;
    Rtabcolumn[0].val_I.edge = from_edge;
    Rtabcolumn[0].val_R.edge = from_edge;
    Rtabcolumn[0].val_D.edge = from_edge;
    if (diag == 0)
    {
      Diagcolumn[0].val_R.currentrowindex = 0 + offset;
      Diagcolumn[0].val_D.currentrowindex = 0 + offset;
      Diagcolumn[0].val_I.currentrowindex = 0 + offset;
      Rtabcolumn[0].val_R.idx = 0;
      Rtabcolumn[0].val_R.edge = Affine_R;
      Rtabcolumn[0].val_D.idx = 0;
      Rtabcolumn[0].val_D.edge = Affine_D;
      Rtabcolumn[0].val_I.idx = 0;
      Rtabcolumn[0].val_I.edge = Affine_I;
    }
  }
  /* rows below the top of the band: only deletions are possible in
     column 0, so extend the D state downwards */
  for (rowindex = low_row+1; rowindex <= high_row; rowindex++)
  {
    Atabcolumn[rowindex-low_row].Rvalue = GT_WORD_MAX;
    Atabcolumn[rowindex-low_row].Dvalue = add_safe_max(
                                         Atabcolumn[rowindex-low_row-1].Dvalue,
                                         gap_extension);
    Atabcolumn[rowindex-low_row].Ivalue = GT_WORD_MAX;
    if (diag == -(GtWord)rowindex)
    {
      /* middle diagonal crosses this row in column 0 */
      Diagcolumn[0].val_D.last_type = from_edge;
      Diagcolumn[0].val_D.lastcpoint = GT_UWORD_MAX;
      Diagcolumn[0].val_D.currentrowindex = rowindex + offset;
      Rtabcolumn[rowindex-low_row].val_D.idx = 0;
      Rtabcolumn[rowindex-low_row].val_D.edge = Affine_D;
      set_invalid_Diagentry(&Diagcolumn[0].val_R);
      set_invalid_Diagentry(&Diagcolumn[0].val_I);
    }
    else
    {
      /* propagate the crosspoint links downwards */
      Rtabcolumn[rowindex-low_row] = Rtabcolumn[rowindex-low_row-1];
    }
  }
}
/* Calculate all columns of the banded affine DP in linear space, recording
 * in Diagcolumn the crosspoints where the optimal path meets the middle
 * diagonal of the band. Returns the R-node (index and edge) of the last
 * crosspoint of the optimal path, or {GT_UWORD_MAX, Affine_X} if the path
 * never touches the middle diagonal. */
static GtAffineAlignRnode evaluateallaffineDBcolumns(
                                          GtLinspaceManagement *spacemanager,
                                          GtAffineDiagAlignentry *Diagcolumn,
                                          const GtScoreHandler *scorehandler,
                                          GtAffineAlignEdge edge,
                                          GtAffineAlignEdge from_edge,
                                          GtAffineAlignEdge to_edge,
                                          GtUword offset,
                                          const GtUchar *useq,
                                          GtUword ustart, GtUword ulen,
                                          const GtUchar *vseq,
                                          GtUword vstart, GtUword vlen,
                                          GtWord left_dist, GtWord right_dist)
{
  GtUword gap_extension, gap_opening, colindex, rowindex, low_row, high_row;
  /* low_row/high_row: lowest and highest row inside the diagonal band */
  GtWord diag, r_dist, d_dist, i_dist, minvalue, rcost;
  bool last_row = false;
  GtAffinealignDPentry *Atabcolumn, northwestAffinealignDPentry,
                       westAffinealignDPentry = (GtAffinealignDPentry)
                                    {GT_WORD_MAX, GT_WORD_MAX, GT_WORD_MAX};;
  GtAffineAlignRtabentry *Rtabcolumn, northwestRtabentry, westRtabentry = {{0}};
  GtAffineAlignRnode lastcpoint = {GT_UWORD_MAX, Affine_X};

  /* band must cover the diagonal of the end point (global alignment) */
  if ((left_dist > GT_MIN(0, (GtWord)vlen-(GtWord)ulen))||
      (right_dist < GT_MAX(0, (GtWord)vlen-(GtWord)ulen)))
  {
    gt_assert(false);
  }
  Atabcolumn = gt_linspace_management_get_valueTabspace(spacemanager);
  Rtabcolumn = gt_linspace_management_get_rTabspace(spacemanager);

  /* middle diagonal of the band */
  diag = GT_DIV2(left_dist + right_dist);
  low_row = 0;
  high_row = -left_dist;
  if (high_row == ulen)
    last_row = true;
  gap_opening = gt_scorehandler_get_gap_opening(scorehandler);
  gap_extension = gt_scorehandler_get_gapscore(scorehandler);

  /* first column */
  firstaffineDBtabcolumn(Atabcolumn, Rtabcolumn, Diagcolumn, edge, from_edge,
                       offset, left_dist, right_dist, gap_opening,
                       gap_extension);
  /* next columns */
  for (colindex = 1; colindex <= vlen; colindex++)
  {
    northwestAffinealignDPentry = Atabcolumn[0];
    northwestRtabentry = Rtabcolumn[0];
    if (colindex > right_dist)
    {
      /* band shifts downwards by one row */
      if (low_row != high_row) {
        westAffinealignDPentry = Atabcolumn[1];
        westRtabentry = Rtabcolumn[1];
      }
      low_row++;
    }
    else
    {
      westAffinealignDPentry = Atabcolumn[0];
      westRtabentry = Rtabcolumn[0];
    }
    if (high_row < ulen)
      high_row ++;
    if (!last_row && low_row == high_row)
    {/* prev is outside of diagonalband*/
      westAffinealignDPentry = (GtAffinealignDPentry)
                                     {GT_WORD_MAX, GT_WORD_MAX, GT_WORD_MAX};
      westRtabentry.val_R = (GtAffineAlignRnode) {GT_UWORD_MAX, Affine_X};
      westRtabentry.val_D = (GtAffineAlignRnode) {GT_UWORD_MAX, Affine_X};
      westRtabentry.val_I = (GtAffineAlignRnode) {GT_UWORD_MAX, Affine_X};
    }

    /* insertion for the topmost band entry */
    r_dist = add_safe_max(westAffinealignDPentry.Rvalue,
                          gap_extension + gap_opening);
    d_dist = add_safe_max(westAffinealignDPentry.Dvalue,
                          gap_extension + gap_opening);
    i_dist = add_safe_max(westAffinealignDPentry.Ivalue, gap_extension);
    minvalue = GT_MIN3(r_dist, d_dist, i_dist);
    Atabcolumn[0].Ivalue = minvalue;
    Atabcolumn[0].Rvalue = GT_WORD_MAX;
    Atabcolumn[0].Dvalue = GT_WORD_MAX;

    if (diag == (GtWord)colindex - (GtWord)low_row)
    {
      /* this entry lies on the middle diagonal: store a crosspoint */
      set_invalid_Diagentry(&Diagcolumn[colindex].val_R);
      set_invalid_Diagentry(&Diagcolumn[colindex].val_D);
      set_valid_Diagentry(&Diagcolumn[colindex].val_I, &westRtabentry, minvalue,
                          r_dist, i_dist, d_dist);
      Diagcolumn[colindex].val_I.currentrowindex = low_row + offset;
      set_invalid_Rnode(&Rtabcolumn[0].val_R);
      set_invalid_Rnode(&Rtabcolumn[0].val_D);
      Rtabcolumn[0].val_I.idx = colindex;
      Rtabcolumn[0].val_I.edge = Affine_I;
    }
    else
    {
      /* off-diagonal: only forward the crosspoint links */
      set_valid_Rnode(&Rtabcolumn[0].val_I, &westRtabentry, minvalue,
                      r_dist, i_dist, d_dist);
      Rtabcolumn[0].val_D = (GtAffineAlignRnode) {GT_UWORD_MAX, Affine_X};
      Rtabcolumn[0].val_R = (GtAffineAlignRnode) {GT_UWORD_MAX, Affine_X};
    }

    /* replacement possible for 0-entry */
    if (low_row > 0 )
    {
      rcost = gt_scorehandler_get_replacement(scorehandler,
                              useq[ustart+low_row-1], vseq[vstart+colindex-1]);
      r_dist = add_safe_max(northwestAffinealignDPentry.Rvalue, rcost);
      d_dist = add_safe_max(northwestAffinealignDPentry.Dvalue, rcost);
      i_dist = add_safe_max(northwestAffinealignDPentry.Ivalue, rcost);
      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[0].Rvalue = minvalue;
      if (diag == (GtWord)colindex - (GtWord)low_row)
      {
        set_valid_Diagentry(&Diagcolumn[colindex].val_R, &northwestRtabentry,
                            minvalue, r_dist, i_dist, d_dist);
        Diagcolumn[colindex].val_R.currentrowindex = low_row + offset;
        Rtabcolumn[0].val_R.idx = colindex;
        Rtabcolumn[0].val_R.edge = Affine_R;
      }
      else
      {
        set_valid_Rnode(&Rtabcolumn[0].val_R, &northwestRtabentry,
                        minvalue, r_dist, i_dist, d_dist);
      }
    }

    /* remaining entries of the column */
    for (rowindex = low_row + 1; rowindex <= high_row; rowindex++)
    {
      northwestAffinealignDPentry = westAffinealignDPentry;
      northwestRtabentry = westRtabentry;
      if (!last_row && rowindex == high_row)
      {/* prev is outside of diagonalband*/
        westAffinealignDPentry = (GtAffinealignDPentry)
                                     {GT_WORD_MAX, GT_WORD_MAX, GT_WORD_MAX};
        westRtabentry.val_R = (GtAffineAlignRnode) {GT_UWORD_MAX, Affine_X};
        westRtabentry.val_D = (GtAffineAlignRnode) {GT_UWORD_MAX, Affine_X};
        westRtabentry.val_I = (GtAffineAlignRnode) {GT_UWORD_MAX, Affine_X};
      }
      else if (low_row > 0)
      {/* shifted diagonalband*/
        westAffinealignDPentry = Atabcolumn[rowindex-low_row+1];
        westRtabentry = Rtabcolumn[rowindex-low_row+1];
      }
      else
      {/* normaly prev*/
        westAffinealignDPentry = Atabcolumn[rowindex-low_row];
        westRtabentry = Rtabcolumn[rowindex-low_row];
      }
      if (rowindex == ulen)
        last_row = true;

      /* insertion */
      r_dist = add_safe_max(westAffinealignDPentry.Rvalue,
                            gap_extension+gap_opening);
      d_dist = add_safe_max(westAffinealignDPentry.Dvalue,
                            gap_extension+gap_opening);
      i_dist = add_safe_max(westAffinealignDPentry.Ivalue,gap_extension);
      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[rowindex-low_row].Ivalue = minvalue;
      if (diag == (GtWord)colindex - (GtWord)rowindex)
      {
        set_valid_Diagentry(&Diagcolumn[colindex].val_I, &westRtabentry,
                            minvalue, r_dist, i_dist, d_dist);
        Diagcolumn[colindex].val_I.currentrowindex = rowindex+offset;
        Rtabcolumn[rowindex-low_row].val_I.idx = colindex;
        Rtabcolumn[rowindex-low_row].val_I.edge = Affine_I;
      }
      else
      {
        set_valid_Rnode(&Rtabcolumn[rowindex-low_row].val_I, &westRtabentry,
                        minvalue,r_dist,i_dist,d_dist);
      }

      /* replacement */
      rcost = gt_scorehandler_get_replacement(scorehandler,
                             useq[ustart+rowindex-1], vseq[vstart+colindex-1]);
      r_dist = add_safe_max(northwestAffinealignDPentry.Rvalue, rcost);
      d_dist = add_safe_max(northwestAffinealignDPentry.Dvalue, rcost);
      i_dist = add_safe_max(northwestAffinealignDPentry.Ivalue, rcost);
      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[rowindex-low_row].Rvalue = minvalue;
      if (diag == (GtWord)colindex - (GtWord)rowindex)
      {
        set_valid_Diagentry(&Diagcolumn[colindex].val_R, &northwestRtabentry,
                            minvalue, r_dist, i_dist, d_dist);
        Diagcolumn[colindex].val_R.currentrowindex = rowindex+offset;
        Rtabcolumn[rowindex-low_row].val_R.idx = colindex;
        Rtabcolumn[rowindex-low_row].val_R.edge = Affine_R;
      }
      else
      {
        set_valid_Rnode(&Rtabcolumn[rowindex-low_row].val_R,&northwestRtabentry,
                        minvalue,r_dist,i_dist,d_dist);
      }

      /* deletion */
      r_dist = add_safe_max(Atabcolumn[rowindex-low_row-1].Rvalue,
                            gap_extension+gap_opening);
      d_dist = add_safe_max(Atabcolumn[rowindex-low_row-1].Dvalue,
                            gap_extension);
      i_dist = add_safe_max(Atabcolumn[rowindex-low_row-1].Ivalue,
                            gap_extension+gap_opening);
      minvalue = GT_MIN3(r_dist, d_dist, i_dist);
      Atabcolumn[rowindex-low_row].Dvalue = minvalue;
      if (diag == (GtWord)colindex - (GtWord)rowindex)
      {
        set_valid_Diagentry(&Diagcolumn[colindex].val_D,
                            &Rtabcolumn[rowindex-low_row-1], minvalue,
                            r_dist,i_dist, d_dist);
        Diagcolumn[colindex].val_D.currentrowindex = rowindex+offset;
        Rtabcolumn[rowindex-low_row].val_D.idx = colindex;
        Rtabcolumn[rowindex-low_row].val_D.edge = Affine_D;
      }
      else
      {
        set_valid_Rnode(&Rtabcolumn[rowindex-low_row].val_D,
                        &Rtabcolumn[rowindex-low_row-1], minvalue,
                        r_dist,i_dist,d_dist);
      }
    }
  }
  /* last crosspoint of optimal path: pick the cheapest of the three states
     in the last band entry, charging a gap opening where the transition
     into to_edge requires one */
  r_dist = Atabcolumn[high_row-low_row].Rvalue;
  d_dist = Atabcolumn[high_row-low_row].Dvalue;
  i_dist = Atabcolumn[high_row-low_row].Ivalue;
  switch (to_edge)
  {
    case Affine_I:
      r_dist = add_safe_max (r_dist, gap_opening);
      d_dist = add_safe_max (d_dist, gap_opening);
      break;
    case Affine_D:
      r_dist = add_safe_max (r_dist, gap_opening);
      i_dist = add_safe_max (i_dist, gap_opening);
      break;
    default:
      break;
  }
  minvalue = GT_MIN3(r_dist, d_dist, i_dist);
  if (minvalue == r_dist)
    lastcpoint = Rtabcolumn[high_row-low_row].val_R;
  else if (minvalue == i_dist)
    lastcpoint = Rtabcolumn[high_row-low_row].val_I;
  else if (minvalue == d_dist)
    lastcpoint = Rtabcolumn[high_row-low_row].val_D;
  return lastcpoint;
}
/* Calculate the affine crosspoints relating to the middle diagonal of the
 * band recursively (divide and conquer): first find the crosspoints on the
 * current middle diagonal, then recurse into the subproblems between
 * consecutive crosspoints with a narrowed band. Diagcolumn is filled with
 * the crosspoint chain; the return value is the node (index and edge) of
 * the last crosspoint. */
static GtAffineAlignRnode evaluateaffineDBcrosspoints(
                                          GtLinspaceManagement *spacemanager,
                                          GtAffineDiagAlignentry *Diagcolumn,
                                          const GtScoreHandler *scorehandler,
                                          GtAffineAlignEdge edge,
                                          GtAffineAlignEdge from_edge,
                                          GtAffineAlignEdge to_edge,
                                          GtUword rowoffset,
                                          GtUword coloffset,
                                          const GtUchar *useq,
                                          GtUword ustart,
                                          GtUword ulen,
                                          const GtUchar *vseq,
                                          GtUword vstart,
                                          GtUword vlen,
                                          GtWord left_dist,
                                          GtWord right_dist)
{
  GtUword i,new_ulen, new_vlen, col_start, col_end,
          row_start = 0, row_end;
  GtWord new_left, new_right, diag;
  GtDiagAlignentry cpoint = {0,0,0}, prevcpoint;
  GtAffineDiagAlignentry temp_entry;
  GtAffineAlignRnode rpoint, temprpoint, lastrpoint;
  GtAffineAlignEdge prevcp_type,cp_type;

  /* middle diagonal of the current band */
  diag = GT_DIV2(left_dist+right_dist);
  gt_assert(vstart == coloffset);
  /* base case: u-sequence empty -> the alignment is all insertions */
  if (ulen == 0)
  {
    switch (edge) {
      case(Affine_R):
      {
        Diagcolumn[0].val_R.currentrowindex = rowoffset;
        Diagcolumn[0].val_R.last_type = from_edge;
        prevcp_type = Affine_R;
        break;
      }
      case(Affine_I):
      {
        Diagcolumn[0].val_I.currentrowindex = rowoffset;
        Diagcolumn[0].val_I.last_type = from_edge;
        prevcp_type = Affine_I;
        break;
      }
      case(Affine_D):
      {
        Diagcolumn[0].val_D.currentrowindex = rowoffset;
        Diagcolumn[0].val_D.last_type = from_edge;
        prevcp_type = Affine_D;
        break;
      }
      default:
      {
        Diagcolumn[0].val_I.currentrowindex = rowoffset;
        Diagcolumn[0].val_I.last_type = from_edge;
        prevcp_type = Affine_I;
      }
    }
    /* chain I-crosspoints along the row */
    for (i = 1; i <=vlen; i++)
    {
      Diagcolumn[i].val_I.currentrowindex = rowoffset;
      Diagcolumn[i].val_I.last_type = prevcp_type;
      prevcp_type = Affine_I;
    }
    return (GtAffineAlignRnode) {vlen, prevcp_type};
  }
  /* base case: v-sequence empty -> the alignment is all deletions */
  if (vlen == 0)
  {
    switch (edge) {
      case(Affine_R):
      {
        Diagcolumn[0].val_R.currentrowindex = rowoffset;
        Diagcolumn[0].val_R.last_type = from_edge;
        break;
      }
      case(Affine_I):
      {
        Diagcolumn[0].val_I.currentrowindex = rowoffset;
        Diagcolumn[0].val_I.last_type = from_edge;
        break;
      }
      case(Affine_D):
      {
        Diagcolumn[0].val_D.currentrowindex = rowoffset;
        Diagcolumn[0].val_D.last_type = from_edge;
        break;
      }
      default:
      {
        Diagcolumn[0].val_D.currentrowindex = rowoffset;
        Diagcolumn[0].val_D.last_type = from_edge;
      }
    }
    return (GtAffineAlignRnode) {0, edge};
  }
  /* if the whole subproblem fits into the available space, solve it
     directly in square space */
  if (gt_linspace_management_checksquare(spacemanager, ulen, vlen,
                                         sizeof (GtAffinealignDPentry),
                                         sizeof (GtAffineAlignRtabentry)))
  { /* call square function */
    return affineDtab_in_square_space(spacemanager, Diagcolumn,
                                      useq, ustart, ulen, vseq, vstart, vlen,
                                      left_dist, right_dist, rowoffset,
                                      from_edge, edge, to_edge,scorehandler);
  }
  /* find the crosspoints on the current middle diagonal */
  rpoint = evaluateallaffineDBcolumns(spacemanager, Diagcolumn, scorehandler,
                                      edge, from_edge, to_edge, rowoffset,
                                      useq, ustart, ulen, vseq, vstart, vlen,
                                      left_dist, right_dist);
  lastrpoint = rpoint;
  col_start = rpoint.idx;
  cp_type = rpoint.edge;
  /* if no crosspoint is found: narrow the band towards the middle and
     retry (the optimal path never touched the middle diagonal) */
  if (col_start == GT_UWORD_MAX)
  {
    gt_assert(diag != 0);
    if (diag < 0)
    {
      return evaluateaffineDBcrosspoints(spacemanager, Diagcolumn, scorehandler,
                                         edge, from_edge, to_edge, rowoffset,
                                         coloffset, useq, ustart, ulen, vseq,
                                         vstart, vlen, diag+1, right_dist);
    } else
    {
      if (diag > 0)
      {
        return evaluateaffineDBcrosspoints(spacemanager, Diagcolumn,
                                           scorehandler,
                                           edge, from_edge, to_edge, rowoffset,
                                           coloffset, useq, ustart, ulen, vseq,
                                           vstart, vlen, left_dist, diag-1);
      }
    }
  }
  else
  {
    /* load the last crosspoint found */
    switch (cp_type) {
      case Affine_R:
        cpoint = Diagcolumn[col_start].val_R;
        cp_type = Affine_R;
        row_start = Diagcolumn[col_start].val_R.currentrowindex;
        break;
      case Affine_D:
        cpoint = Diagcolumn[col_start].val_D;
        cp_type = Affine_D;
        row_start = Diagcolumn[col_start].val_D.currentrowindex;
        break;
      default:
        gt_assert(cp_type == Affine_I);
        cpoint = Diagcolumn[col_start].val_I;
        cp_type = Affine_I;
        row_start = Diagcolumn[col_start].val_I.currentrowindex;
        break;
    }
  }
  /* exception, if last cpoint != (m+1)entry: recurse into the region
     between the last crosspoint and the lower right corner */
  if (col_start != vlen)
  {
    if (diag + ((GtWord)ulen-(GtWord)vlen) > 0)
    {
      /* remaining path leaves the crosspoint with a deletion */
      new_ulen = ulen - (row_start+1-rowoffset);
      new_vlen = vlen-col_start;
      new_left = GT_MAX((GtWord)left_dist-diag+1,
                        -(GtWord)new_ulen);
      new_right = 0;
      /* save/restore the crosspoint entry around the recursion, which
         reuses the same Diagcolumn slot */
      temp_entry = Diagcolumn[col_start];
      lastrpoint = evaluateaffineDBcrosspoints(spacemanager,
                           Diagcolumn+col_start, scorehandler, Affine_D,
                           cpoint.last_type, to_edge,
                           row_start+1, coloffset+col_start, useq,
                           row_start+1, new_ulen, vseq,
                           vstart+col_start, new_vlen, new_left, new_right);
      Diagcolumn[col_start] = temp_entry;
      Diagcolumn[col_start+1].val_R.last_type = cp_type;
      Diagcolumn[col_start+1].val_D.last_type = cp_type;
      Diagcolumn[col_start+1].val_I.last_type = cp_type;
      lastrpoint.idx += col_start;
    }
    else
    {
      /* remaining path leaves the crosspoint with an insertion */
      new_ulen = ulen - (row_start - rowoffset);
      new_vlen = vlen-col_start-1;
      new_left = -1;
      new_right = GT_MIN((GtWord)right_dist-((GtWord)diag)-1,new_vlen);
      lastrpoint = evaluateaffineDBcrosspoints(spacemanager,
                          Diagcolumn+col_start+1,scorehandler, Affine_I,
                          cp_type, to_edge, row_start, coloffset+col_start+1,
                          useq, row_start, new_ulen, vseq, vstart+col_start+1,
                          new_vlen, new_left, new_right);
      lastrpoint.idx += (col_start+1);
    }
  }
  /* walk the crosspoint chain backwards, recursing into every region
     between two consecutive crosspoints that is not already trivial */
  while (cpoint.lastcpoint != GT_UWORD_MAX)
  {
    prevcpoint = cpoint;
    prevcp_type = cp_type;
    col_end = col_start;
    row_end = row_start;
    col_start = prevcpoint.lastcpoint;
    switch (prevcpoint.last_type) {
      case Affine_R:
        cpoint = Diagcolumn[col_start].val_R;
        cp_type = Affine_R;
        row_start = Diagcolumn[col_start].val_R.currentrowindex;
        break;
      case Affine_D:
        cpoint = Diagcolumn[col_start].val_D;
        cp_type = Affine_D;
        row_start = Diagcolumn[col_start].val_D.currentrowindex;
        break;
      default:
        gt_assert(prevcpoint.last_type == Affine_I);
        cpoint = Diagcolumn[col_start].val_I;
        cp_type = Affine_I;
        row_start = Diagcolumn[col_start].val_I.currentrowindex;
        break;
    }
    if (prevcp_type == Affine_R ||
        ((prevcp_type == Affine_I) && (col_end-col_start == 1)))
    {
      continue;/* next crosspoint is also on the diagonal*/
    }
    else if (prevcp_type == Affine_D)
    {
      /* region below the diagonal: recurse starting with an insertion */
      new_ulen = row_end - row_start - 1;
      new_vlen = col_end-col_start-1;
      new_left = -1;
      new_right = GT_MIN(right_dist-diag-1,new_vlen);
      temprpoint = evaluateaffineDBcrosspoints(spacemanager,
                             Diagcolumn+col_start+1, scorehandler,
                             Affine_I, cp_type, Affine_D, row_start,
                             coloffset + col_start + 1, useq, row_start,
                             new_ulen, vseq, vstart + col_start + 1,
                             new_vlen, new_left, new_right);
      if (temprpoint.idx + col_start + 1 < vlen)
      {
        /* fix up the predecessor type of the entry after the subproblem */
        GtUword update_idx = temprpoint.idx+1+col_start+1;
        Diagcolumn[update_idx].val_R.last_type = temprpoint.edge;
        Diagcolumn[update_idx].val_D.last_type = temprpoint.edge;
        Diagcolumn[update_idx].val_I.last_type = temprpoint.edge;
      }
      if (temprpoint.idx + col_start + 1 == lastrpoint.idx)
      {
        lastrpoint = temprpoint;
        lastrpoint.idx += col_start + 1;
      }
    }
    else if (prevcp_type == Affine_I)
    {
      /* region above the diagonal: recurse starting with a deletion */
      new_ulen = row_end - row_start-1;
      new_left = GT_MAX(left_dist-diag+1,
                        -(GtWord)new_ulen);
      new_right = 0;
      /* save/restore the crosspoint entry around the recursion */
      temp_entry = Diagcolumn[col_start];
      temprpoint = evaluateaffineDBcrosspoints(spacemanager,
                          Diagcolumn + col_start, scorehandler,
                          Affine_D, cpoint.last_type, Affine_I, row_start + 1,
                          coloffset + col_start,
                          useq, row_start+1, new_ulen,
                          vseq, vstart+col_start, col_end-col_start-1,
                          new_left, new_right);
      Diagcolumn[col_start] = temp_entry;
      Diagcolumn[col_start+1].val_R.last_type = cp_type;
      Diagcolumn[col_start+1].val_D.last_type = cp_type;
      Diagcolumn[col_start+1].val_I.last_type = cp_type;
      Diagcolumn[col_end].val_I.last_type = temprpoint.edge;
    }
    else
    {
      /* if (Diagcolumn[cpoint].last_type == Linear_X), never reach this line */
      gt_assert(false);
    }
  }
  col_end = col_start;
  row_end = row_start;
  /* exception, if first crosspoint != 0-entry (given the assertion
     vstart == coloffset above this tests col_end != 0): recurse into the
     region before the first crosspoint */
  if (vstart-coloffset != col_end)
  {
    switch (cp_type) {
      case Affine_D:
        if (row_end == ustart-1)
          gt_assert(false);
        new_ulen = row_end-ustart-1;
        new_left = GT_MAX(-new_ulen, diag);
        new_right = GT_MIN(right_dist, (GtWord)col_end);
        rpoint = evaluateaffineDBcrosspoints(spacemanager, Diagcolumn,
                                scorehandler, edge, from_edge,Affine_D,
                                rowoffset,coloffset, useq, ustart,
                                new_ulen, vseq, vstart, col_end,
                                new_left, new_right);
        if (col_start + 1 <= vlen)
        {
          Diagcolumn[col_start+1].val_R.last_type = rpoint.edge;
          Diagcolumn[col_start+1].val_D.last_type = rpoint.edge;
          Diagcolumn[col_start+1].val_I.last_type = rpoint.edge;
        }
        if (rpoint.idx == lastrpoint.idx)
          lastrpoint = rpoint;
        break;
      case Affine_I:
        new_ulen = row_end-ustart;
        new_vlen = col_end-1;
        new_left = GT_MAX(left_dist,
                          -(GtWord)new_ulen);
        new_right = GT_MIN(diag,new_vlen);
        rpoint = evaluateaffineDBcrosspoints(spacemanager, Diagcolumn,
                          scorehandler,edge, from_edge, Affine_I,rowoffset,
                          coloffset, useq, ustart, new_ulen, vseq, vstart,
                          new_vlen, new_left, new_right);
        Diagcolumn[col_start].val_I.last_type = rpoint.edge;
        break;
      default:
        gt_assert(false);
    }
  }
  else if (cp_type == Affine_D)
  {
    /* first crosspoint is the 0-entry reached by a deletion: the path
       starts with a replacement edge out of (0,0) */
    Diagcolumn[1].val_I.last_type = Affine_R;
    Diagcolumn[1].val_D.last_type = Affine_R;
    Diagcolumn[1].val_R.last_type = Affine_R;
    Diagcolumn[0].val_R.currentrowindex = rowoffset;
    Diagcolumn[0].val_R.last_type = from_edge;
  }
  return lastrpoint;
}
/* Calculate an alignment in linear space within the specified diagonal band
 * [left_dist, right_dist] with affine gap costs. Handles the trivial cases
 * (empty u or v), falls back to the square-space variant when the whole DP
 * matrix fits into the available space, and otherwise runs the recursive
 * crosspoint computation followed by the traceback. */
static void gt_calc_diagonalbandaffinealign(GtLinspaceManagement *spacemanager,
                                            const GtScoreHandler *scorehandler,
                                            GtAlignment *align,
                                            const GtUchar *useq,
                                            GtUword ustart, GtUword ulen,
                                            const GtUchar *vseq,
                                            GtUword vstart, GtUword vlen,
                                            GtWord left_dist,
                                            GtWord right_dist)
{
  /* Atabcolumn/Rtabcolumn are only used for sizeof in the space checks */
  GtAffinealignDPentry *Atabcolumn;
  GtAffineAlignRtabentry *Rtabcolumn;
  GtAffineDiagAlignentry *Diagcolumn;
  GtAffineAlignRnode lastnode;
  GtUword idx, gap_extension;

  gt_assert(align && scorehandler);
  /* band must cover the diagonal of the end point */
  if ((left_dist > GT_MIN(0, (GtWord)vlen-(GtWord)ulen))||
      (right_dist < GT_MAX(0, (GtWord)vlen-(GtWord)ulen)))
  {
    gt_assert(false); /* no global alignment */
  }
  gt_linspace_management_set_ulen(spacemanager, ulen);
  gap_extension = gt_scorehandler_get_gapscore(scorehandler);
  /* trivial cases: one of the sequences is empty */
  if (ulen == 0UL)
  {
    (void) gt_reconstructalignment_trivial_insertion(align, vlen,
                                                     gap_extension);
    return;
  }
  else if (vlen == 0UL)
  {
    (void) gt_reconstructalignment_trivial_deletion(align, ulen,
                                                    gap_extension);
    return;
  }
  /* if the full matrix fits into the available space, use the simpler
     square-space implementation */
  if (gt_linspace_management_checksquare(spacemanager, ulen, vlen,
                                         sizeof (*Atabcolumn),
                                         sizeof (*Rtabcolumn)))
  {
    (void) gt_diagonalbandalign_affinegapcost_in_square_space_generic(
                                                       spacemanager,
                                                       scorehandler, align,
                                                       useq, ustart, ulen,
                                                       vseq, vstart, vlen,
                                                       left_dist, right_dist);
    return;
  }
  /* ensure the linear-space buffers are large enough for the band */
  gt_linspace_management_check(spacemanager, GT_MIN(right_dist-left_dist,ulen),
                               vlen, sizeof (*Atabcolumn), sizeof (*Rtabcolumn),
                               sizeof (*Diagcolumn));
  Diagcolumn = gt_linspace_management_get_crosspointTabspace(spacemanager);
  /* initialize Diagcolumn */
  for (idx = 0; idx <= vlen; idx++)
  {
    Diagcolumn[idx].val_R =
                     (GtDiagAlignentry) {GT_UWORD_MAX, GT_UWORD_MAX, Affine_X};
    Diagcolumn[idx].val_D =
                     (GtDiagAlignentry) {GT_UWORD_MAX, GT_UWORD_MAX, Affine_X};
    Diagcolumn[idx].val_I =
                     (GtDiagAlignentry) {GT_UWORD_MAX, GT_UWORD_MAX, Affine_X};
  }
  /* compute the crosspoint chain recursively */
  lastnode = evaluateaffineDBcrosspoints(spacemanager, Diagcolumn, scorehandler,
                                         Affine_X, Affine_X, Affine_X, 0, 0,
                                         useq, ustart, ulen, vseq, vstart, vlen,
                                         left_dist, right_dist);
  /* reconstruct alignment */
  gt_reconstructalignment_from_affineDtab(align, Diagcolumn, lastnode.edge,
                                          useq, ulen, vseq, vlen);
}
/* Compute an alignment with affine gap costs within a diagonal band,
 * using the cost values of the given score handler. The band is clamped
 * to the sequence lengths before the computation starts. */
void gt_diagonalbandalign_affinegapcost_compute_generic(GtLinspaceManagement
                                                        *spacemanager,
                                                        const GtScoreHandler
                                                        *scorehandler,
                                                        GtAlignment *align,
                                                        const GtUchar *useq,
                                                        GtUword ustart,
                                                        GtUword ulen,
                                                        const GtUchar *vseq,
                                                        GtUword vstart,
                                                        GtUword vlen,
                                                        GtWord left_dist,
                                                        GtWord right_dist)
{
  gt_assert(useq && vseq && spacemanager && scorehandler && align);
  /* clamp the band in case the caller passed bounds outside of the
     DP matrix */
  if (left_dist < -(GtWord) ulen)
    left_dist = -(GtWord) ulen;
  if (right_dist > (GtWord) vlen)
    right_dist = (GtWord) vlen;
  gt_alignment_set_seqs(align, useq + ustart, ulen, vseq + vstart, vlen);
  gt_calc_diagonalbandaffinealign(spacemanager, scorehandler, align,
                                  useq, ustart, ulen,
                                  vseq, vstart, vlen,
                                  left_dist, right_dist);
}
/* Compute an alignment with affine gap costs within a diagonal band,
 * constructing a temporary score handler from the four given cost values
 * and delegating to the generic variant. */
void gt_diagonalbandalign_affinegapcost_compute(GtLinspaceManagement
                                                *spacemanager,
                                                GtAlignment *align,
                                                const GtUchar *useq,
                                                GtUword ustart, GtUword ulen,
                                                const GtUchar *vseq,
                                                GtUword vstart, GtUword vlen,
                                                GtWord left_dist,
                                                GtWord right_dist,
                                                GtUword matchcost,
                                                GtUword mismatchcost,
                                                GtUword gap_opening,
                                                GtUword gap_extension)
{
  GtScoreHandler *handler;

  handler = gt_scorehandler_new(matchcost, mismatchcost, gap_opening,
                                gap_extension);
  gt_diagonalbandalign_affinegapcost_compute_generic(spacemanager, handler,
                                                     align, useq, ustart, ulen,
                                                     vseq, vstart, vlen,
                                                     left_dist, right_dist);
  gt_scorehandler_delete(handler);
}
/* Self-check: compute the affine diagonal-band distance with three
 * different implementations (square space distance-only, linear-space
 * alignment + re-evaluation, linear-space distance-only) over the full
 * band and abort the program if any two results disagree. */
void gt_diagonalbandalign_affinegapcost_check(GT_UNUSED bool forward,
                                              const GtUchar *useq, GtUword ulen,
                                              const GtUchar *vseq, GtUword vlen)
{
  GtUword affine_cost1, affine_cost2, affine_cost3,
          matchcost = 0, mismatchcost = 1,
          gap_opening = 2, gap_extension = 1;
  GtWord left_dist, right_dist;
  GtAlignment *align;
  GtScoreHandler *scorehandler;
  GtLinspaceManagement *spacemanager;

  /* the gap symbol must not occur in the input sequences */
  if (memchr(useq, LINEAR_EDIST_GAP,ulen) != NULL)
  {
    fprintf(stderr,"%s: sequence u contains gap symbol\n",__func__);
    exit(GT_EXIT_PROGRAMMING_ERROR);
  }
  if (memchr(vseq, LINEAR_EDIST_GAP,vlen) != NULL)
  {
    fprintf(stderr,"%s: sequence v contains gap symbol\n",__func__);
    exit(GT_EXIT_PROGRAMMING_ERROR);
  }
  /* use the maximal band: covers the whole DP matrix */
  left_dist = -ulen;
  right_dist = vlen;
  scorehandler = gt_scorehandler_new(matchcost, mismatchcost,
                                     gap_opening, gap_extension);
  gt_scorehandler_plain(scorehandler);
  gt_scorehandler_downcase(scorehandler);

  /* reference: square-space distance-only computation */
  affine_cost1 = gt_diagonalbandalign_affinegapcost_square_space_distance_only(
                                              useq, 0, ulen, vseq, 0, vlen,
                                              left_dist, right_dist, scorehandler);
  /* linear-space alignment, then re-evaluate its affine score */
  align = gt_alignment_new_with_seqs(useq, ulen, vseq, vlen);
  spacemanager = gt_linspace_management_new();
  gt_calc_diagonalbandaffinealign(spacemanager, scorehandler, align,
                                  useq, 0, ulen, vseq, 0, vlen,
                                  left_dist, right_dist);
  gt_linspace_management_delete(spacemanager);
  affine_cost2 = gt_alignment_eval_with_affine_score(align, true, matchcost,
                                                     mismatchcost,
                                                     gap_opening,
                                                     gap_extension);
  if (affine_cost1 != affine_cost2)
  {
    fprintf(stderr,"gt_diagonalband_affinegapcost_square_space_distance_only = "
            GT_WU" != "GT_WU" = gt_alignment_eval_generic_with_affine_score\n",
            affine_cost1, affine_cost2);
    exit(GT_EXIT_PROGRAMMING_ERROR);
  }
  /* linear-space distance-only computation */
  affine_cost3 = diagonalband_linear_affine(useq, 0, ulen, vseq, 0, vlen,
                                            left_dist, right_dist, scorehandler);
  if (affine_cost3 != affine_cost2)
  {
    fprintf(stderr,"diagonalband_linear_affine = "GT_WU
            " != "GT_WU" = gt_alignment_eval_generic_with_affine_score\n",
            affine_cost3, affine_cost2);
    exit(GT_EXIT_PROGRAMMING_ERROR);
  }
  gt_scorehandler_delete(scorehandler);
  gt_alignment_delete(align);
}
|
f7330b21b264e0d314842851d4098509b23ed240
|
0e9d7903fae239aa9ff52cd507fdb19a571295cc
|
/rules_haskell_tests/tests/indirect-link/cbits/impl.c
|
666ce43962a388e5453363ae4d6f261c0e77b5a4
|
[
"Apache-2.0"
] |
permissive
|
tweag/rules_haskell
|
e3548d9368e9068dbbff4d6740d6096425170b4a
|
9d29e14b313123a0735abd0989c620d754500452
|
refs/heads/master
| 2023-08-09T02:19:25.690871
| 2023-08-04T16:20:08
| 2023-08-04T16:20:08
| 109,669,839
| 255
| 109
|
Apache-2.0
| 2023-09-07T15:04:48
| 2017-11-06T08:50:01
|
Starlark
|
UTF-8
|
C
| false
| false
| 117
|
c
|
impl.c
|
static int thing;
int real_get_thing(void) {
return thing;
}
void real_set_thing(int value) {
thing = value;
}
|
f482e1fe188c7cb4f299b40d7fc51f8e317c7bef
|
1885ce333f6980ab6aad764b3f8caf42094d9f7d
|
/third_party/btyacc_tp/btyacc/mstring.h
|
51941deb4094ff649ecd95446cb94ada925e1999
|
[
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
satya-das/cppparser
|
1dbccdeed4287c36c61edc30190c82de447e415b
|
f9a4cfac1a3af7286332056d7c661d86b6c35eb3
|
refs/heads/master
| 2023-07-06T00:55:23.382303
| 2022-10-03T19:40:05
| 2022-10-03T19:40:05
| 16,642,636
| 194
| 26
|
MIT
| 2023-06-26T13:44:32
| 2014-02-08T12:20:01
|
C++
|
UTF-8
|
C
| false
| false
| 633
|
h
|
mstring.h
|
#ifndef _string_h_
#define _string_h_
struct mstring {
char *base, *ptr, *end;
};
void msprintf(struct mstring *, const char *, ...);
int mputchar(struct mstring *, int);
struct mstring *msnew(void);
void mstrim(struct mstring *, const char *);
char *msdone(struct mstring *);
/* compare two strings, ignoring whitespace, except between two letters or
** digits (and treat all of these as equal) */
int strnscmp(const char *, const char *);
/* hash a string, ignoring whitespace */
unsigned int strnshash(const char *);
#define mputc(m, ch) ((m)->ptr==(m)->end?mputchar(m,ch):(*(m)->ptr++=(ch)))
#endif /* _string_h_ */
|
7d24668253fa1412e1ec99027dc6dfdf71d6ab61
|
f9e7d65cb784c01a0200145ba8d289afe41d4a56
|
/include/hwtimer.h
|
3c0e9aaf8a330361f4911294a982325999e732b2
|
[
"BSD-3-Clause"
] |
permissive
|
FrameworkComputer/EmbeddedController
|
ad7086769e87d0a4179eae96a7c9ff5e383ff54e
|
f6d6b927eed71550d3475411cfc3e59abe5cef2a
|
refs/heads/hx20-hx30
| 2023-08-08T20:45:10.621169
| 2023-05-26T07:03:59
| 2023-05-26T07:03:59
| 447,021,040
| 846
| 48
|
BSD-3-Clause
| 2023-05-26T07:04:59
| 2022-01-12T00:11:14
|
C
|
UTF-8
|
C
| false
| false
| 3,485
|
h
|
hwtimer.h
|
/* Copyright 2013 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
/* Hardware timer driver API */
#ifndef __CROS_EC_HWTIMER_H
#define __CROS_EC_HWTIMER_H
/**
* Programs when the next timer should fire an interrupt.
*
* The deadline is ahead of the current counter (which may of course wrap) by
* the number of microseconds until the interrupt should fire.
*
* @param deadline timestamp of the event in microseconds
*/
void __hw_clock_event_set(uint32_t deadline);
/* Returns the timestamp of the next programed event */
uint32_t __hw_clock_event_get(void);
/* Cancel the next event programed by __hw_clock_event_set */
void __hw_clock_event_clear(void);
/**
* Get the lower 32-bits of the free-running counter used as clock
*
* The counter resolution must be 1us, since udelay() relies on this.
*
* @return current counter value
*/
#ifdef CONFIG_HWTIMER_64BIT
__override_proto
#endif
uint32_t __hw_clock_source_read(void);
/**
* Override the lower 32-bits of the hardware counter
*
* The new value takes effect immediately and the counter continues counting
* from there, assuming it is enabled
*
* @ts Value to write
*/
void __hw_clock_source_set(uint32_t ts);
/**
* Get the 64-bit value of the free-running counter used as clock,
* only available when CONFIG_HWTIMER_64BIT is enabled.
*
* This function should only be used by common/timer.c or
* chip-specific code, as get_time() abstracts the config option away.
*/
uint64_t __hw_clock_source_read64(void);
/**
* Override the 64-bit value of the free-running counter used as
* clock, only available when CONFIG_HWTIMER_64BIT is enabled.
*
* This function should only be used by common/timer.c or
* chip-specific code, as force_time() abstracts the config option
* away.
*/
void __hw_clock_source_set64(uint64_t timestamp);
/**
* Enable clock to a timer.
*
* @param n Timer number to enable/disable
* @param enable Enable (!=0) or disable (=0) clock to timer
*/
void __hw_timer_enable_clock(int n, int enable);
/**
* Initializes the hardware timer used to provide clock services, using the
* specified start timer value.
*
* It returns the IRQ number of the timer routine.
*/
int __hw_clock_source_init(uint32_t start_t);
/**
* Initializes the hardware timer used to provide clock services, using the
* specified start timer value (CONFIG_HWTIMER_64BIT enabled).
*
* It returns the IRQ number of the timer routine.
*/
int __hw_clock_source_init64(uint64_t start_t);
/**
* Searches the next deadline and program it in the timer hardware.
*
* overflow: if true, the 32-bit counter as overflowed since the last
* call. Goes unused if CONFIG_HWTIMER_64BIT is enabled.
*
* This function is exported from the common timers code as an helper for the
* hardware timer interrupt routine.
*/
void process_timers(int overflow);
/**
* Set up the timer that we will use as a watchdog warning.
*
* Once this has been set up, we will print a warning shortly before the
* real watchdog fires. To avoid this, hwtimer_reset_watchdog() must be
* called periodically.
*
* This is needed since the real watchdog timer (IWDG) does not provide
* an interrupt to warn of an impending watchdog reset.
*/
void hwtimer_setup_watchdog(void);
/* Reset the watchdog timer, to avoid the watchdog warning */
void hwtimer_reset_watchdog(void);
#endif /* __CROS_EC_HWTIMER_H */
|
8a4248e56bc059f4b9f8da9ae4d19def031a2b47
|
407c96d904cf46a5f95217e44071f999783698a3
|
/src/Wasm.h
|
77071075ce098d8877343049e8a679411ae8d0db
|
[
"BSD-3-Clause",
"LicenseRef-scancode-protobuf",
"LGPL-2.1-only",
"Swift-exception",
"MIT",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
trustwallet/wallet-core
|
dfeb276ddf1e3faf46c82f0a0cda2551e9c873f6
|
0c8e2e58aa8eb1360e4a6b03df91fb2de97e3caa
|
refs/heads/master
| 2023-08-21T11:58:43.983035
| 2023-08-18T10:51:13
| 2023-08-18T10:51:13
| 170,738,310
| 2,311
| 1,283
|
Apache-2.0
| 2023-09-08T11:26:39
| 2019-02-14T18:25:54
|
C++
|
UTF-8
|
C
| false
| false
| 498
|
h
|
Wasm.h
|
// Copyright © 2017-2023 Trust Wallet.
//
// This file is part of Trust. The full Trust copyright notice, including
// terms governing use, modification, and redistribution, is contained in the
// file LICENSE at the root of the source code distribution tree.
#ifndef __USE_WASM
#define __USE_WASM
#endif // __USE_WASM
#ifndef __USE_MISC
#ifdef __USE_WASM
typedef unsigned long int ulong;
typedef unsigned short int ushort;
typedef unsigned int uint;
#endif // __USE_WASM
#endif // __USE_MISC
|
3b8f33e8ff6e927b151372d5fd7639744ccd3d0d
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/mail/meta1/files/patch-smtps_smtps.c
|
64ece87b7c165278763aadb118d077cebda093d6
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
C
| false
| false
| 424
|
c
|
patch-smtps_smtps.c
|
--- smtps/smtps.c.orig 2022-09-24 16:59:28 UTC
+++ smtps/smtps.c
@@ -2168,9 +2168,11 @@ ss_proxy_policy(sasl_conn_t *conn, void *context, cons
return SASL_OK;
}
+typedef int (*sasl_callback_ft)(void);
+
static sasl_callback_t sm_sasl_srvcbs[] =
{
- { SASL_CB_PROXY_POLICY, &ss_proxy_policy, NULL },
+ { SASL_CB_PROXY_POLICY, (sasl_callback_ft)&ss_proxy_policy, NULL },
{ SASL_CB_LIST_END, NULL, NULL }
};
#endif
|
77f732bfed7cee72e56b1d25bfed015938a4072e
|
05819963250c2ae0ba59ffef48d7c99a5b6b7cfd
|
/drivers/audio/pdmic.c
|
92cb195bef13805a41dd88210b6d72fc23377a7d
|
[
"LicenseRef-scancode-bsd-atmel"
] |
permissive
|
atmelcorp/atmel-software-package
|
cefa3213069995d453d3b47b8b3aa7a7aca683ac
|
e0428c7c8175a42a2460cff27bb0501db0bbe160
|
refs/heads/master
| 2023-04-13T16:34:56.181081
| 2023-04-06T17:30:10
| 2023-04-11T06:05:12
| 47,840,424
| 117
| 94
|
NOASSERTION
| 2022-10-20T03:07:15
| 2015-12-11T17:18:56
|
C
|
UTF-8
|
C
| false
| false
| 9,591
|
c
|
pdmic.c
|
/* ----------------------------------------------------------------------------
* SAM Software Package License
* ----------------------------------------------------------------------------
* Copyright (c) 2016, Atmel Corporation
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the disclaimer below.
*
* Atmel's name may not be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ----------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------
* Headers
*----------------------------------------------------------------------------*/
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include "audio/pdmic.h"
#include "callback.h"
#include "chip.h"
#include "dma/dma.h"
#include "errno.h"
#include "mm/cache.h"
#include "peripherals/pmc.h"
#include "trace.h"
/*----------------------------------------------------------------------------
* Local functions
*----------------------------------------------------------------------------*/
static int _pdmic_dma_transfer_callback(void* arg, void* arg2)
{
struct _pdmic_desc* desc = (struct _pdmic_desc*)arg;
cache_invalidate_region(desc->rx.dma.cfg.daddr, desc->rx.dma.cfg.len);
dma_reset_channel(desc->rx.dma.channel);
mutex_unlock(&desc->rx.mutex);
return callback_call(&desc->rx.callback, NULL);
}
static void _pdmic_dma_transfer(struct _pdmic_desc* desc, struct _buffer* buffer)
{
struct _callback _cb;
memset(&desc->rx.dma.cfg, 0, sizeof(desc->rx.dma.cfg));
desc->rx.dma.cfg.saddr = (void*)&desc->addr->PDMIC_CDR;
desc->rx.dma.cfg.daddr = buffer->data;
if (desc->dsp_size == PDMIC_CONVERTED_DATA_SIZE_32) {
desc->rx.dma.cfg.len = buffer->size / 4;
desc->rx.dma.cfg_dma.data_width = DMA_DATA_WIDTH_WORD;
} else {
desc->rx.dma.cfg.len = buffer->size / 2;
desc->rx.dma.cfg_dma.data_width = DMA_DATA_WIDTH_HALF_WORD;
}
dma_configure_transfer(desc->rx.dma.channel, &desc->rx.dma.cfg_dma, &desc->rx.dma.cfg, 1);
callback_set(&_cb, _pdmic_dma_transfer_callback, (void*)desc);
dma_set_callback(desc->rx.dma.channel, &_cb);
dma_start_transfer(desc->rx.dma.channel);
}
static void _pdmic_polling_transfer(struct _pdmic_desc* desc, struct _buffer* buffer)
{
uint16_t* data = (uint16_t*)buffer->data;
uint32_t length = buffer->size / sizeof(uint16_t);
volatile uint32_t current = 0;
while (current < length) {
if (pdmic_data_ready(desc)) {
/* start copy data from PDMIC_CDR to memory */
*data = desc->addr->PDMIC_CDR;
data++;
current++;
}
}
mutex_unlock(&desc->rx.mutex);
callback_call(&desc->rx.callback, NULL);
}
/*----------------------------------------------------------------------------
* Exported functions
*----------------------------------------------------------------------------*/
void pdmic_enable(struct _pdmic_desc* desc)
{
uint32_t id = get_pdmic_id_from_addr(desc->addr);
pmc_enable_gck(id);
pmc_enable_peripheral(id);
/* Enable the overrun error interrupt */
desc->addr->PDMIC_IER = PDMIC_IER_OVRE;
}
void pdmic_disable(struct _pdmic_desc* desc)
{
uint32_t id = get_pdmic_id_from_addr(desc->addr);
/* Disable the overrun error interrupt */
desc->addr->PDMIC_IDR = PDMIC_IDR_OVRE;
pmc_disable_gck(id);
pmc_disable_peripheral(id);
}
int pdmic_configure(struct _pdmic_desc *desc)
{
uint32_t mr_val;
uint32_t dspr0_val, dspr1_val;
uint32_t pclk_rate, gclk_rate;
uint32_t pclk_prescal, gclk_prescal;
uint32_t f_pdmic;
uint32_t id = get_pdmic_id_from_addr(desc->addr);
desc->addr->PDMIC_CR = (desc->addr->PDMIC_CR & ~PDMIC_CR_ENPDM) | PDMIC_CR_SWRST;
if (desc->channels != 1) {
trace_error("only supports one channel\n");
return -EINVAL;
}
switch (desc->dsp_size) {
case PDMIC_CONVERTED_DATA_SIZE_16:
dspr0_val = PDMIC_DSPR0_SIZE_16;
break;
case PDMIC_CONVERTED_DATA_SIZE_32:
dspr0_val = PDMIC_DSPR0_SIZE_32;
break;
default:
return -EINVAL;
}
switch (desc->dsp_osr) {
case PDMIC_OVER_SAMPLING_RATIO_64:
dspr0_val |= PDMIC_DSPR0_OSR(1);
break;
case PDMIC_OVER_SAMPLING_RATIO_128:
dspr0_val |= PDMIC_DSPR0_OSR(0);
break;
default:
return -EINVAL;
}
switch (desc->dsp_hpfbyp) {
case PDMIC_DSP_HIGH_PASS_FILTER_ON:
dspr0_val &= ~PDMIC_DSPR0_HPFBYP;
break;
case PDMIC_DSP_HIGH_PASS_FILTER_OFF:
dspr0_val |= PDMIC_DSPR0_HPFBYP;
break;
default:
return -EINVAL;
}
switch (desc->dsp_sinbyp) {
case PDMIC_DSP_SINCC_PASS_FILTER_ON:
dspr0_val &= ~PDMIC_DSPR0_SINBYP;
break;
case PDMIC_DSP_SINCC_PASS_FILTER_OFF:
dspr0_val |= PDMIC_DSPR0_SINBYP;
break;
default:
return -EINVAL;
}
if (desc->dsp_shift < PDMIC_DSPR_SHIFT_MAX_VAL)
dspr0_val |= PDMIC_DSPR0_SHIFT(desc->dsp_shift);
else
return -EINVAL;
if (desc->dsp_scale < PDMIC_DSPR_SCALE_MAX_VAL)
dspr0_val |= PDMIC_DSPR0_SCALE(desc->dsp_scale);
else
return false;
dspr1_val = PDMIC_DSPR1_OFFSET(desc->dsp_offset);
if (desc->dsp_dgain < PDMIC_DSPR_DGAIN_MAX_VAL)
dspr1_val |= PDMIC_DSPR1_DGAIN(desc->dsp_dgain);
else
return -EINVAL;
desc->addr->PDMIC_DSPR0 = dspr0_val;
desc->addr->PDMIC_DSPR1 = dspr1_val;
f_pdmic = (desc->sample_rate * desc->dsp_osr);
pclk_rate = pmc_get_peripheral_clock(id);
gclk_rate = pmc_get_gck_clock(id);
/* PRESCAL = SELCK/(2*f_pdmic) - 1*/
pclk_prescal = (uint32_t)(pclk_rate / (f_pdmic << 1)) - 1;
gclk_prescal = (uint32_t)(gclk_rate / (f_pdmic << 1)) - 1;
if (pclk_prescal < PDMIC_MR_PRESCAL_MAX_VAL) {
mr_val = PDMIC_MR_PRESCAL(pclk_prescal) | PDMIC_MR_CLKS_PCLK;
} else if (gclk_prescal < PDMIC_MR_PRESCAL_MAX_VAL) {
mr_val = PDMIC_MR_PRESCAL(gclk_prescal) | PDMIC_MR_CLKS_GCLK;
} else {
trace_error("PDMIC Prescal configure error");
return -EINVAL;
}
desc->rx.dma.channel = dma_allocate_channel(id, DMA_PERIPH_MEMORY);
assert(desc->rx.dma.channel);
desc->rx.dma.cfg_dma.incr_saddr = false;
desc->rx.dma.cfg_dma.incr_daddr = true;
desc->rx.dma.cfg_dma.loop = false;
desc->rx.dma.cfg_dma.chunk_size = DMA_CHUNK_SIZE_1;
/* write configuration */
desc->addr->PDMIC_MR = mr_val;
return 0;
}
int pdmic_set_gain(struct _pdmic_desc* desc, uint16_t dgain, uint8_t scale)
{
uint32_t dspr0_scale, dspr1_dgain;
if (dgain < PDMIC_DSPR_DGAIN_MAX_VAL &&
scale < PDMIC_DSPR_SCALE_MAX_VAL) {
dspr0_scale = PDMIC_DSPR0_SCALE(scale);
dspr1_dgain = PDMIC_DSPR1_DGAIN(dgain);
desc->addr->PDMIC_DSPR0 &= ~PDMIC_DSPR0_SCALE_Msk;
desc->addr->PDMIC_DSPR1 &= ~PDMIC_DSPR1_DGAIN_Msk;
desc->addr->PDMIC_DSPR0 |= dspr0_scale;
desc->addr->PDMIC_DSPR1 |= dspr1_dgain;
return 0;
}
return -EINVAL;
}
void pdmic_stream_convert(struct _pdmic_desc* desc, bool flag)
{
if (flag)
desc->addr->PDMIC_CR |= PDMIC_CR_ENPDM;
else
desc->addr->PDMIC_CR &= ~PDMIC_CR_ENPDM;
}
int pdmic_init(struct _pdmic_desc *desc)
{
uint32_t id = get_pdmic_id_from_addr(desc->addr);
#if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG)
uint32_t pclk, gclk;
pclk = pmc_get_peripheral_clock(id);
trace_debug("-- PDMIC PCLK: %uMHz --\n\r", (unsigned)(pclk / 1000000));
#endif
/* The gclk clock frequency must always be three times
* lower than the pclk clock frequency
*/
struct _pmc_periph_cfg cfg = {
.gck = {
.css = PMC_PCR_GCKCSS_PLLA_CLK,
.div = 18,
},
};
pmc_configure_peripheral(id, &cfg, true);
#if (TRACE_LEVEL >= TRACE_LEVEL_DEBUG)
gclk = pmc_get_gck_clock(id);
trace_debug("-- PDMIC GCLK: %uMHz --\n\r", (unsigned)(gclk / 1000000));
#endif
pdmic_enable(desc);
return pdmic_configure(desc);
}
bool pdmic_data_ready(struct _pdmic_desc* desc)
{
return (desc->addr->PDMIC_ISR & PDMIC_ISR_DRDY) == PDMIC_ISR_DRDY;
}
int pdmic_transfer(struct _pdmic_desc* desc, struct _buffer* buf, struct _callback* cb)
{
uint8_t tmode;
tmode = desc->transfer_mode;
if ((buf == NULL) || (buf->size == 0))
return -EINVAL;
if (buf->attr & PDMIC_BUF_ATTR_READ) {
mutex_lock(&desc->rx.mutex);
callback_copy(&desc->rx.callback, cb);
desc->rx.transferred = 0;
desc->rx.buffer.data = buf->data;
desc->rx.buffer.size = buf->size;
desc->rx.buffer.attr = buf->attr;
if (tmode == PDMIC_MODE_DMA)
_pdmic_dma_transfer(desc, buf);
else if (tmode == PDMIC_MODE_POLLING)
_pdmic_polling_transfer(desc, buf);
}
return 0;
}
bool pdmic_rx_transfer_is_done(struct _pdmic_desc* desc)
{
return (!mutex_is_locked(&desc->rx.mutex));
}
void pdmic_rx_stop(struct _pdmic_desc* desc)
{
if (desc->transfer_mode == PDMIC_MODE_DMA) {
if (desc->rx.dma.channel){
dma_stop_transfer(desc->rx.dma.channel);
mutex_unlock(&desc->rx.mutex);
}
}
}
|
9c13e2db4fd4dbc635e418774bbfdc65d9f7c0a7
|
74e52e506f15c01e2dd1acd35508c7da26cf9c80
|
/io_stream.c
|
5e4e31d2d214c3d961180d780f7eacd7a0e11ab2
|
[
"PHP-3.01",
"LicenseRef-scancode-other-permissive",
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
mongodb/mongo-php-driver-legacy
|
2c596fdaa2be607ffebfc20226ffdefbc816337e
|
ce796fef410487eeec220e0562adf33fb9ecae09
|
refs/heads/master
| 2023-08-23T06:37:26.114245
| 2022-01-31T17:54:30
| 2022-01-31T17:54:30
| 121,842
| 169
| 72
|
NOASSERTION
| 2022-01-31T17:55:11
| 2009-02-04T21:30:35
|
PHP
|
UTF-8
|
C
| false
| false
| 38,957
|
c
|
io_stream.c
|
/**
* Copyright 2009-2014 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "io_stream.h"
#include "log_stream.h"
#include "mcon/types.h"
#include "mcon/utils.h"
#include "mcon/manager.h"
#include "mcon/connections.h"
#include "php_mongo.h"
#include "contrib/crypto.h"
#include "api/wire_version.h"
#ifdef PHP_WIN32
# include "config.w32.h"
#else
# include <php_config.h>
#endif
#include <php.h>
#include <main/php_streams.h>
#include <main/php_network.h>
#include <ext/standard/file.h>
#include <ext/standard/sha1.h>
#include <ext/standard/base64.h>
#include <ext/standard/php_string.h>
#if PHP_WIN32
# include "win32/winutil.h"
#endif
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#ifdef HAVE_MONGO_OPENSSL
# include "contrib/php-ssl.h"
#endif
#if HAVE_MONGO_SASL
#include <sasl/sasl.h>
#include <sasl/saslutil.h>
#endif
#define PHP_MONGO_SCRAM_HASH_1 "SCRAM-SHA-1"
#define PHP_MONGO_SCRAM_SERVER_KEY "Server Key"
#define PHP_MONGO_SCRAM_CLIENT_KEY "Client Key"
#define PHP_MONGO_SCRAM_HASH_SIZE 20
extern zend_class_entry *mongo_ce_ConnectionException;
ZEND_EXTERN_MODULE_GLOBALS(mongo)
#ifdef HAVE_MONGO_OPENSSL
# if PHP_VERSION_ID < 50600
int php_mongo_verify_hostname(mongo_server_def *server, X509 *cert TSRMLS_DC)
{
if (php_mongo_matches_san_list(cert, server->host) == SUCCESS) {
return SUCCESS;
}
if (php_mongo_matches_common_name(cert, server->host TSRMLS_CC) == SUCCESS) {
return SUCCESS;
}
return FAILURE;
}
# endif
#endif
void* php_mongo_io_stream_connect(mongo_con_manager *manager, mongo_server_def *server, mongo_server_options *options, char **error_message)
{
char *errmsg;
int errcode;
php_stream *stream;
char *hash = mongo_server_create_hash(server);
struct timeval ctimeout = {0, 0};
char *dsn;
int dsn_len;
int tcp_socket = 1;
zend_error_handling error_handler;
TSRMLS_FETCH();
if (server->host[0] == '/') {
dsn_len = spprintf(&dsn, 0, "unix://%s", server->host);
tcp_socket = 0;
} else {
dsn_len = spprintf(&dsn, 0, "tcp://%s:%d", server->host, server->port);
}
/* Connection timeout behavior varies based on the following:
* - Negative => no timeout (i.e. block indefinitely)
* - Zero => not specified (PHP will use default_socket_timeout)
* - Positive => used specified timeout */
if (options->connectTimeoutMS) {
/* Convert negative value to -1 second, which implies no timeout */
int connectTimeoutMS = options->connectTimeoutMS < 0 ? -1000 : options->connectTimeoutMS;
ctimeout.tv_sec = connectTimeoutMS / 1000;
ctimeout.tv_usec = (connectTimeoutMS % 1000) * 1000;
mongo_manager_log(manager, MLOG_CON, MLOG_FINE, "Connecting to %s (%s) with connection timeout: %d.%06d", dsn, hash, ctimeout.tv_sec, ctimeout.tv_usec);
} else {
mongo_manager_log(manager, MLOG_CON, MLOG_FINE, "Connecting to %s (%s) without connection timeout (default_socket_timeout will be used)", dsn, hash);
}
zend_replace_error_handling(EH_THROW, mongo_ce_ConnectionException, &error_handler TSRMLS_CC);
stream = php_stream_xport_create(dsn, dsn_len, 0, STREAM_XPORT_CLIENT | STREAM_XPORT_CONNECT, hash, options->connectTimeoutMS > 0 ? &ctimeout : NULL, (php_stream_context *)options->ctx, &errmsg, &errcode);
zend_restore_error_handling(&error_handler TSRMLS_CC);
efree(dsn);
free(hash);
if (!stream) {
/* error_message will be free()d, but errmsg was allocated by PHP and needs efree() */
*error_message = strdup(errmsg);
efree(errmsg);
return NULL;
}
if (tcp_socket) {
int socket = ((php_netstream_data_t*)stream->abstract)->socket;
int flag = 1;
setsockopt(socket, IPPROTO_TCP, TCP_NODELAY, (char *) &flag, sizeof(int));
}
if (options->ssl) {
int crypto_enabled;
/* Capture the server certificate so we can do further verification */
if (stream->context) {
zval capture;
ZVAL_BOOL(&capture, 1);
php_stream_context_set_option(stream->context, "ssl", "capture_peer_cert", &capture);
}
zend_replace_error_handling(EH_THROW, mongo_ce_ConnectionException, &error_handler TSRMLS_CC);
/* PHP 5.6.0 until 5.6.7 screwed things a bit, see https://bugs.php.net/bug.php?id=69195 */
#if PHP_VERSION_ID >= 50600 && PHP_VERSION_ID < 50607
if (php_stream_xport_crypto_setup(stream, STREAM_CRYPTO_METHOD_ANY_CLIENT, NULL TSRMLS_CC) < 0) {
#else
if (php_stream_xport_crypto_setup(stream, STREAM_CRYPTO_METHOD_SSLv23_CLIENT, NULL TSRMLS_CC) < 0) {
#endif
zend_restore_error_handling(&error_handler TSRMLS_CC);;
*error_message = strdup("Cannot setup SSL, is ext/openssl loaded?");
php_stream_close(stream);
return NULL;
}
crypto_enabled = php_stream_xport_crypto_enable(stream, 1 TSRMLS_CC);
zend_restore_error_handling(&error_handler TSRMLS_CC);;
if (crypto_enabled < 0) {
/* Setting up crypto failed. Thats only OK if we only preferred it */
if (options->ssl == MONGO_SSL_PREFER) {
/* FIXME: We can't actually get here because we reject setting
* this option to prefer in mcon/parse.c. This is however
* probably what we need to do in the future when mongod starts
* actually supporting this! :) */
mongo_manager_log(manager, MLOG_CON, MLOG_INFO, "stream_connect: Failed establishing SSL for %s:%d", server->host, server->port);
php_stream_xport_crypto_enable(stream, 0 TSRMLS_CC);
} else {
*error_message = strdup("Can't connect over SSL, is mongod running with SSL?");
php_stream_close(stream);
return NULL;
}
} else if (stream->context) {
#ifdef HAVE_MONGO_OPENSSL
zval **zcert;
if (php_stream_context_get_option(stream->context, "ssl", "peer_certificate", &zcert) == SUCCESS && Z_TYPE_PP(zcert) == IS_RESOURCE) {
zval **verify_peer_name, **verify_expiry;
int resource_type;
X509 *cert;
int type;
zend_list_find(Z_LVAL_PP(zcert), &resource_type);
cert = (X509 *)zend_fetch_resource(zcert TSRMLS_CC, -1, "OpenSSL X.509", &type, 1, resource_type);
if (!cert) {
*error_message = strdup("Couldn't capture remote certificate to validate");
mongo_manager_log(manager, MLOG_CON, MLOG_WARN, "Could not capture certificate of %s:%d", server->host, server->port);
php_stream_close(stream);
return NULL;
}
#if PHP_VERSION_ID < 50600
/* This option is available since PHP 5.6.0 */
if (php_stream_context_get_option(stream->context, "ssl", "verify_peer_name", &verify_peer_name) == SUCCESS && zend_is_true(*verify_peer_name)) {
if (php_mongo_verify_hostname(server, cert TSRMLS_CC) == FAILURE) {
*error_message = strdup("Cannot verify remote certificate: Hostname doesn't match");
mongo_manager_log(manager, MLOG_CON, MLOG_WARN, "Remote certificate SubjectAltName or CN does not match '%s'", server->host);
php_stream_close(stream);
return NULL;
}
mongo_manager_log(manager, MLOG_CON, MLOG_INFO, "stream_connect: Valid peer name for %s:%d", server->host, server->port);
} else {
mongo_manager_log(manager, MLOG_CON, MLOG_WARN, "Not verifying peer name for %s:%d, please use 'verify_peer_name' SSL context option", server->host, server->port);
}
#endif
if (php_stream_context_get_option(stream->context, "ssl", "verify_expiry", &verify_expiry) == SUCCESS && zend_is_true(*verify_expiry)) {
time_t current = time(NULL);
time_t valid_from = php_mongo_asn1_time_to_time_t(X509_get_notBefore(cert) TSRMLS_CC);
time_t valid_until = php_mongo_asn1_time_to_time_t(X509_get_notAfter(cert) TSRMLS_CC);
if (valid_from > current) {
*error_message = strdup("Failed expiration check: Certificate is not valid yet");
mongo_manager_log(manager, MLOG_CON, MLOG_WARN, "Certificate is not valid yet on %s:%d", server->host, server->port);
php_stream_close(stream);
return NULL;
}
if (current > valid_until) {
*error_message = strdup("Failed expiration check: Certificate has expired");
mongo_manager_log(manager, MLOG_CON, MLOG_WARN, "Certificate has expired on %s:%d", server->host, server->port);
php_stream_close(stream);
return NULL;
}
mongo_manager_log(manager, MLOG_CON, MLOG_INFO, "stream_connect: Valid issue and expire dates for %s:%d", server->host, server->port);
} else {
mongo_manager_log(manager, MLOG_CON, MLOG_WARN, "Certificate expiration checks disabled");
}
}
#endif /* HAVE_MONGO_OPENSSL */
mongo_manager_log(manager, MLOG_CON, MLOG_INFO, "stream_connect: Establish SSL for %s:%d", server->host, server->port);
}
} else {
mongo_manager_log(manager, MLOG_CON, MLOG_INFO, "stream_connect: Not establishing SSL for %s:%d", server->host, server->port);
}
/* Socket timeout behavior uses the same logic as connectTimeoutMS */
if (options->socketTimeoutMS) {
struct timeval rtimeout = {0, 0};
/* Convert negative value to -1 second, which implies no timeout */
int socketTimeoutMS = options->socketTimeoutMS < 0 ? -1000 : options->socketTimeoutMS;
rtimeout.tv_sec = socketTimeoutMS / 1000;
rtimeout.tv_usec = (socketTimeoutMS % 1000) * 1000;
php_stream_set_option(stream, PHP_STREAM_OPTION_READ_TIMEOUT, 0, &rtimeout);
mongo_manager_log(MonGlo(manager), MLOG_CON, MLOG_FINE, "Setting stream timeout to %d.%06d", rtimeout.tv_sec, rtimeout.tv_usec);
}
/* Avoid a weird leak warning in debug mode when freeing the stream */
#if ZEND_DEBUG
stream->__exposed = 1;
#endif
return stream;
}
/* Returns the bytes read on success
* Returns -31 on unknown failure
* Returns -80 on timeout
* Returns -32 when remote server closes the connection
*/
int php_mongo_io_stream_read(mongo_connection *con, mongo_server_options *options, int timeout, void *data, int size, char **error_message)
{
int num = 1, received = 0, revert_timeout = 0;
int socketTimeoutMS = options->socketTimeoutMS;
struct timeval rtimeout = {0, 0};
TSRMLS_FETCH();
/* Use default_socket_timeout INI setting if zero */
if (socketTimeoutMS == 0) {
socketTimeoutMS = FG(default_socket_timeout) * 1000;
}
/* Convert negative values to -1 second, which implies no timeout */
socketTimeoutMS = socketTimeoutMS < 0 ? -1000 : socketTimeoutMS;
timeout = timeout < 0 ? -1000 : timeout;
/* Socket timeout behavior varies based on the following:
* - Negative => no timeout (i.e. block indefinitely)
* - Zero => not specified (no changes to existing configuration)
* - Positive => used specified timeout (revert to previous value later) */
if (timeout && timeout != socketTimeoutMS) {
rtimeout.tv_sec = timeout / 1000;
rtimeout.tv_usec = (timeout % 1000) * 1000;
revert_timeout = 1; /* We'll want to revert to the old timeout later */
php_stream_set_option(con->socket, PHP_STREAM_OPTION_READ_TIMEOUT, 0, &rtimeout);
mongo_manager_log(MonGlo(manager), MLOG_CON, MLOG_FINE, "Setting the stream timeout to %d.%06d", rtimeout.tv_sec, rtimeout.tv_usec);
} else {
/* Calculate this now in case we need it for the "timed_out" error message */
rtimeout.tv_sec = socketTimeoutMS / 1000;
rtimeout.tv_usec = (socketTimeoutMS % 1000) * 1000;
/* Apply socketTimeoutMS in case the timeout was altered by another
* MongoClient (the stream may be a persistent connection). From the
* perspective of this MongoClient, the timeout is not changing. */
php_stream_set_option(con->socket, PHP_STREAM_OPTION_READ_TIMEOUT, 0, &rtimeout);
mongo_manager_log(MonGlo(manager), MLOG_CON, MLOG_FINE, "No timeout changes for %s", con->hash);
}
php_mongo_stream_notify_io(con->socket, MONGO_STREAM_NOTIFY_IO_READ, 0, size TSRMLS_CC);
/* this can return FAILED if there is just no more data from db */
while (received < size && num > 0) {
int len = 4096 < (size - received) ? 4096 : size - received;
zend_error_handling error_handler;
zend_replace_error_handling(EH_THROW, mongo_ce_ConnectionException, &error_handler TSRMLS_CC);
num = php_stream_read(con->socket, (char *) data, len);
zend_restore_error_handling(&error_handler TSRMLS_CC);;
if (num < 0) {
/* Doesn't look like this can happen, php_sockop_read overwrites
* the failure from recv() to return 0 */
*error_message = strdup("Read from socket failed");
return -31;
}
/* It *may* have failed. It also may simply have no data */
if (num == 0) {
zval *metadata;
MAKE_STD_ZVAL(metadata);
array_init(metadata);
if (php_stream_populate_meta_data(con->socket, metadata)) {
zval **tmp;
if (zend_hash_find(Z_ARRVAL_P(metadata), "timed_out", sizeof("timed_out"), (void**)&tmp) == SUCCESS) {
convert_to_boolean_ex(tmp);
if (Z_BVAL_PP(tmp)) {
*error_message = malloc(256);
snprintf(*error_message, 256, "Read timed out after reading %d bytes, waited for %d.%06d seconds", num, rtimeout.tv_sec, rtimeout.tv_usec);
zval_ptr_dtor(&metadata);
return -80;
}
}
if (zend_hash_find(Z_ARRVAL_P(metadata), "eof", sizeof("eof"), (void**)&tmp) == SUCCESS) {
convert_to_boolean_ex(tmp);
if (Z_BVAL_PP(tmp)) {
*error_message = strdup("Remote server has closed the connection");
zval_ptr_dtor(&metadata);
return -32;
}
}
}
zval_ptr_dtor(&metadata);
}
data = (char*)data + num;
received += num;
}
/* PHP may have sent notify-progress of *more then* 'received' in some
* cases.
* PHP will read 8192 byte chunks at a time, but if we request less data
* then that PHP will just buffer the rest, which is fine. It could
* confuse users a little, why their progress update was higher then the
* max-bytes-expected though... */
php_mongo_stream_notify_io(con->socket, MONGO_STREAM_NOTIFY_IO_COMPLETED, received, size TSRMLS_CC);
/* If the timeout was changed, revert to the previous value now */
if (revert_timeout) {
/* If socketTimeoutMS was never specified, revert to default_socket_timeout */
if (options->socketTimeoutMS == 0) {
mongo_manager_log(MonGlo(manager), MLOG_CON, MLOG_FINE, "Stream timeout will be reverted to default_socket_timeout (%d)", FG(default_socket_timeout));
}
rtimeout.tv_sec = socketTimeoutMS / 1000;
rtimeout.tv_usec = (socketTimeoutMS % 1000) * 1000;
php_stream_set_option(con->socket, PHP_STREAM_OPTION_READ_TIMEOUT, 0, &rtimeout);
mongo_manager_log(MonGlo(manager), MLOG_CON, MLOG_FINE, "Now setting stream timeout back to %d.%06d", rtimeout.tv_sec, rtimeout.tv_usec);
}
return received;
}
/* Writes `size` bytes from `data` to the connection's stream.
 *
 * Stream-level warnings raised by PHP during the write are converted into
 * MongoConnectionException throws via a temporary error handler. A
 * completion progress notification is only emitted when the full buffer
 * was written. Returns the byte count reported by php_stream_write(). */
int php_mongo_io_stream_send(mongo_connection *con, mongo_server_options *options, void *data, int size, char **error_message)
{
	int bytes_written;
	zend_error_handling eh;
	TSRMLS_FETCH();

	php_mongo_stream_notify_io(con->socket, MONGO_STREAM_NOTIFY_IO_WRITE, 0, size TSRMLS_CC);

	/* Route stream errors into MongoConnectionException while writing */
	zend_replace_error_handling(EH_THROW, mongo_ce_ConnectionException, &eh TSRMLS_CC);
	bytes_written = php_stream_write(con->socket, (char *) data, size);
	zend_restore_error_handling(&eh TSRMLS_CC);

	if (bytes_written >= size) {
		php_mongo_stream_notify_io(con->socket, MONGO_STREAM_NOTIFY_IO_COMPLETED, size, size TSRMLS_CC);
	}

	return bytes_written;
}
/* Closes the connection's stream.
 *
 * MONGO_CLOSE_BROKEN: the stream (a persistent resource) is freed
 * immediately. MONGO_CLOSE_SHUTDOWN (or any other reason): nothing to do,
 * the stream was already freed from the persistent_list. */
void php_mongo_io_stream_close(mongo_connection *con, int why)
{
	TSRMLS_FETCH();

	if (why != MONGO_CLOSE_BROKEN) {
		/* MONGO_CLOSE_SHUTDOWN: already freed from the persistent_list */
		return;
	}
	if (con->socket) {
		php_stream_free(con->socket, PHP_STREAM_FREE_CLOSE_PERSISTENT | PHP_STREAM_FREE_RSRC_DTOR);
	}
}
/* Detaches the connection's persistent stream from the current request's
 * EG(persistent_list) without destroying it.
 *
 * Used after fork(): the child must drop the entry inherited from the
 * parent's hash so that its own shutdown does not close a stream the
 * parent still owns. The in_free flag is raised around zend_hash_del()
 * so the hash destructor skips actually freeing the stream, then lowered
 * again so the stream remains usable afterwards. */
void php_mongo_io_stream_forget(mongo_con_manager *manager, mongo_connection *con)
{
	zend_rsrc_list_entry *le;
	TSRMLS_FETCH();

	/* When we fork we need to unregister the parents hash so we don't
	 * accidentally destroy it */
	if (zend_hash_find(&EG(persistent_list), con->hash, strlen(con->hash) + 1, (void*) &le) == SUCCESS) {
		/* Mark the stream as "being freed" so the list destructor is a no-op */
		((php_stream *)con->socket)->in_free = 1;
		zend_hash_del(&EG(persistent_list), con->hash, strlen(con->hash) + 1);
		((php_stream *)con->socket)->in_free = 0;
	}
}
#if HAVE_MONGO_SASL
/* Maps a negative cyrus-sasl result code to an error string.
 *
 * Returns 1 and sets *error_message (heap-allocated with malloc(); the
 * caller is responsible for free()ing it) when `result` is an error,
 * otherwise returns 0 and leaves *error_message untouched. */
static int is_sasl_failure(sasl_conn_t *conn, int result, char **error_message)
{
	if (result >= 0) {
		return 0;
	}
	*error_message = malloc(256);
	snprintf(*error_message, 256, "Authentication error: %s", sasl_errstring(result, NULL, NULL));
	return 1;
}
/* Performs the first step of a cyrus-sasl authentication conversation.
 *
 * Picks the mechanism from server_def->mechanism, obtains the initial
 * client payload from sasl_client_start(), base64-encodes it and sends it
 * to the server via the saslStart command.
 *
 * On success returns `conn`, filling out_payload/out_payload_len (the
 * server's reply payload, allocated by the wire layer; caller must
 * free() it) and *conversation_id. On failure returns NULL with
 * *error_message set (caller free()s it). */
static sasl_conn_t *php_mongo_saslstart(mongo_con_manager *manager, mongo_connection *con, mongo_server_options *options, mongo_server_def *server_def, sasl_conn_t *conn, char **out_payload, int *out_payload_len, int32_t *conversation_id, char **error_message)
{
	const char *raw_payload;
	char encoded_payload[4096];
	unsigned int raw_payload_len, encoded_payload_len;
	int result;
	char *mechanism_list;
	const char *mechanism_selected;
	sasl_interact_t *client_interact=NULL;

	/* Intentionally only send the mechanism we expect to authenticate with, rather than a
	 * list of all supported ones. This is because MongoDB doesn't support negotiating */
	switch(server_def->mechanism) {
		/* NOTE: We don't use cyrus-sasl for SCRAM-SHA-1, but it's left here as it's easier to support multiple SASL mechanisms this way */
		case MONGO_AUTH_MECHANISM_SCRAM_SHA1:
			/* cyrus-sasl calls it just "SCRAM" */
			mechanism_list = "SCRAM";
			break;
		case MONGO_AUTH_MECHANISM_GSSAPI:
		default:
			mechanism_list = "GSSAPI";
	}

	result = sasl_client_start(conn, mechanism_list, &client_interact, &raw_payload, &raw_payload_len, &mechanism_selected);
	if (is_sasl_failure(conn, result, error_message)) {
		return NULL;
	}

	/* sasl_client_start() must report SASL_CONTINUE: the exchange has more steps */
	if (result != SASL_CONTINUE) {
		*error_message = strdup("Could not negotiate SASL mechanism");
		return NULL;
	}

	result = sasl_encode64(raw_payload, raw_payload_len, encoded_payload, sizeof(encoded_payload), &encoded_payload_len);
	if (is_sasl_failure(conn, result, error_message)) {
		return NULL;
	}

	/* We don't care for whatever was mechanism_selected, we carry on with mechanism_list as that contains the only mechanism we want to use */
	/* +1: the payload length sent over the wire includes the terminating NUL */
	if (!mongo_connection_authenticate_saslstart(manager, con, options, server_def, mechanism_list, encoded_payload, encoded_payload_len + 1, out_payload, out_payload_len, conversation_id, error_message)) {
		return NULL;
	}

	return conn;
}
static int php_mongo_saslcontinue(mongo_con_manager *manager, mongo_connection *con, mongo_server_options *options, mongo_server_def *server_def, sasl_conn_t *conn, char *step_payload, int step_payload_len, int32_t conversation_id, char **error_message)
{
sasl_interact_t *client_interact=NULL;
/*
* Snippet from sasl.h:
* 4. client calls sasl_client_step()
* 4b. If SASL error, goto 7 or 3
* 4c. If SASL_OK, continue or goto 6 if last server response was success
*/
do {
char base_payload[4096], payload[4096];
unsigned int base_payload_len, payload_len;
const char *out;
unsigned int outlen;
unsigned char done = 0;
int result;
step_payload_len--; /* Remove the \0 from the string */
result = sasl_decode64(step_payload, step_payload_len, base_payload, sizeof(base_payload), &base_payload_len);
if (is_sasl_failure(conn, result, error_message)) {
return 0;
}
result = sasl_client_step(conn, (const char *)base_payload, base_payload_len, &client_interact, &out, &outlen);
if (is_sasl_failure(conn, result, error_message)) {
return 0;
}
result = sasl_encode64(out, outlen, payload, sizeof(base_payload), &payload_len);
if (is_sasl_failure(conn, result, error_message)) {
return 0;
}
if (!mongo_connection_authenticate_saslcontinue(manager, con, options, server_def, conversation_id, payload, payload_len + 1, &step_payload, &step_payload_len, &done, error_message)) {
return 0;
}
if (done) {
break;
}
} while (1);
return 1;
}
/* Callback function used by SASL when requesting the password */
/* Callback function used by SASL when requesting the password.
 *
 * For SCRAM-SHA-1 the password handed to SASL is the legacy MongoDB-CR
 * hash of user+password (mongo_authenticate_hash_user_password() returns a
 * malloc'd string — see its use in the SCRAM code path, where the result is
 * released with free()); for other mechanisms the raw configured password
 * is used directly. Fills *psecret with a malloc'd sasl_secret_t that
 * cyrus-sasl takes ownership of. Returns SASL_OK, or SASL_FAIL on an
 * unexpected callback id or allocation failure. */
static int sasl_interact_secret(sasl_conn_t *conn, mongo_server_def *server_def, int id, sasl_secret_t **psecret)
{
	switch (id) {
		case SASL_CB_PASS: {
			char *password;
			int len;
			int password_is_allocated = 0;

			/* MongoDB uses the legacy MongoDB-CR hash as the SCRAM-SHA-1 password */
			if (server_def->mechanism == MONGO_AUTH_MECHANISM_SCRAM_SHA1) {
				password = mongo_authenticate_hash_user_password(server_def->username, server_def->password);
				password_is_allocated = 1;
			} else {
				password = server_def->password;
			}

			len = strlen(password);
			*psecret = malloc(sizeof(sasl_secret_t) + len);
			/* BUGFIX: the malloc() result was dereferenced unchecked */
			if (*psecret == NULL) {
				if (password_is_allocated) {
					free(password);
				}
				return SASL_FAIL;
			}
			(*psecret)->len = len;
			memcpy((*psecret)->data, password, (*psecret)->len);

			/* BUGFIX: the hashed password was leaked on the SCRAM path */
			if (password_is_allocated) {
				free(password);
			}
			return SASL_OK;
		}
	}

	return SASL_FAIL;
}
/* Callback function used by SASL when requesting the username/authname */
/* Callback function used by SASL when requesting the username/authname.
 *
 * Answers SASL_CB_AUTHNAME and SASL_CB_USER with the configured username
 * (and its length, when requested). Any other id — including the
 * unsupported SASL_CB_LANGUAGE — yields SASL_FAIL. */
static int sasl_interact_simple(mongo_server_def *server_def, int id, const char **result, unsigned *len)
{
	if (id == SASL_CB_AUTHNAME || id == SASL_CB_USER) {
		*result = server_def->username;
		if (len) {
			*len = server_def->username ? strlen(server_def->username) : 0;
		}
		return SASL_OK;
	}

	/* SASL_CB_LANGUAGE and anything else: not supported */
	return SASL_FAIL;
}
/* Authenticates a connection through cyrus-sasl (GSSAPI).
 *
 * Creates a SASL client context with callbacks for username and password,
 * runs the saslStart step followed by the saslContinue loop, and disposes
 * of the context again. Returns 1 on success, 0 on failure with
 * *error_message set (caller free()s it). */
int php_mongo_io_authenticate_sasl(mongo_con_manager *manager, mongo_connection *con, mongo_server_options *options, mongo_server_def *server_def, char **error_message)
{
	int result;
	char *initpayload;
	int initpayload_len;
	sasl_conn_t *conn;
	int32_t conversation_id;
	sasl_callback_t client_interact [] = {
		{ SASL_CB_AUTHNAME, sasl_interact_simple, server_def },
		{ SASL_CB_USER, sasl_interact_simple, server_def },
		{ SASL_CB_PASS, sasl_interact_secret, server_def },
		{ SASL_CB_LIST_END, NULL, NULL }
	};

	result = sasl_client_new(options->gssapiServiceName, server_def->host, NULL, NULL, client_interact, 0, &conn);
	if (result != SASL_OK) {
		sasl_dispose(&conn);
		*error_message = strdup("Could not initialize a client exchange (SASL) to MongoDB");
		return 0;
	}

	if (!php_mongo_saslstart(manager, con, options, server_def, conn, &initpayload, &initpayload_len, &conversation_id, error_message)) {
		/* error message populated by php_mongo_saslstart() */
		/* BUGFIX: the SASL context was leaked on this path */
		sasl_dispose(&conn);
		return 0;
	}

	if (!php_mongo_saslcontinue(manager, con, options, server_def, conn, initpayload, initpayload_len, conversation_id, error_message)) {
		/* BUGFIX: initpayload and the SASL context were leaked on this path */
		free(initpayload);
		sasl_dispose(&conn);
		return 0;
	}

	free(initpayload);
	sasl_dispose(&conn);

	return 1;
}
/* Authenticates using the PLAIN SASL mechanism (RFC 4616).
 *
 * Builds the "\0authcid\0passwd" credential message (no authzid),
 * base64-encodes it and sends it in a single saslStart command.
 * Returns 1 on success, 0 on failure with *error_message set. */
int php_mongo_io_authenticate_plain(mongo_con_manager *manager, mongo_connection *con, mongo_server_options *options, mongo_server_def *server_def, char **error_message)
{
	char encoded[4096];
	char *server_reply, *credentials;
	char *mechanism = "PLAIN";
	unsigned int encoded_len, credentials_len;
	int server_reply_len;
	int32_t conversation_id;
	int status;

	/* message = [authzid] NUL authcid NUL passwd; we send an empty authzid */
	credentials_len = spprintf(&credentials, 0, "%c%s%c%s", '\0', server_def->username, '\0', server_def->password);
	status = sasl_encode64(credentials, credentials_len, encoded, sizeof(encoded), &encoded_len);
	efree(credentials);
	if (status != SASL_OK) {
		*error_message = strdup("SASL authentication: Could not base64 encode payload");
		return 0;
	}

	if (!mongo_connection_authenticate_saslstart(manager, con, options, server_def, mechanism, encoded, encoded_len + 1, &server_reply, &server_reply_len, &conversation_id, error_message)) {
		return 0;
	}

	free(server_reply);
	return 1;
}
#endif
/*
* client-first-message-bare = username "," nonce ["," extensions]
*
* client-first-message = gs2-header client-first-message-bare
* server-first-message = [reserved-mext ","] nonce "," salt ","
* iteration-count ["," extensions]
* client-final-message-without-proof = channel-binding "," nonce ["," extensions]
*
* SaltedPassword := Hi(Normalize(password), salt, i)
* ClientKey := HMAC(SaltedPassword, "Client Key")
* StoredKey := H(ClientKey)
* AuthMessage := client-first-message-bare + "," +
* server-first-message + "," +
* client-final-message-without-proof
* ClientSignature := HMAC(StoredKey, AuthMessage)
* ClientProof := ClientKey XOR ClientSignature
* ServerKey := HMAC(SaltedPassword, "Server Key")
* ServerSignature := HMAC(ServerKey, AuthMessage)
*/
int php_mongo_io_make_client_proof(char *username, char *password, unsigned char *salt_base64, int salt_base64_len, int iterations, char **return_value, int *return_value_len, char *server_first_message, unsigned char *cnonce, char *snonce, unsigned char *server_signature, int *server_signature_len TSRMLS_DC)
{
unsigned char stored_key[PHP_MONGO_SCRAM_HASH_SIZE], client_proof[PHP_MONGO_SCRAM_HASH_SIZE], client_signature[PHP_MONGO_SCRAM_HASH_SIZE];
unsigned char salted_password[PHP_MONGO_SCRAM_HASH_SIZE], client_key[PHP_MONGO_SCRAM_HASH_SIZE], server_key[PHP_MONGO_SCRAM_HASH_SIZE];
unsigned char *salt, *auth_message;
long salted_password_len;
int salt_len, client_key_len, server_key_len;
int auth_message_len, client_signature_len;
int i;
salt = php_base64_decode(salt_base64, salt_base64_len, &salt_len);
/* SaltedPassword := Hi(Normalize(password), salt, i) */
php_mongo_hash_pbkdf2_sha1(password, strlen(password), salt, salt_len, iterations, salted_password, &salted_password_len TSRMLS_CC);
efree(salt);
/* ClientKey := HMAC(SaltedPassword, "Client Key") */
php_mongo_hmac((unsigned char *)PHP_MONGO_SCRAM_CLIENT_KEY, strlen((char *)PHP_MONGO_SCRAM_CLIENT_KEY), (char *)salted_password, salted_password_len, client_key, &client_key_len);
/* StoredKey := H(ClientKey) */
php_mongo_sha1(client_key, client_key_len, stored_key);
/* AuthMessage := client-first-message-bare + "," +
* server-first-message + "," +
* client-final-message-without-proof
*/
auth_message_len = spprintf((char **)&auth_message, 0, "n=%s,r=%s,%s,c=biws,%s", username, cnonce, server_first_message, snonce);
/* ClientSignature := HMAC(StoredKey, AuthMessage) */
php_mongo_hmac(auth_message, auth_message_len, (char *)stored_key, PHP_MONGO_SCRAM_HASH_SIZE, (unsigned char *)client_signature, &client_signature_len);
/* ClientProof := ClientKey XOR ClientSignature */
for (i = 0; i < PHP_MONGO_SCRAM_HASH_SIZE; i++) {
client_proof[i] = client_key[i] ^ client_signature[i];
}
/* ServerKey := HMAC(SaltedPassword, "Server Key") */
php_mongo_hmac((unsigned char *)PHP_MONGO_SCRAM_SERVER_KEY, strlen((char *)PHP_MONGO_SCRAM_SERVER_KEY), (char *)salted_password, salted_password_len, (unsigned char *)server_key, &server_key_len);
/* ServerSignature := HMAC(ServerKey, AuthMessage) */
php_mongo_hmac(auth_message, auth_message_len, (char *)server_key, PHP_MONGO_SCRAM_HASH_SIZE, server_signature, server_signature_len);
efree(auth_message);
*return_value = (char *)php_base64_encode(client_proof, PHP_MONGO_SCRAM_HASH_SIZE, return_value_len);
return 1;
}
/**
* Authenticates a connection using SCRAM-SHA-1
*
* Returns:
* 0: when it didn't work - with the error_message set.
* 1: when it worked
* 2: when no need to authenticate (i.e. no credentials provided)
*/
/**
 * Authenticates a connection using SCRAM-SHA-1 (RFC 5802).
 *
 * Runs the three-message SCRAM conversation over MongoDB's
 * saslStart/saslContinue commands and verifies the ServerSignature the
 * server returns, followed by the final empty-payload roundtrip.
 *
 * Returns:
 * 0: when it didn't work - with the error_message set (caller free()s it).
 * 1: when it worked
 * 2: when no need to authenticate (i.e. no credentials provided)
 */
int mongo_connection_authenticate_mongodb_scram_sha1(mongo_con_manager *manager, mongo_connection *con, mongo_server_options *options, mongo_server_def *server_def, char **error_message)
{
	char *client_first_message, *client_first_message_base64;
	char *client_final_message, *client_final_message_base64;
	int client_first_message_len, client_first_message_base64_len;
	int client_final_message_len, client_final_message_base64_len;
	char *server_first_message, *server_first_message_base64, *server_first_message_dup;
	char *server_final_message, *server_final_message_base64;
	int server_first_message_len, server_first_message_base64_len;
	int server_final_message_len, server_final_message_base64_len;
	char *rnonce, *password, *tok, *proof = NULL;
	char *iterationsstr, *salt_base64;
	int rskip, iterations, proof_len;
	unsigned char cnonce[41];
	int32_t step_conversation_id;
	unsigned char done = 0;
	char *tmp, *username;
	int username_len;
	unsigned char server_signature[PHP_MONGO_SCRAM_HASH_SIZE];
	unsigned char *server_signature_base64;
	int server_signature_len, server_signature_base64_len;
	TSRMLS_FETCH();

	if (!server_def->db || !server_def->username || !server_def->password) {
		return 2;
	}

	/*
	 * The characters ',' or '=' in usernames are sent as '=2C' and
	 * '=3D' respectively (RFC 5802 saslname encoding). If the server
	 * receives a username that contains '=' not followed by either '2C'
	 * or '3D', then the server MUST fail the authentication.
	 */
	tmp = php_str_to_str(server_def->username, strlen(server_def->username), "=", 1, "=3D", 3, &username_len);
	username = php_str_to_str(tmp, strlen(tmp), ",", 1, "=2C", 3, &username_len);
	efree(tmp);

	php_mongo_io_make_nonce((char *)cnonce TSRMLS_CC);

	/*
	 * client-first-message = gs2-header client-first-message-bare
	 * client-first-message-bare = "n=" saslname "," "r=" c-nonce
	 * We don't support GS2 nor channel binding, so the gs2-header is "n,,".
	 * example: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL
	 */
	client_first_message_len = spprintf(&client_first_message, 0, "n,,n=%s,r=%s", username, cnonce);
	client_first_message_base64 = (char *)php_base64_encode((unsigned char *)client_first_message, client_first_message_len, &client_first_message_base64_len);

	if (!mongo_connection_authenticate_saslstart(manager, con, options, server_def, PHP_MONGO_SCRAM_HASH_1, client_first_message_base64, client_first_message_base64_len+1, &server_first_message_base64, &server_first_message_base64_len, &step_conversation_id, error_message)) {
		efree(client_first_message);
		efree(client_first_message_base64);
		efree(username);
		/* starting sasl failed, bail out; error_message was already set by
		 * mongo_connection_authenticate_saslstart() */
		return 0;
	}
	efree(client_first_message_base64);

	/*
	 * server-first-message = "r=" c-nonce s-nonce "," "s=" base64-salt "," "i=" iteration-count
	 * example: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=10000
	 */
	server_first_message = (char *)php_base64_decode((unsigned char *)server_first_message_base64, server_first_message_base64_len, &server_first_message_len);
	free(server_first_message_base64);
	server_first_message_dup = estrdup(server_first_message);

	/* the r= from the client_first_message appended with more chars from the server */
	rskip = username_len+6; /* n,,n= and the comma before r */
	rnonce = php_strtok_r(server_first_message_dup, ",", &tok);
	salt_base64 = php_strtok_r(NULL, ",", &tok);
	iterationsstr = php_strtok_r(NULL, ",", &tok);
	/* BUGFIX: the NULL check must run on the raw tokens. The original code
	 * added +2 (to skip the "s="/"i=" attribute tags) *before* checking,
	 * which is undefined behaviour on a NULL token and made the check
	 * ineffective against a malformed server payload. */
	if (rnonce == NULL || salt_base64 == NULL || iterationsstr == NULL) {
		efree(server_first_message);
		efree(server_first_message_dup);
		efree(client_first_message);
		/* the server payload misses an expected attribute, bail out */
		*error_message = strdup("Server return payload in wrong format");
		efree(username);
		return 0;
	}
	salt_base64 += 2;   /* skip "s=" */
	iterationsstr += 2; /* skip "i=" */

	/* The server's nonce must start with the client nonce we sent */
	if (strncmp(rnonce, client_first_message+rskip, (PHP_MONGO_SCRAM_HASH_SIZE*2)+1-rskip) != 0) {
		efree(server_first_message);
		efree(server_first_message_dup);
		efree(client_first_message);
		/* the server didn't return our hash, bail out */
		*error_message = strdup("Server return invalid hash");
		efree(username);
		return 0;
	}
	efree(client_first_message);

	iterations = strtoll(iterationsstr, NULL, 10);

	/* MongoDB uses the legacy MongoDB-CR hash as the SCRAM-SHA-1 password */
	password = mongo_authenticate_hash_user_password(username, server_def->password);
	php_mongo_io_make_client_proof(username, password, (unsigned char*)salt_base64, strlen(salt_base64), iterations, &proof, &proof_len, server_first_message, cnonce, rnonce, server_signature, &server_signature_len TSRMLS_CC);
	efree(username);
	efree(server_first_message);
	free(password);

	/*
	 * client-final-message = "c=" base64-cbind-input "," nonce "," "p=" base64-proof
	 * c=biws is base64_encode("n,,") — we support neither GS2 nor channel
	 * binding. rnonce still carries its "r=" prefix from the server payload.
	 * example: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=
	 */
	client_final_message_len = spprintf(&client_final_message, 0, "c=biws,%s,p=%s", rnonce, proof);
	efree(proof);
	efree(server_first_message_dup);

	/* base64 for the server (payload), or BSON Binary encode.. simpler to base64 */
	client_final_message_base64 = (char *)php_base64_encode((unsigned char*)client_final_message, client_final_message_len, &client_final_message_base64_len);

	if (!mongo_connection_authenticate_saslcontinue(manager, con, options, server_def, step_conversation_id, client_final_message_base64, client_final_message_base64_len+1, &server_final_message_base64, &server_final_message_base64_len, &done, error_message)) {
		efree(client_final_message);
		efree(client_final_message_base64);
		return 0;
	}
	efree(client_final_message);
	efree(client_final_message_base64);

	/* Verify the server signature: ServerSignature := HMAC(ServerKey, AuthMessage).
	 * The server's final message is "v=" base64-ServerSignature, hence the +2. */
	server_final_message = (char *)php_base64_decode((unsigned char*)server_final_message_base64, server_final_message_base64_len, &server_final_message_len);
	server_signature_base64 = php_base64_encode((unsigned char*)server_signature, server_signature_len, &server_signature_base64_len);
	if (strncmp(server_final_message+2, (char *)server_signature_base64, server_signature_base64_len) != 0) {
		efree(server_final_message);
		/* BUGFIX: these two buffers were leaked on the mismatch path */
		efree(server_signature_base64);
		free(server_final_message_base64);
		*error_message = strdup("Server returned wrong ServerSignature");
		return 0;
	}
	efree(server_final_message);
	efree(server_signature_base64);
	free(server_final_message_base64);

	/* Extra roundtrip to let the server know we trust her */
	if (!mongo_connection_authenticate_saslcontinue(manager, con, options, server_def, step_conversation_id, "", 1, &server_final_message_base64, &server_final_message_base64_len, &done, error_message)) {
		free(server_final_message_base64);
		return 0;
	}
	free(server_final_message_base64);

	return 1;
}
/* Dispatches authentication to the handler for the configured mechanism.
 *
 * Returns the chosen handler's result (0 failure with *error_message set,
 * 1 success, 2 nothing to do), or 0 with an "unknown mechanism" message
 * when the mechanism is not supported by this build. */
int php_mongo_io_stream_authenticate(mongo_con_manager *manager, mongo_connection *con, mongo_server_options *options, mongo_server_def *server_def, char **error_message)
{
	switch (server_def->mechanism) {
		case MONGO_AUTH_MECHANISM_MONGODB_DEFAULT:
			/* SCRAM-SHA-1 for 2.8+ servers, legacy authentication otherwise */
			if (php_mongo_api_connection_supports_feature(con, PHP_MONGO_API_RELEASE_2_8)) {
				return mongo_connection_authenticate_mongodb_scram_sha1(manager, con, options, server_def, error_message);
			}
			return mongo_connection_authenticate(manager, con, options, server_def, error_message);

		case MONGO_AUTH_MECHANISM_SCRAM_SHA1:
			return mongo_connection_authenticate_mongodb_scram_sha1(manager, con, options, server_def, error_message);

		case MONGO_AUTH_MECHANISM_MONGODB_CR:
		case MONGO_AUTH_MECHANISM_MONGODB_X509:
			/* Use the mcon implementation of MongoDB-CR and MongoDB-X509 */
			return mongo_connection_authenticate(manager, con, options, server_def, error_message);

#if HAVE_MONGO_SASL
		case MONGO_AUTH_MECHANISM_GSSAPI:
			return php_mongo_io_authenticate_sasl(manager, con, options, server_def, error_message);

		case MONGO_AUTH_MECHANISM_PLAIN:
			return php_mongo_io_authenticate_plain(manager, con, options, server_def, error_message);
#endif

		default:
#if HAVE_MONGO_SASL
			*error_message = strdup("Unknown authentication mechanism. Only SCRAM-SHA-1, MongoDB-CR, MONGODB-X509, GSSAPI and PLAIN are supported by this build");
#else
			*error_message = strdup("Unknown authentication mechanism. Only SCRAM-SHA-1, MongoDB-CR and MONGODB-X509 are supported by this build");
#endif
			break;
	}

	return 0;
}
/* Generates a client nonce: the SHA-1 digest (rendered as a 40-character
 * hex string plus NUL via make_sha1_digest) of up to 32 bytes of system
 * randomness. sha1_str must point to a buffer of at least 41 bytes.
 *
 * NOTE(review): if the randomness source is unavailable or read() fails,
 * the digest of an empty/partial input is still produced instead of an
 * error being reported — the nonce would then be predictable. Confirm
 * whether that is acceptable for the SCRAM use case. */
void php_mongo_io_make_nonce(char *sha1_str TSRMLS_DC) /* {{{ */
{
	unsigned char digest[20];
	PHP_SHA1_CTX sha1_context;
	size_t to_read = 32;
	unsigned char rbuf[64];

#ifdef PHP_WIN32
	PHP_SHA1Init(&sha1_context);
	/* Windows: one shot from the system CSPRNG */
	if (php_win32_get_random_bytes(rbuf, to_read) == SUCCESS){
		PHP_SHA1Update(&sha1_context, rbuf, to_read);
	}
#else
	int fd;

	PHP_SHA1Init(&sha1_context);
	fd = VCWD_OPEN("/dev/urandom", O_RDONLY);
	if (fd >= 0) {
		int n;

		/* Feed up to 32 random bytes into the hash; stop early on EOF/error */
		while (to_read > 0) {
			n = read(fd, rbuf, to_read);
			if (n <= 0) break;
			PHP_SHA1Update(&sha1_context, rbuf, n);
			to_read -= n;
		}
		close(fd);
	}
#endif
	PHP_SHA1Final(digest, &sha1_context);
	make_sha1_digest(sha1_str, digest);
}
/* }}} */
/*
* Local variables:
* tab-width: 4
* c-basic-offset: 4
* End:
* vim600: fdm=marker
* vim: noet sw=4 ts=4
*/
|
3b2972dd61cdd7e76fb5902974162186b5fd6f79
|
b6f1baed3539eeaaae44f4a6a2b03b53cca84932
|
/Pods/Headers/Private/HXPhotoPicker/HXPhoto3DTouchViewController.h
|
22b0d5c4a91a0aa56d6f5bbc9b1b1aae539418b0
|
[] |
no_license
|
iotjin/JhForm
|
edbb3cd09e4874b7961c84dd20e89a1dfafbf3bb
|
821175a262369716fca39c109857825c928724ae
|
refs/heads/master
| 2022-08-20T02:44:29.863754
| 2022-08-13T03:35:54
| 2022-08-13T03:35:54
| 164,795,840
| 148
| 23
| null | 2021-04-13T03:06:59
| 2019-01-09T05:41:41
|
Objective-C
|
UTF-8
|
C
| false
| false
| 78
|
h
|
HXPhoto3DTouchViewController.h
|
../../../HXPhotoPicker/HXPhotoPicker/Controller/HXPhoto3DTouchViewController.h
|
8605a1b261b845252877d3a243a9e5ae3a591c80
|
c2634ebec1d4448e372d174f459c3cbc03fd1edc
|
/lib/node_modules/@stdlib/ndarray/base/nullary/src/u.c
|
be085c98cf957c181dbeb9a285d17baa7ebd9d03
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"SunPro",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
stdlib-js/stdlib
|
ede11aee78f08e4f78a0bb939cb0bc244850b55b
|
f10c6e7db1a2b15cdd2b6237dd0927466ebd7278
|
refs/heads/develop
| 2023-09-05T03:29:36.368208
| 2023-09-03T22:42:11
| 2023-09-03T22:42:11
| 54,614,238
| 4,163
| 230
|
Apache-2.0
| 2023-09-13T21:26:07
| 2016-03-24T04:19:52
|
JavaScript
|
UTF-8
|
C
| false
| false
| 53,257
|
c
|
u.c
|
/**
* @license Apache-2.0
*
* Copyright (c) 2023 The Stdlib Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* The following is auto-generated. Do not manually edit. See scripts/loops.js.
*/
#include "stdlib/ndarray/base/nullary/u.h"
#include "stdlib/ndarray/base/nullary/typedefs.h"
#include "stdlib/ndarray/base/nullary/macros.h"
#include "stdlib/ndarray/base/nullary/dispatch_object.h"
#include "stdlib/ndarray/base/nullary/dispatch.h"
#include "stdlib/ndarray/ctor.h"
#include <stdint.h>
/**
* Applies a nullary callback and assigns results to elements in a zero-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 0;
*
* // Define the array shape:
* int64_t shape[] = {};
*
* // Define the strides:
* int64_t sx[] = { 0 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_0d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
/**
 * Applies a nullary callback and assigns the result to the single element
 * of a zero-dimensional output ndarray.
 *
 * @param arrays  array whose only element is a pointer to the output ndarray
 * @param fcn     callback returning a `uint32_t`
 * @return        status code (`0` on success; `-1` if the element store fails)
 */
int8_t stdlib_ndarray_u_0d( struct ndarray *arrays[], void *fcn ) {
	typedef uint32_t func_type( void );
	func_type *clbk = (func_type *)fcn;
	if ( stdlib_ndarray_iset_uint32( arrays[ 0 ], 0, clbk() ) != 0 ) {
		return -1;
	}
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a one-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 1;
*
* // Define the array shape:
* int64_t shape[] = { 3 };
*
* // Define the strides:
* int64_t sx[] = { 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_1d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
/* Applies the nullary callback over a one-dimensional uint32 output ndarray. */
int8_t stdlib_ndarray_u_1d( struct ndarray *arrays[], void *fcn ) {
	typedef uint32_t func_type( void );
	/* NOTE(review): the loop macro below appears to expand code referencing
	 * the identifier `f` declared here — confirm against
	 * stdlib/ndarray/base/nullary/macros.h before renaming it. */
	func_type *f = (func_type *)fcn;
	STDLIB_NDARRAY_NULLARY_1D_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a two-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 2;
*
* // Define the array shape:
* int64_t shape[] = { 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_2d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
/* Applies the nullary callback over a two-dimensional uint32 output ndarray. */
int8_t stdlib_ndarray_u_2d( struct ndarray *arrays[], void *fcn ) {
	typedef uint32_t func_type( void );
	/* NOTE(review): the loop macro below appears to expand code referencing
	 * the identifier `f` declared here — confirm against
	 * stdlib/ndarray/base/nullary/macros.h before renaming it. */
	func_type *f = (func_type *)fcn;
	STDLIB_NDARRAY_NULLARY_2D_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a two-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 2;
*
* // Define the array shape:
* int64_t shape[] = { 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_2d_blocked( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_2d_blocked( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the cache-blocked 2-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_2D_BLOCKED_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a three-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 3;
*
* // Define the array shape:
* int64_t shape[] = { 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_3d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_3d( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the 3-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_3D_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a three-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 3;
*
* // Define the array shape:
* int64_t shape[] = { 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_3d_blocked( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_3d_blocked( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the cache-blocked 3-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_3D_BLOCKED_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a four-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 4;
*
* // Define the array shape:
* int64_t shape[] = { 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_4d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_4d( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the 4-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_4D_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a four-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 4;
*
* // Define the array shape:
* int64_t shape[] = { 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_4d_blocked( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_4d_blocked( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the cache-blocked 4-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_4D_BLOCKED_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a five-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 5;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_5d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_5d( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the 5-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_5D_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a five-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 5;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_5d_blocked( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_5d_blocked( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the cache-blocked 5-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_5D_BLOCKED_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a six-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 6;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_6d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_6d( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the 6-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_6D_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a six-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 6;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_6d_blocked( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_6d_blocked( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the cache-blocked 6-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_6D_BLOCKED_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a seven-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 7;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_7d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_7d( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the 7-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_7D_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a seven-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 7;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_7d_blocked( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_7d_blocked( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the cache-blocked 7-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_7D_BLOCKED_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in an eight-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 8;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_8d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_8d( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the 8-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_8D_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in an eight-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 8;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_8d_blocked( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_8d_blocked( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the cache-blocked 8-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_8D_BLOCKED_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a nine-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 9;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_9d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_9d( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as `uint32_t (*)( void )`:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the 9-d nullary loop; the macro references the locals (`f`, `arrays`) by name:
	STDLIB_NDARRAY_NULLARY_9D_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a nine-dimensional output ndarray.
*
* ## Notes
*
* - If successful, the functions returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 9;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_9d_blocked( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_9d_blocked( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as a nullary function returning a uint32_t:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the cache-blocked 9-d nullary loop; the macro uses the local `f` (do not rename `f`):
	STDLIB_NDARRAY_NULLARY_9D_BLOCKED_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a ten-dimensional output ndarray.
*
* ## Notes
*
 *   - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 10;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_10d( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_10d( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as a nullary function returning a uint32_t:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the 10-d nullary loop; the macro uses the local `f` to fill each element of the output ndarray (do not rename `f`):
	STDLIB_NDARRAY_NULLARY_10D_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in a ten-dimensional output ndarray.
*
* ## Notes
*
 *   - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 10;
*
* // Define the array shape:
* int64_t shape[] = { 1, 1, 1, 1, 1, 1, 1, 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 32, 32, 32, 32, 32, 32, 32, 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_10d_blocked( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_10d_blocked( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as a nullary function returning a uint32_t:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the cache-blocked 10-d nullary loop; the macro uses the local `f` (do not rename `f`):
	STDLIB_NDARRAY_NULLARY_10D_BLOCKED_LOOP_CLBK( uint32_t )
	return 0;
}
/**
* Applies a nullary callback and assigns results to elements in an n-dimensional output ndarray.
*
* ## Notes
*
 *   - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 3;
*
* // Define the array shape:
* int64_t shape[] = { 2, 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 16, 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u_nd( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u_nd( struct ndarray *arrays[], void *fcn ) {
	// Reinterpret the opaque callback pointer as a nullary function returning a uint32_t:
	typedef uint32_t func_type( void );
	func_type *f = (func_type *)fcn;
	// Expand the generic n-d nullary loop (fallback for arbitrary dimensionality); the macro uses the local `f` (do not rename `f`):
	STDLIB_NDARRAY_NULLARY_ND_LOOP_CLBK( uint32_t )
	return 0;
}
// Define a list of nullary ndarray functions, where the array index corresponds
// to the output ndarray dimensionality (0d..10d) and the final entry is the
// generic n-d fallback:
static ndarrayNullaryFcn functions[] = {
	stdlib_ndarray_u_0d,
	stdlib_ndarray_u_1d,
	stdlib_ndarray_u_2d,
	stdlib_ndarray_u_3d,
	stdlib_ndarray_u_4d,
	stdlib_ndarray_u_5d,
	stdlib_ndarray_u_6d,
	stdlib_ndarray_u_7d,
	stdlib_ndarray_u_8d,
	stdlib_ndarray_u_9d,
	stdlib_ndarray_u_10d,
	stdlib_ndarray_u_nd
};
// Define a list of nullary ndarray functions implementing loop blocking
// (starting at 2 dimensions, as blocking is meaningless for 0d/1d)...
static ndarrayNullaryFcn blocked_functions[] = {
	stdlib_ndarray_u_2d_blocked,
	stdlib_ndarray_u_3d_blocked,
	stdlib_ndarray_u_4d_blocked,
	stdlib_ndarray_u_5d_blocked,
	stdlib_ndarray_u_6d_blocked,
	stdlib_ndarray_u_7d_blocked,
	stdlib_ndarray_u_8d_blocked,
	stdlib_ndarray_u_9d_blocked,
	stdlib_ndarray_u_10d_blocked
};
// Create a nullary function dispatch object (consumed by stdlib_ndarray_nullary_dispatch):
static const struct ndarrayNullaryDispatchObject obj = {
	// Array containing nullary ndarray functions:
	functions,
	// Number of nullary ndarray functions (must match the length of `functions`):
	12,
	// Array containing nullary ndarray functions using loop blocking:
	blocked_functions,
	// Number of nullary ndarray functions using loop blocking (must match the length of `blocked_functions`):
	9
};
/**
* Applies a nullary callback and assigns results to elements in an output ndarray.
*
* ## Notes
*
 *   - If successful, the function returns `0`; otherwise, the function returns an error code.
*
* @param arrays array whose only element is a pointer to an output array
* @param fcn callback
* @return status code
*
* @example
* #include "stdlib/ndarray/base/nullary/u.h"
* #include "stdlib/ndarray/dtypes.h"
* #include "stdlib/ndarray/index_modes.h"
* #include "stdlib/ndarray/orders.h"
* #include "stdlib/ndarray/ctor.h"
* #include <stdint.h>
* #include <stdlib.h>
* #include <stdio.h>
*
* // Define the ndarray data type:
* enum STDLIB_NDARRAY_DTYPE xdtype = STDLIB_NDARRAY_UINT32;
*
* // Create an underlying byte array:
* uint8_t xbuf[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
*
* // Define the number of dimensions:
* int64_t ndims = 2;
*
* // Define the array shape:
* int64_t shape[] = { 2, 2 };
*
* // Define the strides:
* int64_t sx[] = { 8, 4 };
*
* // Define the index offset:
* int64_t ox = 0;
*
* // Define the array order:
* enum STDLIB_NDARRAY_ORDER order = STDLIB_NDARRAY_ROW_MAJOR;
*
* // Specify the index mode:
* enum STDLIB_NDARRAY_INDEX_MODE imode = STDLIB_NDARRAY_INDEX_ERROR;
*
* // Specify the subscript index modes:
* int8_t submodes[] = { imode };
* int64_t nsubmodes = 1;
*
* // Create an output ndarray:
* struct ndarray *x = stdlib_ndarray_allocate( xdtype, xbuf, ndims, shape, sx, ox, order, imode, nsubmodes, submodes );
* if ( x == NULL ) {
* fprintf( stderr, "Error allocating memory.\n" );
* exit( EXIT_FAILURE );
* }
*
* // Create an array containing a pointer to the ndarray:
* struct ndarray *arrays[] = { x };
*
* // Define a callback:
* static uint32_t fcn( void ) {
* return 10;
* }
*
* // Apply the callback:
* int8_t status = stdlib_ndarray_u( arrays, (void *)fcn );
* if ( status != 0 ) {
* fprintf( stderr, "Error during computation.\n" );
* exit( EXIT_FAILURE );
* }
*
* // ...
*
* // Free allocated memory:
* stdlib_ndarray_free( x );
*/
int8_t stdlib_ndarray_u( struct ndarray *arrays[], void *fcn ) {
	// Delegate to the generic dispatcher, which selects an implementation from `obj` based on the output ndarray:
	return stdlib_ndarray_nullary_dispatch( &obj, arrays, fcn );
}
|
7e503d203f43f71f6a9bd1b040efdc1104373a8a
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/wayland-protocols/gtk/demos/gtk-demo/layoutmanager2.c
|
cc4b60d653701793e963b9c32b491d4d2d083184
|
[
"LGPL-2.0-only",
"Apache-2.0",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
C
| false
| false
| 6,494
|
c
|
layoutmanager2.c
|
/* Layout Manager/Transformation
* #Keywords: GtkLayoutManager, GskTransform
*
* This demo shows how to use transforms in a nontrivial
* way with a custom layout manager. The layout manager places
* icons on a sphere that can be rotated using arrow keys.
*/
#include <gtk/gtk.h>
#include "demo2widget.h"
#include "demo2layout.h"
#include "demochild.h"
GtkWidget *
do_layoutmanager2 (GtkWidget *parent)
{
  /* The demo window is a singleton: created on first call, toggled thereafter. */
  static GtkWidget *window = NULL;
  if (!window)
    {
      GtkWidget *widget;
      GtkWidget *child;
      /* Symbolic icon names used to populate the rotatable sphere widget. */
      const char *name[] = {
        "action-unavailable-symbolic",
        "address-book-new-symbolic",
        "application-exit-symbolic",
        "appointment-new-symbolic",
        "bookmark-new-symbolic",
        "call-start-symbolic",
        "call-stop-symbolic",
        "camera-switch-symbolic",
        "chat-message-new-symbolic",
        "color-select-symbolic",
        "contact-new-symbolic",
        "document-edit-symbolic",
        "document-new-symbolic",
        "document-open-recent-symbolic",
        "document-open-symbolic",
        "document-page-setup-symbolic",
        "document-print-preview-symbolic",
        "document-print-symbolic",
        "document-properties-symbolic",
        "document-revert-symbolic-rtl",
        "document-revert-symbolic",
        "document-save-as-symbolic",
        "document-save-symbolic",
        "document-send-symbolic",
        "edit-clear-all-symbolic",
        "edit-clear-symbolic-rtl",
        "edit-clear-symbolic",
        "edit-copy-symbolic",
        "edit-cut-symbolic",
        "edit-delete-symbolic",
        "edit-find-replace-symbolic",
        "edit-find-symbolic",
        "edit-paste-symbolic",
        "edit-redo-symbolic-rtl",
        "edit-redo-symbolic",
        "edit-select-all-symbolic",
        "edit-select-symbolic",
        "edit-undo-symbolic-rtl",
        "edit-undo-symbolic",
        "error-correct-symbolic",
        "find-location-symbolic",
        "folder-new-symbolic",
        "font-select-symbolic",
        "format-indent-less-symbolic-rtl",
        "format-indent-less-symbolic",
        "format-indent-more-symbolic-rtl",
        "format-indent-more-symbolic",
        "format-justify-center-symbolic",
        "format-justify-fill-symbolic",
        "format-justify-left-symbolic",
        "format-justify-right-symbolic",
        "format-text-bold-symbolic",
        "format-text-direction-symbolic-rtl",
        "format-text-direction-symbolic",
        "format-text-italic-symbolic",
        "format-text-strikethrough-symbolic",
        "format-text-underline-symbolic",
        "go-bottom-symbolic",
        "go-down-symbolic",
        "go-first-symbolic-rtl",
        "go-first-symbolic",
        "go-home-symbolic",
        "go-jump-symbolic-rtl",
        "go-jump-symbolic",
        "go-last-symbolic-rtl",
        "go-last-symbolic",
        "go-next-symbolic-rtl",
        "go-next-symbolic",
        "go-previous-symbolic-rtl",
        "go-previous-symbolic",
        "go-top-symbolic",
        "go-up-symbolic",
        "help-about-symbolic",
        "insert-image-symbolic",
        "insert-link-symbolic",
        "insert-object-symbolic",
        "insert-text-symbolic",
        "list-add-symbolic",
        "list-remove-all-symbolic",
        "list-remove-symbolic",
        "mail-forward-symbolic",
        "mail-mark-important-symbolic",
        "mail-mark-junk-symbolic",
        "mail-mark-notjunk-symbolic",
        "mail-message-new-symbolic",
        "mail-reply-all-symbolic",
        "mail-reply-sender-symbolic",
        "mail-send-receive-symbolic",
        "mail-send-symbolic",
        "mark-location-symbolic",
        "media-eject-symbolic",
        "media-playback-pause-symbolic",
        "media-playback-start-symbolic",
        "media-playback-stop-symbolic",
        "media-record-symbolic",
        "media-seek-backward-symbolic",
        "media-seek-forward-symbolic",
        "media-skip-backward-symbolic",
        "media-skip-forward-symbolic",
        "media-view-subtitles-symbolic",
        "object-flip-horizontal-symbolic",
        "object-flip-vertical-symbolic",
        "object-rotate-left-symbolic",
        "object-rotate-right-symbolic",
        "object-select-symbolic",
        "open-menu-symbolic",
        "process-stop-symbolic",
        "send-to-symbolic",
        "sidebar-hide-symbolic",
        "sidebar-show-symbolic",
        "star-new-symbolic",
        "system-log-out-symbolic",
        "system-reboot-symbolic",
        "system-run-symbolic",
        "system-search-symbolic",
        "system-shutdown-symbolic",
        "system-switch-user-symbolic",
        "tab-new-symbolic",
        "tools-check-spelling-symbolic",
        "value-decrease-symbolic",
        "value-increase-symbolic",
        "view-app-grid-symbolic",
        "view-conceal-symbolic",
        "view-continuous-symbolic",
        "view-dual-symbolic",
        "view-fullscreen-symbolic",
        "view-grid-symbolic",
        "view-list-bullet-symbolic",
        "view-list-ordered-symbolic",
        "view-list-symbolic",
        "view-mirror-symbolic",
        "view-more-horizontal-symbolic",
        "view-more-symbolic",
        "view-paged-symbolic",
        "view-pin-symbolic",
        "view-refresh-symbolic",
        "view-restore-symbolic",
        "view-reveal-symbolic",
        "view-sort-ascending-symbolic",
        "view-sort-descending-symbolic",
        "zoom-fit-best-symbolic",
        "zoom-in-symbolic",
        "zoom-original-symbolic",
        "zoom-out-symbolic",
      };
      int i;
      window = gtk_window_new ();
      gtk_window_set_title (GTK_WINDOW (window), "Layout Manager — Transformation");
      gtk_window_set_default_size (GTK_WINDOW (window), 600, 620);
      /* Clear the static pointer when the window is finalized so the demo can be reopened. */
      g_object_add_weak_pointer (G_OBJECT (window), (gpointer *)&window);
      widget = demo2_widget_new ();
      /* Add 18 * 36 = 648 icon children; names cycle through the list above. */
      for (i = 0; i < 18 * 36; i++)
        {
          child = gtk_image_new_from_icon_name (name[i % G_N_ELEMENTS (name)]);
          gtk_widget_set_margin_start (child, 4);
          gtk_widget_set_margin_end (child, 4);
          gtk_widget_set_margin_top (child, 4);
          gtk_widget_set_margin_bottom (child, 4);
          demo2_widget_add_child (DEMO2_WIDGET (widget), child);
        }
      gtk_window_set_child (GTK_WINDOW (window), widget);
    }
  /* Toggle: show the window if hidden, destroy it if already visible. */
  if (!gtk_widget_get_visible (window))
    gtk_widget_show (window);
  else
    gtk_window_destroy (GTK_WINDOW (window));
  return window;
}
|
9b04f3e096d3ec2889819bf98b1a6365c4d90dc6
|
700ced8c2a78b76c04b0a07387d9244b3cf8f9b4
|
/nuke/md5.h
|
8c8bc9eacc55c2d2abbb263db560548acc1cd42c
|
[
"MIT"
] |
permissive
|
MercenariesEngineering/openexrid
|
a801eeebfe93ae6bccbe66422fcda4dce4d6cd4d
|
00ee3aa2dc9fd8a3c81b5a9adb9d7edc7f0ff7f1
|
refs/heads/master
| 2023-06-07T12:34:32.174836
| 2023-05-29T12:43:08
| 2023-05-29T12:43:08
| 47,749,894
| 130
| 23
| null | null | null | null |
UTF-8
|
C
| false
| false
| 289
|
h
|
md5.h
|
#pragma once

/* Required for uint8_t/uint32_t; previously this header was not
 * self-contained and relied on includers pulling in <stdint.h>. */
#include <stdint.h>

/*
 * MD5 hashing context.  Usage: md5_starts() once, md5_update() for each
 * chunk of input, md5_finish() to obtain the 16-byte digest.
 */
typedef struct
{
    uint32_t total[2];  /* running input length counters — presumably low/high words; confirm against md5.c */
    uint32_t state[4];  /* intermediate digest state */
    uint8_t buffer[64]; /* partial input block awaiting processing */
}
md5_context;

/* Initialize/reset a context before hashing. */
void md5_starts( md5_context *ctx );
/* Absorb `length` bytes from `input` into the running hash. */
void md5_update( md5_context *ctx, const uint8_t *input, uint32_t length );
/* Finalize the hash and write the 16-byte digest. */
void md5_finish( md5_context *ctx, uint8_t digest[16] );
|
987c9ed2b407a773f705d3342d20b919b199d083
|
676acab8ff535019faff7da3afb8eecc3fa127f5
|
/target/cubepilot/cubeorange/libraries/stm32_lib/CMSIS/DSP/DSP_Lib_TestSuite/RefLibs/src/FastMathFunctions/cos.c
|
245942f42a1a56aa12b98ecc2a3882477deaa6da
|
[
"Apache-2.0"
] |
permissive
|
Firmament-Autopilot/FMT-Firmware
|
f8c324577245bd7e91af436954b4ce9421acbb41
|
0212fe89820376bfbedaded519552f6b011a7b8a
|
refs/heads/master
| 2023-09-01T11:37:46.194145
| 2023-08-29T06:33:10
| 2023-08-29T06:33:10
| 402,557,689
| 351
| 143
|
Apache-2.0
| 2023-09-12T05:28:39
| 2021-09-02T20:42:56
|
C
|
UTF-8
|
C
| false
| false
| 249
|
c
|
cos.c
|
#include "ref.h"
/*
 * Reference Q31 cosine: interprets the Q31 input as a fraction of a full
 * turn (x / 2^31 scaled by 2*pi), computes the cosine in single precision,
 * and rescales the [-1, 1] result back to Q31.
 * NOTE(review): a result of exactly 1.0f would scale to 2^31, which is not
 * representable in q31_t — presumably tolerated by the test suite; verify.
 */
q31_t ref_cos_q31(q31_t x)
{
	return (q31_t)(cosf((float32_t)x * 6.28318530717959f / 2147483648.0f) * 2147483648.0f);
}
/*
 * Reference Q15 cosine: interprets the Q15 input as a fraction of a full
 * turn (x / 2^15 scaled by 2*pi), computes the cosine in single precision,
 * and rescales the [-1, 1] result back to Q15 (same saturation caveat as
 * the Q31 variant for results of exactly 1.0f).
 */
q15_t ref_cos_q15(q15_t x)
{
	return (q15_t)(cosf((float32_t)x * 6.28318530717959f / 32768.0f) * 32768.0f);
}
|
6a3a962e171c6c98ce3bd39df2991c7d3258adae
|
104a1d21cf0916eee13cb65056ce1dc6aff59704
|
/contrib/sse_proj/irt/mex/src/defs-oldname.h
|
db5b4409d427ddf4409cf93bc65f04ebfc865e05
|
[
"MIT"
] |
permissive
|
JeffFessler/mirt
|
f82da11fa1af4e6fc747332ab86eb94bcb303a7c
|
e3475cc504a1fb72e096de5df9dc07a51644e533
|
refs/heads/main
| 2023-06-22T02:41:06.886967
| 2023-06-10T13:27:32
| 2023-06-10T13:27:32
| 190,079,355
| 130
| 58
|
MIT
| 2023-06-10T13:20:56
| 2019-06-03T20:42:27
|
MATLAB
|
UTF-8
|
C
| false
| false
| 1,554
|
h
|
defs-oldname.h
|
/*
 * defs-oldname.h
 * for backwards compatibility
 *
 * Maps legacy ALL_CAPS macro names onto their current mixed-case
 * equivalents so that old call sites keep compiling.  New code should
 * use the right-hand-side names directly.
 */
#ifndef TRUE
# define TRUE True
#endif
#ifndef FALSE
# define FALSE False
#endif
/* random numbers, types, and raw-memory helpers */
#define RAND01 Rand01
#define SEEDER Seeder
#define TYPE_CALC TypeCalc
#define SIZE_T Size_t
#define MEMSET Memset
#define MEMCPY Memcpy
#define BZERO Bzero
#define BCOPY Bcopy
/* allocation wrappers */
#define CALLOC Calloc
#define MALLOC Malloc
#define ALLOC Alloc
#define ALLOC_0 Alloc0
#define FREE Free
#define SWAB Swab
/* diagnostics: messages, warnings, fatal errors */
#define ERROR_MSG Msg
#define NOTE Note
#define NOTE1 Note1
#define NOTE2 Note2
#define WARN Warn
#define WARN1 Warn1
#define WARN2 Warn2
#define FAIL Fail
#define FAIL1 Fail1
#define FAIL2 Fail2
#define EXIT Exit
/* checked stdio wrappers */
#define FOPEN_0 Fopen0
#define FFLUSH_0 Fflush0
#define FSKIP_0 Fskip0
#define FCLOSE_0 Fclose0
#define FREAD_0 Fread0
#define FWRITE_0 Fwrite0
/* status codes and small arithmetic helpers */
#define SUCCESS Success
#define FAILURE Failure
#define SQR Sqr
#define MAX Max
#define MIN Min
#define ODD Odd
#define EVEN Even
#define FILE_READ FileRead
#define FILE_WRITE FileWrite
/* vector operations */
#define INLINE_1VECTOR Inline1vector
#define INLINE_2VECTOR Inline2vector
#define SCALE_VECT VectScale
#define INC_VECT VectInc
#define SET_VECT VectSet
#define NONNEG_VECT VectNonneg
#define ACCUM_VECT VectAccum
#define NORM2_VECT VectNorm2
#define MULT_VECT VectMult
#define DIV_VECT VectDiv
#define ADD_VECT VectAdd
#define ADD_VECT_SCALE VectAddScale
#define SUB_VECT VectSub
#define ADD_VECT_2 VectAdd2
#define INPROD_VECT VectInprod
#define INPROD_VECT2 VectInprod2
#define INPROD_VECT2_0 VectInprod2_0
#define MIN_MAX VectMinMax
#define MIN_MAX_0 VectMinMax0
dc41d2c376a617267ae365700d0b323242bdd1c1
|
a7856278e39f6030eacbb0680ca66b57e30ca06b
|
/generation/DirectX/shared/dxgi1_4/shared-dxgi1_4.h
|
c255b11e4c947d09274684299bc9dcc714a017e6
|
[
"MIT"
] |
permissive
|
terrafx/terrafx.interop.windows
|
07f1ac52cbaea6c0f5f8f6147df7a8dd50345f49
|
fadce5a41fa5e6f0282e80e96f033d0a2c130991
|
refs/heads/main
| 2023-07-09T03:56:06.057946
| 2023-06-23T16:01:17
| 2023-06-23T16:01:17
| 192,857,891
| 208
| 46
|
MIT
| 2023-07-07T17:25:55
| 2019-06-20T05:58:46
|
C#
|
UTF-8
|
C
| false
| false
| 51
|
h
|
shared-dxgi1_4.h
|
#include "..\..\..\TerraFX.h"
#include <dxgi1_4.h>
|
cf70f3f75eaec6a8f95c45a30756f2e449735ca3
|
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
|
/SOFTWARE/A64-TERES/linux-a64/drivers/media/usb/stk1160/stk1160-ac97.c
|
c8583c262c3d0b6f055cdf73a77e7d78037e2929
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"Linux-syscall-note",
"GPL-2.0-only",
"GPL-1.0-or-later"
] |
permissive
|
OLIMEX/DIY-LAPTOP
|
ae82f4ee79c641d9aee444db9a75f3f6709afa92
|
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
|
refs/heads/rel3
| 2023-08-04T01:54:19.483792
| 2023-04-03T07:18:12
| 2023-04-03T07:18:12
| 80,094,055
| 507
| 92
|
Apache-2.0
| 2023-04-03T07:05:59
| 2017-01-26T07:25:50
|
C
|
UTF-8
|
C
| false
| false
| 4,003
|
c
|
stk1160-ac97.c
|
/*
* STK1160 driver
*
* Copyright (C) 2012 Ezequiel Garcia
* <elezegarcia--a.t--gmail.com>
*
* Based on Easycap driver by R.M. Thomas
* Copyright (C) 2010 R.M. Thomas
* <rmthomas--a.t--sciolus.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/ac97_codec.h>
#include "stk1160.h"
#include "stk1160-reg.h"
static struct snd_ac97 *stk1160_ac97;
/*
 * Write a 16-bit value to an AC97 codec register through the STK1160
 * bridge registers (address, little-endian data, then command).
 * NOTE(review): the function does not poll for the command bit to clear,
 * so completion of the transfer is not confirmed here.
 */
static void stk1160_write_ac97(struct snd_ac97 *ac97, u16 reg, u16 value)
{
	struct stk1160 *dev = ac97->private_data;
	/* Set codec register address */
	stk1160_write_reg(dev, STK1160_AC97_ADDR, reg);
	/* Set codec command (low byte, then high byte) */
	stk1160_write_reg(dev, STK1160_AC97_CMD, value & 0xff);
	stk1160_write_reg(dev, STK1160_AC97_CMD + 1, (value & 0xff00) >> 8);
	/*
	 * Set command write bit to initiate write operation.
	 * The bit will be cleared when transfer is done.
	 */
	stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x8c);
}
/*
 * Read a 16-bit value from an AC97 codec register through the STK1160
 * bridge registers.  Returns the register value.
 * NOTE(review): return codes of stk1160_read_reg() are not checked;
 * on failure the bytes keep their zero initializers.
 */
static u16 stk1160_read_ac97(struct snd_ac97 *ac97, u16 reg)
{
	struct stk1160 *dev = ac97->private_data;
	u8 vall = 0;
	u8 valh = 0;
	/* Set codec register address */
	stk1160_write_reg(dev, STK1160_AC97_ADDR, reg);
	/*
	 * Set command read bit to initiate read operation.
	 * The bit will be cleared when transfer is done.
	 */
	stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x8b);
	/* Retrieve register value (low byte, then high byte) */
	stk1160_read_reg(dev, STK1160_AC97_CMD, &vall);
	stk1160_read_reg(dev, STK1160_AC97_CMD + 1, &valh);
	return (valh << 8) | vall;
}
/* Reset the AC97 interface and codec, then configure the audio format. */
static void stk1160_reset_ac97(struct snd_ac97 *ac97)
{
	struct stk1160 *dev = ac97->private_data;
	/* Two-step reset AC97 interface and hardware codec */
	stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x94);
	stk1160_write_reg(dev, STK1160_AC97CTL_0, 0x88);
	/* Set 16-bit audio data and choose L&R channel*/
	stk1160_write_reg(dev, STK1160_AC97CTL_1 + 2, 0x01);
}
/* AC97 bus callbacks handed to the ALSA AC97 layer via snd_ac97_bus(). */
static struct snd_ac97_bus_ops stk1160_ac97_ops = {
	.read = stk1160_read_ac97,
	.write = stk1160_write_ac97,
	.reset = stk1160_reset_ac97,
};
/*
 * Register a minimal ALSA card exposing the AC97 codec mixer controls.
 * On success stores the card in dev->snd_card and returns 0; on failure
 * frees the card, clears dev->snd_card, and returns a negative errno.
 */
int stk1160_ac97_register(struct stk1160 *dev)
{
	struct snd_card *card = NULL;
	struct snd_ac97_bus *ac97_bus;
	struct snd_ac97_template ac97_template;
	int rc;
	/*
	 * Just want a card to access ac97 controls,
	 * the actual capture interface will be handled by snd-usb-audio
	 */
	rc = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
		THIS_MODULE, 0, &card);
	if (rc < 0)
		return rc;
	snd_card_set_dev(card, dev->dev);
	/* TODO: I'm not sure where should I get these names :-( */
	snprintf(card->shortname, sizeof(card->shortname),
		 "stk1160-mixer");
	snprintf(card->longname, sizeof(card->longname),
		 "stk1160 ac97 codec mixer control");
	/*
	 * Use snprintf rather than strncpy: strncpy does not guarantee
	 * NUL termination when the source fills the destination buffer.
	 */
	snprintf(card->driver, sizeof(card->driver), "%s",
		 dev->dev->driver->name);
	rc = snd_ac97_bus(card, 0, &stk1160_ac97_ops, NULL, &ac97_bus);
	if (rc)
		goto err;
	/* We must set private_data before calling snd_ac97_mixer */
	memset(&ac97_template, 0, sizeof(ac97_template));
	ac97_template.private_data = dev;
	ac97_template.scaps = AC97_SCAP_SKIP_MODEM;
	rc = snd_ac97_mixer(ac97_bus, &ac97_template, &stk1160_ac97);
	if (rc)
		goto err;
	dev->snd_card = card;
	rc = snd_card_register(card);
	if (rc)
		goto err;
	return 0;
err:
	dev->snd_card = NULL;
	snd_card_free(card);
	return rc;
}
int stk1160_ac97_unregister(struct stk1160 *dev)
{
struct snd_card *card = dev->snd_card;
/*
* We need to check usb_device,
* because ac97 release attempts to communicate with codec
*/
if (card && dev->udev)
snd_card_free(card);
return 0;
}
|
9e4d3d32511a45dff5f8cf79ad9f205e4cdb861a
|
aa3befea459382dc5c01c925653d54f435b3fb0f
|
/arch/xtensa/src/esp32s2/esp32s2_spiflash.h
|
4af4888d43113f797ffc596c6c5d7e9d7b483fd7
|
[
"MIT-open-group",
"BSD-3-Clause",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"LicenseRef-scancode-warranty-disclaimer",
"MIT-0",
"LicenseRef-scancode-bsd-atmel",
"LicenseRef-scancode-gary-s-brown",
"LicenseRef-scancode-proprietary-license",
"SunPro",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-other-permissive",
"HPND",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"GPL-1.0-or-later",
"CC-BY-2.0",
"CC-BY-4.0"
] |
permissive
|
apache/nuttx
|
14519a7bff4a87935d94fb8fb2b19edb501c7cec
|
606b6d9310fb25c7d92c6f95bf61737e3c79fa0f
|
refs/heads/master
| 2023-08-25T06:55:45.822534
| 2023-08-23T16:03:31
| 2023-08-24T21:25:47
| 228,103,273
| 407
| 241
|
Apache-2.0
| 2023-09-14T18:26:05
| 2019-12-14T23:27:55
|
C
|
UTF-8
|
C
| false
| false
| 3,011
|
h
|
esp32s2_spiflash.h
|
/****************************************************************************
* arch/xtensa/src/esp32s2/esp32s2_spiflash.h
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __ARCH_XTENSA_SRC_ESP32S2_ESP32S2_SPIFLASH_H
#define __ARCH_XTENSA_SRC_ESP32S2_ESP32S2_SPIFLASH_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <sys/types.h>
#include <stdint.h>
#include <nuttx/mtd/mtd.h>
#ifndef __ASSEMBLY__
#undef EXTERN
#if defined(__cplusplus)
#define EXTERN extern "C"
extern "C"
{
#else
#define EXTERN extern
#endif
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
/****************************************************************************
* Name: spi_flash_read_encrypted
*
* Description:
*
* Read data from Encrypted Flash.
*
* If flash encryption is enabled, this function will transparently
* decrypt data as it is read.
* If flash encryption is not enabled, this function behaves the same as
* spi_flash_read().
*
* See esp_flash_encryption_enabled() for a function to check if flash
* encryption is enabled.
*
* Parameters:
* addr - source address of the data in Flash.
* buffer - pointer to the destination buffer
* size - length of data
*
* Returned Values: esp_err_t
*
****************************************************************************/
int spi_flash_read_encrypted(uint32_t addr, void *buffer, uint32_t size);
/****************************************************************************
* Name: esp32s2_spiflash_init
*
* Description:
 *   Initialize ESP32-S2 SPI flash driver.
*
* Returned Value:
* OK if success or a negative value if fail.
*
****************************************************************************/
int esp32s2_spiflash_init(void);
#ifdef __cplusplus
}
#endif
#undef EXTERN
#endif /* __ASSEMBLY__ */
#endif /* __ARCH_XTENSA_SRC_ESP32S2_ESP32S2_SPIFLASH_H */
|
d519ac850a34d34f4232ef4ab43d7ba60a41a74a
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/devel/arduino-avrdude/files/patch-bitbang.c
|
43a74468901c5c233131f0385af8ba28cb8931f0
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
C
| false
| false
| 319
|
c
|
patch-bitbang.c
|
--- bitbang.c.orig 2017-03-30 13:30:41 UTC
+++ bitbang.c
@@ -331,7 +331,7 @@ int bitbang_cmd(PROGRAMMER * pgm, const unsigned char
res[i] = bitbang_txrx(pgm, cmd[i]);
}
- if(verbose >= 2)
+ if(verbose > 4)
{
avrdude_message(MSG_NOTICE2, "bitbang_cmd(): [ ");
for(i = 0; i < 4; i++)
|
e7839e87ca0b4d331bae671b71371647648dbc38
|
eecd5e4c50d8b78a769bcc2675250576bed34066
|
/src/ts/adapt/impls/none/adaptnone.c
|
859f561185acf904938636934b77802ef6e33b7b
|
[
"BSD-2-Clause"
] |
permissive
|
petsc/petsc
|
3b1a04fea71858e0292f9fd4d04ea11618c50969
|
9c5460f9064ca60dd71a234a1f6faf93e7a6b0c9
|
refs/heads/main
| 2023-08-17T20:51:16.507070
| 2023-08-17T16:08:06
| 2023-08-17T16:08:06
| 8,691,401
| 341
| 169
|
NOASSERTION
| 2023-03-29T11:02:58
| 2013-03-10T20:55:21
|
C
|
UTF-8
|
C
| false
| false
| 1,037
|
c
|
adaptnone.c
|
#include <petsc/private/tsimpl.h> /*I "petscts.h" I*/
/*
 * TSAdaptChoose_None - trivial adaptor callback: accept every step, keep
 * the step size and scheme order unchanged, and report that no local
 * truncation error estimates were computed (all set to -1).
 */
static PetscErrorCode TSAdaptChoose_None(TSAdapt adapt, TS ts, PetscReal h, PetscInt *next_sc, PetscReal *next_h, PetscBool *accept, PetscReal *wlte, PetscReal *wltea, PetscReal *wlter)
{
PetscFunctionBegin;
*accept = PETSC_TRUE;
*next_sc = 0; /* Reuse the same order scheme */
*next_h = h; /* Reuse the old step */
*wlte = -1; /* Weighted local truncation error was not evaluated */
*wltea = -1; /* Weighted absolute local truncation error was not evaluated */
*wlter = -1; /* Weighted relative local truncation error was not evaluated */
PetscFunctionReturn(PETSC_SUCCESS);
}
/*MC
TSADAPTNONE - Time stepping controller that always accepts the current step and does not change it
Level: intermediate
.seealso: [](ch_ts), `TS`, `TSAdapt`, `TSAdaptChoose()`, `TSAdaptType`
M*/
/* Registration hook: installs the no-op choose callback on the adaptor. */
PETSC_EXTERN PetscErrorCode TSAdaptCreate_None(TSAdapt adapt)
{
PetscFunctionBegin;
adapt->ops->choose = TSAdaptChoose_None;
PetscFunctionReturn(PETSC_SUCCESS);
}
|
773cd5e128896e4b73984dc9d579f446522d7a49
|
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
|
/SOFTWARE/A64-TERES/linux-a64/kernel/events/core.c
|
0f52078396738a4098bd720535e15e2686ae4f57
|
[
"Linux-syscall-note",
"GPL-2.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
OLIMEX/DIY-LAPTOP
|
ae82f4ee79c641d9aee444db9a75f3f6709afa92
|
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
|
refs/heads/rel3
| 2023-08-04T01:54:19.483792
| 2023-04-03T07:18:12
| 2023-04-03T07:18:12
| 80,094,055
| 507
| 92
|
Apache-2.0
| 2023-04-03T07:05:59
| 2017-01-26T07:25:50
|
C
|
UTF-8
|
C
| false
| false
| 185,266
|
c
|
core.c
|
/*
* Performance events core code:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/cgroup.h>
#include <linux/compat.h>
#include "internal.h"
#include <asm/irq_regs.h>
/*
 * Argument bundle for remote_function(): the optional target task, the
 * callback to run, its argument, and the slot the callback's return
 * value (or an errno-style failure code) is written into.
 */
struct remote_function_call {
struct task_struct *p;	/* target task, or NULL for a plain CPU call */
int (*func)(void *info);	/* callback to invoke */
void *info;	/* opaque argument for func */
int ret;	/* func's return value, or -EAGAIN on a task miss */
};
/*
 * IPI callback: run tfc->func(tfc->info) on this CPU.  When a target
 * task was given, bail out with -EAGAIN if that task is no longer
 * current on this CPU (it migrated or was scheduled out meanwhile).
 */
static void remote_function(void *data)
{
struct remote_function_call *tfc = data;
struct task_struct *p = tfc->p;
if (p) {
tfc->ret = -EAGAIN;
/* the target task must still be running right here, right now */
if (task_cpu(p) != smp_processor_id() || !task_curr(p))
return;
}
tfc->ret = tfc->func(tfc->info);
}
/**
* task_function_call - call a function on the cpu on which a task runs
* @p: the task to evaluate
* @func: the function to be called
* @info: the function call argument
*
* Calls the function @func when the task is currently running. This might
* be on the current CPU, which just calls the function directly
*
* returns: @func return value, or
* -ESRCH - when the process isn't running
* -EAGAIN - when the process moved away
*/
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
struct remote_function_call data = {
.p = p,
.func = func,
.info = info,
.ret = -ESRCH, /* No such (running) process */
};
if (task_curr(p))
smp_call_function_single(task_cpu(p), remote_function, &data, 1);
return data.ret;
}
/**
 * cpu_function_call - call a function on the cpu
 * @cpu: the cpu on which to run @func
 * @func: the function to be called
 * @info: the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
struct remote_function_call data = {
.p = NULL,	/* no task constraint: plain CPU call */
.func = func,
.info = info,
.ret = -ENXIO, /* No such CPU */
};
/* runs remote_function() on @cpu; .ret stays -ENXIO if cpu is offline */
smp_call_function_single(cpu, remote_function, &data, 1);
return data.ret;
}
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
PERF_FLAG_PID_CGROUP)
/*
* branch priv levels that need permission checks
*/
#define PERF_SAMPLE_BRANCH_PERM_PLM \
(PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
enum event_type_t {
EVENT_FLEXIBLE = 0x1,
EVENT_PINNED = 0x2,
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
/*
* perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
/*
* perf event paranoia level:
* -1 - not paranoid at all
* 0 - disallow raw tracepoint access for unpriv
* 1 - disallow cpu events for unpriv
* 2 - disallow kernel profiling for unpriv
*/
int sysctl_perf_event_paranoid __read_mostly = 1;
/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
/*
* max perf event sample rate
*/
#define DEFAULT_MAX_SAMPLE_RATE 100000
#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT 25
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
static atomic_t perf_sample_allowed_ns __read_mostly =
ATOMIC_INIT( DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100);
void update_perf_cpu_limits(void)
{
u64 tmp = perf_sample_period_ns;
tmp *= sysctl_perf_cpu_time_max_percent;
do_div(tmp, 100);
atomic_set(&perf_sample_allowed_ns, tmp);
}
int perf_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
update_perf_cpu_limits();
return 0;
}
int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
update_perf_cpu_limits();
return 0;
}
/*
* perf samples are done in some very critical code paths (NMIs).
* If they take too much CPU time, the system can lock up and not
* get any real work done. This will drop the sample rate when
* we detect that events are taking too long.
*/
#define NR_ACCUMULATED_SAMPLES 128
DEFINE_PER_CPU(u64, running_sample_length);
/*
 * Account the time one perf sample took on this CPU.  Maintains a
 * per-cpu exponential moving average of sample duration; when the
 * average exceeds the allowed budget (perf_sample_allowed_ns), halve
 * max_samples_per_tick and lower the global sample rate accordingly,
 * so that slow samples cannot monopolize the CPU (NMI context!).
 */
void perf_sample_event_took(u64 sample_len_ns)
{
u64 avg_local_sample_len;
u64 local_samples_len;
/* budget of 0 means the throttling mechanism is disabled */
if (atomic_read(&perf_sample_allowed_ns) == 0)
return;
/* decay the counter by 1 average sample */
local_samples_len = __get_cpu_var(running_sample_length);
local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
local_samples_len += sample_len_ns;
__get_cpu_var(running_sample_length) = local_samples_len;
/*
 * note: this will be biased artificially low until we have
 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
 * from having to maintain a count.
 */
avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
if (avg_local_sample_len <= atomic_read(&perf_sample_allowed_ns))
return;
/* never throttle below one sample per tick */
if (max_samples_per_tick <= 1)
return;
max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
printk_ratelimited(KERN_WARNING
"perf samples too long (%lld > %d), lowering "
"kernel.perf_event_max_sample_rate to %d\n",
avg_local_sample_len,
atomic_read(&perf_sample_allowed_ns),
sysctl_perf_event_sample_rate);
update_perf_cpu_limits();
}
static atomic64_t perf_event_id;
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type);
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
{
return "pmu";
}
static inline u64 perf_clock(void)
{
return local_clock();
}
static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx)
raw_spin_lock(&ctx->lock);
}
static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
if (ctx)
raw_spin_unlock(&ctx->lock);
raw_spin_unlock(&cpuctx->ctx.lock);
}
#ifdef CONFIG_CGROUP_PERF
/*
* perf_cgroup_info keeps track of time_enabled for a cgroup.
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
u64 time;
u64 timestamp;
};
struct perf_cgroup {
struct cgroup_subsys_state css;
struct perf_cgroup_info __percpu *info;
};
/*
* Must ensure cgroup is pinned (css_get) before calling
* this function. In other words, we cannot call this function
* if there is no cgroup event for the current CPU context.
*/
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
return container_of(task_subsys_state(task, perf_subsys_id),
struct perf_cgroup, css);
}
static inline bool
perf_cgroup_match(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
/* @event doesn't care about cgroup */
if (!event->cgrp)
return true;
/* wants specific cgroup scope but @cpuctx isn't associated with any */
if (!cpuctx->cgrp)
return false;
/*
* Cgroup scoping is recursive. An event enabled for a cgroup is
* also enabled for all its descendant cgroups. If @cpuctx's
* cgroup is a descendant of @event's (the test covers identity
* case), it's a match.
*/
return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
event->cgrp->css.cgroup);
}
static inline bool perf_tryget_cgroup(struct perf_event *event)
{
return css_tryget(&event->cgrp->css);
}
static inline void perf_put_cgroup(struct perf_event *event)
{
css_put(&event->cgrp->css);
}
static inline void perf_detach_cgroup(struct perf_event *event)
{
perf_put_cgroup(event);
event->cgrp = NULL;
}
static inline int is_cgroup_event(struct perf_event *event)
{
return event->cgrp != NULL;
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
return t->time;
}
static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
struct perf_cgroup_info *info;
u64 now;
now = perf_clock();
info = this_cpu_ptr(cgrp->info);
info->time += now - info->timestamp;
info->timestamp = now;
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
struct perf_cgroup *cgrp_out = cpuctx->cgrp;
if (cgrp_out)
__update_cgrp_time(cgrp_out);
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
struct perf_cgroup *cgrp;
/*
* ensure we access cgroup data only when needed and
* when we know the cgroup is pinned (css_get)
*/
if (!is_cgroup_event(event))
return;
cgrp = perf_cgroup_from_task(current);
/*
* Do not update time when cgroup is not active
*/
if (cgrp == event->cgrp)
__update_cgrp_time(event->cgrp);
}
static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
struct perf_event_context *ctx)
{
struct perf_cgroup *cgrp;
struct perf_cgroup_info *info;
/*
* ctx->lock held by caller
* ensure we do not access cgroup data
* unless we have the cgroup pinned (css_get)
*/
if (!task || !ctx->nr_cgroups)
return;
cgrp = perf_cgroup_from_task(task);
info = this_cpu_ptr(cgrp->info);
info->timestamp = ctx->timestamp;
}
#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
/*
* reschedule events based on the cgroup constraint of task.
*
* mode SWOUT : schedule out everything
* mode SWIN : schedule in based on cgroup for next
*/
void perf_cgroup_switch(struct task_struct *task, int mode)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
unsigned long flags;
/*
* disable interrupts to avoid geting nr_cgroup
* changes via __perf_event_disable(). Also
* avoids preemption.
*/
local_irq_save(flags);
/*
* we reschedule only in the presence of cgroup
* constrained events.
*/
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
continue; /* ensure we process each cpuctx once */
/*
* perf_cgroup_events says at least one
* context on this CPU has cgroup events.
*
* ctx->nr_cgroups reports the number of cgroup
* events for a context.
*/
if (cpuctx->ctx.nr_cgroups > 0) {
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
if (mode & PERF_CGROUP_SWOUT) {
cpu_ctx_sched_out(cpuctx, EVENT_ALL);
/*
* must not be done before ctxswout due
* to event_filter_match() in event_sched_out()
*/
cpuctx->cgrp = NULL;
}
if (mode & PERF_CGROUP_SWIN) {
WARN_ON_ONCE(cpuctx->cgrp);
/*
* set cgrp before ctxsw in to allow
* event_filter_match() to not have to pass
* task around
*/
cpuctx->cgrp = perf_cgroup_from_task(task);
cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
}
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
}
rcu_read_unlock();
local_irq_restore(flags);
}
static inline void perf_cgroup_sched_out(struct task_struct *task,
struct task_struct *next)
{
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
/*
* we come here when we know perf_cgroup_events > 0
*/
cgrp1 = perf_cgroup_from_task(task);
/*
* next is NULL when called from perf_event_enable_on_exec()
* that will systematically cause a cgroup_switch()
*/
if (next)
cgrp2 = perf_cgroup_from_task(next);
/*
* only schedule out current cgroup events if we know
* that we are switching to a different cgroup. Otherwise,
* do no touch the cgroup events.
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_cgroup *cgrp1;
struct perf_cgroup *cgrp2 = NULL;
/*
* we come here when we know perf_cgroup_events > 0
*/
cgrp1 = perf_cgroup_from_task(task);
/* prev can never be NULL */
cgrp2 = perf_cgroup_from_task(prev);
/*
* only need to schedule in cgroup events if we are changing
* cgroup during ctxsw. Cgroup events were not scheduled
* out of ctxsw out if that was not the case.
*/
if (cgrp1 != cgrp2)
perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
struct perf_cgroup *cgrp;
struct cgroup_subsys_state *css;
struct fd f = fdget(fd);
int ret = 0;
if (!f.file)
return -EBADF;
css = cgroup_css_from_dir(f.file, perf_subsys_id);
if (IS_ERR(css)) {
ret = PTR_ERR(css);
goto out;
}
cgrp = container_of(css, struct perf_cgroup, css);
event->cgrp = cgrp;
/* must be done before we fput() the file */
if (!perf_tryget_cgroup(event)) {
event->cgrp = NULL;
ret = -ENOENT;
goto out;
}
/*
* all events in a group must monitor
* the same cgroup because a task belongs
* to only one perf cgroup at a time
*/
if (group_leader && group_leader->cgrp != cgrp) {
perf_detach_cgroup(event);
ret = -EINVAL;
}
out:
fdput(f);
return ret;
}
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
event->shadow_ctx_time = now - t->timestamp;
}
static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
/*
* when the current task's perf cgroup does not match
* the event's, we need to remember to call the
* perf_mark_enable() function the first time a task with
* a matching perf cgroup is scheduled in.
*/
if (is_cgroup_event(event) && !perf_cgroup_match(event))
event->cgrp_defer_enabled = 1;
}
static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
if (!event->cgrp_defer_enabled)
return;
event->cgrp_defer_enabled = 0;
event->tstamp_enabled = tstamp - event->total_time_enabled;
list_for_each_entry(sub, &event->sibling_list, group_entry) {
if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
sub->tstamp_enabled = tstamp - sub->total_time_enabled;
sub->cgrp_defer_enabled = 0;
}
}
}
#else /* !CONFIG_CGROUP_PERF */
static inline bool
perf_cgroup_match(struct perf_event *event)
{
return true;
}
static inline void perf_detach_cgroup(struct perf_event *event)
{}
static inline int is_cgroup_event(struct perf_event *event)
{
return 0;
}
static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
return 0;
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}
static inline void perf_cgroup_sched_out(struct task_struct *task,
struct task_struct *next)
{
}
static inline void perf_cgroup_sched_in(struct task_struct *prev,
struct task_struct *task)
{
}
static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
{
return -EINVAL;
}
static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
struct perf_event_context *ctx)
{
}
void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
return 0;
}
static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}
static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
struct perf_event_context *ctx)
{
}
#endif
/*
 * Disable @pmu on this CPU.  Disables are counted per-cpu so nested
 * disable/enable pairs work; only the first disable reaches the driver.
 */
void perf_pmu_disable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
if (!(*count)++)
pmu->pmu_disable(pmu);
}
/* Re-enable @pmu once the last outstanding perf_pmu_disable() is undone. */
void perf_pmu_enable(struct pmu *pmu)
{
int *count = this_cpu_ptr(pmu->pmu_disable_count);
if (!--(*count))
pmu->pmu_enable(pmu);
}
static DEFINE_PER_CPU(struct list_head, rotation_list);
/*
* perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
* because they're strictly cpu affine and rotate_start is called with IRQs
* disabled, while rotate_context is called from IRQ context.
*/
static void perf_pmu_rotate_start(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
struct list_head *head = &__get_cpu_var(rotation_list);
WARN_ON(!irqs_disabled());
if (list_empty(&cpuctx->rotation_list)) {
int was_empty = list_empty(head);
list_add(&cpuctx->rotation_list, head);
if (was_empty)
tick_nohz_full_kick();
}
}
/* Take a reference on @ctx; the context must already have a non-zero refcount. */
static void get_ctx(struct perf_event_context *ctx)
{
WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}
/*
 * Drop a reference on @ctx.  On the last put, release the references the
 * context holds on its parent (clone source) context and owning task, and
 * free it after an RCU grace period -- readers may still be walking the
 * context under rcu_read_lock().
 */
static void put_ctx(struct perf_event_context *ctx)
{
if (atomic_dec_and_test(&ctx->refcount)) {
if (ctx->parent_ctx)
put_ctx(ctx->parent_ctx);
if (ctx->task)
put_task_struct(ctx->task);
kfree_rcu(ctx, rcu_head);
}
}
/* Detach @ctx from its parent (clone source), dropping the parent reference. */
static void unclone_ctx(struct perf_event_context *ctx)
{
if (ctx->parent_ctx) {
put_ctx(ctx->parent_ctx);
ctx->parent_ctx = NULL;
}
}
/*
 * Resolve the tgid of @p as seen from the pid namespace the event was
 * created in.  Only top-level events carry that namespace, so inherited
 * events defer to their parent.
 */
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	struct perf_event *ns_event = event->parent ? event->parent : event;

	return task_tgid_nr_ns(p, ns_event->ns);
}
/*
 * Resolve the pid of @p as seen from the pid namespace the event was
 * created in; see perf_event_pid() for the parent-walk rationale.
 */
static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	struct perf_event *ns_event = event->parent ? event->parent : event;

	return task_pid_nr_ns(p, ns_event->ns);
}
/*
 * Event id reported to userspace: for inherited events this is the
 * parent event's id, so a whole inheritance tree shares one id.
 */
static u64 primary_event_id(struct perf_event *event)
{
	return event->parent ? event->parent->id : event->id;
}
/*
* Get the perf_event_context for a task and lock it.
* This has to cope with with the fact that until it is locked,
* the context could get moved to another task.
*/
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
struct perf_event_context *ctx;
retry:
/*
* One of the few rules of preemptible RCU is that one cannot do
* rcu_read_unlock() while holding a scheduler (or nested) lock when
* part of the read side critical section was preemptible -- see
* rcu_read_unlock_special().
*
* Since ctx->lock nests under rq->lock we must ensure the entire read
* side critical section is non-preemptible.
*/
preempt_disable();
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
if (ctx) {
/*
* If this context is a clone of another, it might
* get swapped for another underneath us by
* perf_event_task_sched_out, though the
* rcu_read_lock() protects us from any context
* getting freed. Lock the context and check if it
* got swapped before we could get the lock, and retry
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
raw_spin_lock_irqsave(&ctx->lock, *flags);
if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
rcu_read_unlock();
preempt_enable();
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
ctx = NULL;
}
}
rcu_read_unlock();
preempt_enable();
return ctx;
}
/*
* Get the context for a task and increment its pin_count so it
* can't get swapped to another task. This also increments its
* reference count so that the context can't get freed.
*/
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
struct perf_event_context *ctx;
unsigned long flags;
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
static void perf_unpin_context(struct perf_event_context *ctx)
{
unsigned long flags;
raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
/*
* Update the record of the current time in a context.
*/
static void update_context_time(struct perf_event_context *ctx)
{
u64 now = perf_clock();
ctx->time += now - ctx->timestamp;
ctx->timestamp = now;
}
static u64 perf_event_time(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
if (is_cgroup_event(event))
return perf_cgroup_event_time(event);
return ctx ? ctx->time : 0;
}
/*
* Update the total_time_enabled and total_time_running fields for a event.
* The caller of this function needs to hold the ctx->lock.
*/
static void update_event_times(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
u64 run_end;
if (event->state < PERF_EVENT_STATE_INACTIVE ||
event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
return;
/*
* in cgroup mode, time_enabled represents
* the time the event was enabled AND active
* tasks were in the monitored cgroup. This is
* independent of the activity of the context as
* there may be a mix of cgroup and non-cgroup events.
*
* That is why we treat cgroup events differently
* here.
*/
if (is_cgroup_event(event))
run_end = perf_cgroup_event_time(event);
else if (ctx->is_active)
run_end = ctx->time;
else
run_end = event->tstamp_stopped;
event->total_time_enabled = run_end - event->tstamp_enabled;
if (event->state == PERF_EVENT_STATE_INACTIVE)
run_end = event->tstamp_stopped;
else
run_end = perf_event_time(event);
event->total_time_running = run_end - event->tstamp_running;
}
/*
* Update total_time_enabled and total_time_running for all events in a group.
*/
static void update_group_times(struct perf_event *leader)
{
struct perf_event *event;
update_event_times(leader);
list_for_each_entry(event, &leader->sibling_list, group_entry)
update_event_times(event);
}
/*
 * Select the group list within @ctx that @event belongs on: pinned
 * events go on the pinned list, everything else on the flexible list.
 */
static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	return event->attr.pinned ? &ctx->pinned_groups
				  : &ctx->flexible_groups;
}
/*
* Add a event from the lists for its context.
* Must be called with ctx->mutex and ctx->lock held.
*/
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
event->attach_state |= PERF_ATTACH_CONTEXT;
/*
* If we're a stand alone event or group leader, we go to the context
* list, group events are kept attached to the group so that
* perf_group_detach can, at all times, locate all siblings.
*/
if (event->group_leader == event) {
struct list_head *list;
if (is_software_event(event))
event->group_flags |= PERF_GROUP_SOFTWARE;
list = ctx_group_list(event, ctx);
list_add_tail(&event->group_entry, list);
}
if (is_cgroup_event(event))
ctx->nr_cgroups++;
if (has_branch_stack(event))
ctx->nr_branch_stack++;
list_add_rcu(&event->event_entry, &ctx->event_list);
if (!ctx->nr_events)
perf_pmu_rotate_start(ctx->pmu);
ctx->nr_events++;
if (event->attr.inherit_stat)
ctx->nr_stat++;
}
/*
* Initialize event state based on the perf_event_attr::disabled.
*/
static inline void perf_event__state_init(struct perf_event *event)
{
event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
PERF_EVENT_STATE_INACTIVE;
}
/*
* Called at perf_event creation and when events are attached/detached from a
* group.
*/
static void perf_event__read_size(struct perf_event *event)
{
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_GROUP) {
nr += event->group_leader->nr_siblings;
size += sizeof(u64);
}
size += entry * nr;
event->read_size = size;
}
static void perf_event__header_size(struct perf_event *event)
{
struct perf_sample_data *data;
u64 sample_type = event->attr.sample_type;
u16 size = 0;
perf_event__read_size(event);
if (sample_type & PERF_SAMPLE_IP)
size += sizeof(data->ip);
if (sample_type & PERF_SAMPLE_ADDR)
size += sizeof(data->addr);
if (sample_type & PERF_SAMPLE_PERIOD)
size += sizeof(data->period);
if (sample_type & PERF_SAMPLE_WEIGHT)
size += sizeof(data->weight);
if (sample_type & PERF_SAMPLE_READ)
size += event->read_size;
if (sample_type & PERF_SAMPLE_DATA_SRC)
size += sizeof(data->data_src.val);
event->header_size = size;
}
static void perf_event__id_header_size(struct perf_event *event)
{
struct perf_sample_data *data;
u64 sample_type = event->attr.sample_type;
u16 size = 0;
if (sample_type & PERF_SAMPLE_TID)
size += sizeof(data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
size += sizeof(data->time);
if (sample_type & PERF_SAMPLE_ID)
size += sizeof(data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
size += sizeof(data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
size += sizeof(data->cpu_entry);
event->id_header_size = size;
}
static void perf_group_attach(struct perf_event *event)
{
struct perf_event *group_leader = event->group_leader, *pos;
/*
* We can have double attach due to group movement in perf_event_open.
*/
if (event->attach_state & PERF_ATTACH_GROUP)
return;
event->attach_state |= PERF_ATTACH_GROUP;
if (group_leader == event)
return;
if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
!is_software_event(event))
group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
list_add_tail(&event->group_entry, &group_leader->sibling_list);
group_leader->nr_siblings++;
perf_event__header_size(group_leader);
list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
perf_event__header_size(pos);
}
/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx;
/*
 * We can have double detach due to exit/hot-unplug + close.
 */
if (!(event->attach_state & PERF_ATTACH_CONTEXT))
return;
event->attach_state &= ~PERF_ATTACH_CONTEXT;
if (is_cgroup_event(event)) {
ctx->nr_cgroups--;
cpuctx = __get_cpu_context(ctx);
/*
 * if there are no more cgroup events
 * then clear cgrp to avoid stale pointer
 * in update_cgrp_time_from_cpuctx()
 */
if (!ctx->nr_cgroups)
cpuctx->cgrp = NULL;
}
if (has_branch_stack(event))
ctx->nr_branch_stack--;
ctx->nr_events--;
if (event->attr.inherit_stat)
ctx->nr_stat--;
/* unlink under RCU: concurrent readers may still see the event */
list_del_rcu(&event->event_entry);
/* only group leaders sit on the ctx pinned/flexible group lists */
if (event->group_leader == event)
list_del_init(&event->group_entry);
/* freeze time_enabled/time_running before the event goes inactive */
update_group_times(event);
/*
 * If event was in error state, then keep it
 * that way, otherwise bogus counts will be
 * returned on read(). The only way to get out
 * of error state is by explicit re-enabling
 * of the event
 */
if (event->state > PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_OFF;
}
static void perf_group_detach(struct perf_event *event)
{
struct perf_event *sibling, *tmp;
struct list_head *list = NULL;
/*
* We can have double detach due to exit/hot-unplug + close.
*/
if (!(event->attach_state & PERF_ATTACH_GROUP))
return;
event->attach_state &= ~PERF_ATTACH_GROUP;
/*
* If this is a sibling, remove it from its group.
*/
if (event->group_leader != event) {
list_del_init(&event->group_entry);
event->group_leader->nr_siblings--;
goto out;
}
if (!list_empty(&event->group_entry))
list = &event->group_entry;
/*
* If this was a group event with sibling events then
* upgrade the siblings to singleton events by adding them
* to whatever list we are on.
*/
list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
if (list)
list_move_tail(&sibling->group_entry, list);
sibling->group_leader = sibling;
/* Inherit group flags from the previous leader */
sibling->group_flags = event->group_flags;
}
out:
perf_event__header_size(event->group_leader);
list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
perf_event__header_size(tmp);
}
/*
 * An event may be scheduled here when it is not pinned to a different
 * CPU and its cgroup constraint (if any) matches the current task.
 */
static inline int
event_filter_match(struct perf_event *event)
{
	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	return perf_cgroup_match(event);
}
static void
event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
u64 delta;
/*
* An event which could not be activated because of
* filter mismatch still needs to have its timings
* maintained, otherwise bogus information is return
* via read() for time_enabled, time_running:
*/
if (event->state == PERF_EVENT_STATE_INACTIVE
&& !event_filter_match(event)) {
delta = tstamp - event->tstamp_stopped;
event->tstamp_running += delta;
event->tstamp_stopped = tstamp;
}
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
event->state = PERF_EVENT_STATE_OFF;
}
event->tstamp_stopped = tstamp;
event->pmu->del(event, 0);
event->oncpu = -1;
if (!is_software_event(event))
cpuctx->active_oncpu--;
ctx->nr_active--;
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
}
/*
 * Schedule out a whole event group: the leader first, then every
 * sibling.  If the group was active and exclusive, release the
 * exclusive claim on the cpu context.
 */
static void
group_sched_out(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event;
/* Remember the leader's state before event_sched_out() changes it. */
int state = group_event->state;
event_sched_out(group_event, cpuctx, ctx);
/*
 * Schedule out siblings (if any):
 */
list_for_each_entry(event, &group_event->sibling_list, group_entry)
event_sched_out(event, cpuctx, ctx);
if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
/* Argument bundle for __perf_remove_from_context() cross calls. */
struct remove_event {
/* event to remove */
struct perf_event *event;
/* also detach it from its group when true */
bool detach_group;
};
/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
struct remove_event *re = info;
struct perf_event *event = re->event;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
raw_spin_lock(&ctx->lock);
event_sched_out(event, cpuctx, ctx);
if (re->detach_group)
perf_group_detach(event);
list_del_event(event, ctx);
/* Last event gone: the task context is no longer active on this cpu. */
if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
ctx->is_active = 0;
cpuctx->task_ctx = NULL;
}
raw_spin_unlock(&ctx->lock);
return 0;
}
/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
struct remove_event re = {
.event = event,
.detach_group = detach_group,
};
lockdep_assert_held(&ctx->mutex);
if (!task) {
/*
 * Per cpu events are removed via an smp call and
 * the removal is always successful.
 */
cpu_function_call(event->cpu, __perf_remove_from_context, &re);
return;
}
retry:
/* Succeeds only if the task was running; otherwise fall through. */
if (!task_function_call(task, __perf_remove_from_context, &re))
return;
raw_spin_lock_irq(&ctx->lock);
/*
 * If we failed to find a running task, but find the context active now
 * that we've acquired the ctx->lock, retry.
 */
if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
/*
 * Reload the task pointer, it might have been changed by
 * a concurrent perf_event_context_sched_out().
 */
task = ctx->task;
goto retry;
}
/*
 * Since the task isn't running, its safe to remove the event, us
 * holding the ctx->lock ensures the task won't get scheduled in.
 */
if (detach_group)
perf_group_detach(event);
list_del_event(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to disable a performance event
 *
 * Runs on the CPU that owns the event's context; returns 0 on success
 * or -EINVAL when the context is no longer current on this cpu (the
 * caller's retry loop handles that).
 */
int __perf_event_disable(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
/*
 * If this is a per-task event, need to check whether this
 * event's task is the current task on this cpu.
 *
 * Can trigger due to concurrent perf_event_context_sched_out()
 * flipping contexts around.
 */
if (ctx->task && cpuctx->task_ctx != ctx)
return -EINVAL;
raw_spin_lock(&ctx->lock);
/*
 * If the event is on, turn it off.
 * If it is in error state, leave it in error state.
 */
if (event->state >= PERF_EVENT_STATE_INACTIVE) {
/* Fold the elapsed time into the event before switching it off. */
update_context_time(ctx);
update_cgrp_time_from_event(event);
update_group_times(event);
/* Disabling a leader takes the whole group off the PMU. */
if (event == event->group_leader)
group_sched_out(event, cpuctx, ctx);
else
event_sched_out(event, cpuctx, ctx);
event->state = PERF_EVENT_STATE_OFF;
}
raw_spin_unlock(&ctx->lock);
return 0;
}
/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
 * Disable the event on the cpu that it's on
 */
cpu_function_call(event->cpu, __perf_event_disable, event);
return;
}
retry:
/* Cross-call succeeded while the task was running: we are done. */
if (!task_function_call(task, __perf_event_disable, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
 * If the event is still active, we need to retry the cross-call.
 */
if (event->state == PERF_EVENT_STATE_ACTIVE) {
raw_spin_unlock_irq(&ctx->lock);
/*
 * Reload the task pointer, it might have been changed by
 * a concurrent perf_event_context_sched_out().
 */
task = ctx->task;
goto retry;
}
/*
 * Since we have the lock this context can't be scheduled
 * in, so we can change the state safely.
 */
if (event->state == PERF_EVENT_STATE_INACTIVE) {
update_group_times(event);
event->state = PERF_EVENT_STATE_OFF;
}
raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_disable);
/*
 * Record the reference timestamp against which the event's running time
 * will later be reconstructed (see perf_output_read()); cgroup events
 * use the cgroup clock, everything else the context clock.
 */
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
u64 tstamp)
{
/*
 * use the correct time source for the time snapshot
 *
 * We could get by without this by leveraging the
 * fact that to get to this function, the caller
 * has most likely already called update_context_time()
 * and update_cgrp_time_xx() and thus both timestamp
 * are identical (or very close). Given that tstamp is,
 * already adjusted for cgroup, we could say that:
 * tstamp - ctx->timestamp
 * is equivalent to
 * tstamp - cgrp->timestamp.
 *
 * Then, in perf_output_read(), the calculation would
 * work with no changes because:
 * - event is guaranteed scheduled in
 * - no scheduled out in between
 * - thus the timestamp would be the same
 *
 * But this is a bit hairy.
 *
 * So instead, we have an explicit cgroup call to remain
 * within the time source all along. We believe it
 * is cleaner and simpler to understand.
 */
if (is_cgroup_event(event))
perf_cgroup_set_shadow_time(event, tstamp);
else
event->shadow_ctx_time = tstamp - ctx->timestamp;
}
/* Sentinel stored in hw.interrupts while an event is throttled. */
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
/*
 * Put a single event on the PMU.  Returns 0 on success, -EAGAIN if the
 * pmu::add() call failed (e.g. no free counter); in the latter case the
 * event is left INACTIVE.  Called with ctx->lock held by the schedulers.
 */
static int
event_sched_in(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
/* OFF/ERROR events are skipped, not failures. */
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
event->state = PERF_EVENT_STATE_ACTIVE;
event->oncpu = smp_processor_id();
/*
 * Unthrottle events, since we scheduled we might have missed several
 * ticks already, also for a heavily scheduling task there is little
 * guarantee it'll get a tick in a timely manner.
 */
if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
perf_log_throttle(event, 1);
event->hw.interrupts = 0;
}
/*
 * The new state must be visible before we turn it on in the hardware:
 */
smp_wmb();
if (event->pmu->add(event, PERF_EF_START)) {
/* Roll the state back so callers can retry or give up. */
event->state = PERF_EVENT_STATE_INACTIVE;
event->oncpu = -1;
return -EAGAIN;
}
/* Account the time spent scheduled out, then snapshot the time base. */
event->tstamp_running += tstamp - event->tstamp_stopped;
perf_set_shadow_time(event, ctx, tstamp);
if (!is_software_event(event))
cpuctx->active_oncpu++;
ctx->nr_active++;
if (event->attr.freq && event->attr.sample_freq)
ctx->nr_freq++;
if (event->attr.exclusive)
cpuctx->exclusive = 1;
return 0;
}
/*
 * Schedule in an event group atomically: either the leader and all
 * siblings go on together (inside a PMU transaction), or none do and
 * -EAGAIN is returned after rolling back the partial group.
 */
static int
group_sched_in(struct perf_event *group_event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group = NULL;
struct pmu *pmu = group_event->pmu;
u64 now = ctx->time;
/* Becomes true once we reach the first event that was never scheduled. */
bool simulate = false;
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
/* Group scheduling happens inside a PMU transaction. */
pmu->start_txn(pmu);
if (event_sched_in(group_event, cpuctx, ctx)) {
pmu->cancel_txn(pmu);
return -EAGAIN;
}
/*
 * Schedule in siblings as one group (if any):
 */
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event_sched_in(event, cpuctx, ctx)) {
partial_group = event;
goto group_error;
}
}
/* commit_txn() returning 0 means the whole group is on. */
if (!pmu->commit_txn(pmu))
return 0;
group_error:
/*
 * Groups can be scheduled in as one unit only, so undo any
 * partial group before returning:
 * The events up to the failed event are scheduled out normally,
 * tstamp_stopped will be updated.
 *
 * The failed events and the remaining siblings need to have
 * their timings updated as if they had gone thru event_sched_in()
 * and event_sched_out(). This is required to get consistent timings
 * across the group. This also takes care of the case where the group
 * could never be scheduled by ensuring tstamp_stopped is set to mark
 * the time the event was actually stopped, such that time delta
 * calculation in update_event_times() is correct.
 */
list_for_each_entry(event, &group_event->sibling_list, group_entry) {
if (event == partial_group)
simulate = true;
if (simulate) {
event->tstamp_running += now - event->tstamp_stopped;
event->tstamp_stopped = now;
} else {
event_sched_out(event, cpuctx, ctx);
}
}
event_sched_out(group_event, cpuctx, ctx);
pmu->cancel_txn(pmu);
return -EAGAIN;
}
/*
 * Work out whether we can put this event group on the CPU now.
 * Returns non-zero when the group may be scheduled in.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/* Groups consisting entirely of software events can always go on. */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * No hardware is available while an exclusive group is already on,
	 * and an exclusive group cannot join events already on the CPU.
	 */
	if (cpuctx->exclusive)
		return 0;
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/* Otherwise, we can go on iff all previous groups were able to. */
	return can_add_hw;
}
/*
 * Link @event into @ctx's lists, attach it to its group, and stamp all
 * three timestamps with the current event time so its accounting starts
 * from "now".
 */
static void add_event_to_ctx(struct perf_event *event,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
list_add_event(event, ctx);
perf_group_attach(event);
event->tstamp_enabled = tstamp;
event->tstamp_running = tstamp;
event->tstamp_stopped = tstamp;
}
/* Forward declarations; definitions follow later in this file. */
static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
/*
 * Schedule in the cpu context and (optionally) a task context.  Pinned
 * groups are scheduled before flexible ones so they get first pick of
 * the hardware.
 */
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
struct task_struct *task)
{
cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
if (ctx)
ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
if (ctx)
ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 *
 * Schedules everything out, adds the event to its context, then
 * schedules everything back in so the new event competes for the PMU
 * with the correct priority ordering.
 */
static int __perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
struct task_struct *task = current;
perf_ctx_lock(cpuctx, task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
/*
 * If there was an active task_ctx schedule it out.
 */
if (task_ctx)
task_ctx_sched_out(task_ctx);
/*
 * If the context we're installing events in is not the
 * active task_ctx, flip them.
 */
if (ctx->task && task_ctx != ctx) {
if (task_ctx)
raw_spin_unlock(&task_ctx->lock);
raw_spin_lock(&ctx->lock);
task_ctx = ctx;
}
if (task_ctx) {
cpuctx->task_ctx = task_ctx;
task = task_ctx->task;
}
cpu_ctx_sched_out(cpuctx, EVENT_ALL);
update_context_time(ctx);
/*
 * update cgrp time only if current cgrp
 * matches event->cgrp. Must be done before
 * calling add_event_to_ctx()
 */
update_cgrp_time_from_event(event);
add_event_to_ctx(event, ctx);
/*
 * Schedule everything back in
 */
perf_event_sched_in(cpuctx, task_ctx, task);
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, task_ctx);
return 0;
}
/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
struct perf_event *event,
int cpu)
{
struct task_struct *task = ctx->task;
lockdep_assert_held(&ctx->mutex);
event->ctx = ctx;
/* Only rebind cpu-bound events; -1 means "any cpu" and stays that way. */
if (event->cpu != -1)
event->cpu = cpu;
if (!task) {
/*
 * Per cpu events are installed via an smp call and
 * the install is always successful.
 */
cpu_function_call(cpu, __perf_install_in_context, event);
return;
}
retry:
/* Succeeds only while the task is running; otherwise fall through. */
if (!task_function_call(task, __perf_install_in_context, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
 * If we failed to find a running task, but find the context active now
 * that we've acquired the ctx->lock, retry.
 */
if (ctx->is_active) {
raw_spin_unlock_irq(&ctx->lock);
/*
 * Reload the task pointer, it might have been changed by
 * a concurrent perf_event_context_sched_out().
 */
task = ctx->task;
goto retry;
}
/*
 * Since the task isn't running, its safe to add the event, us holding
 * the ctx->lock ensures the task won't get scheduled in.
 */
add_event_to_ctx(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Put a event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event)
{
struct perf_event *sub;
u64 tstamp = perf_event_time(event);
event->state = PERF_EVENT_STATE_INACTIVE;
/* Back-date tstamp_enabled so total_time_enabled stays continuous. */
event->tstamp_enabled = tstamp - event->total_time_enabled;
list_for_each_entry(sub, &event->sibling_list, group_entry) {
if (sub->state >= PERF_EVENT_STATE_INACTIVE)
sub->tstamp_enabled = tstamp - sub->total_time_enabled;
}
}
/*
 * Cross CPU call to enable a performance event
 *
 * Marks the event INACTIVE, then tries to schedule it (or its group)
 * onto the PMU immediately.  Always returns 0 once ctx is active, so
 * the caller's retry loop keys off the event state instead.
 */
static int __perf_event_enable(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_event *leader = event->group_leader;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
int err;
/*
 * There's a time window between 'ctx->is_active' check
 * in perf_event_enable function and this place having:
 * - IRQs on
 * - ctx->lock unlocked
 *
 * where the task could be killed and 'ctx' deactivated
 * by perf_event_exit_task.
 */
if (!ctx->is_active)
return -EINVAL;
raw_spin_lock(&ctx->lock);
update_context_time(ctx);
/* Already enabled (or active): nothing to do. */
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto unlock;
/*
 * set current task's cgroup time reference point
 */
perf_cgroup_set_timestamp(current, ctx);
__perf_event_mark_enabled(event);
if (!event_filter_match(event)) {
if (is_cgroup_event(event))
perf_cgroup_defer_enabled(event);
goto unlock;
}
/*
 * If the event is in a group and isn't the group leader,
 * then don't put it on unless the group is on.
 */
if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
goto unlock;
if (!group_can_go_on(event, cpuctx, 1)) {
err = -EEXIST;
} else {
if (event == leader)
err = group_sched_in(event, cpuctx, ctx);
else
err = event_sched_in(event, cpuctx, ctx);
}
if (err) {
/*
 * If this event can't go on and it's part of a
 * group, then the whole group has to come off.
 */
if (leader != event)
group_sched_out(leader, cpuctx, ctx);
/* A pinned group that cannot go on transitions to ERROR. */
if (leader->attr.pinned) {
update_group_times(leader);
leader->state = PERF_EVENT_STATE_ERROR;
}
}
unlock:
raw_spin_unlock(&ctx->lock);
return 0;
}
/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
if (!task) {
/*
 * Enable the event on the cpu that it's on
 */
cpu_function_call(event->cpu, __perf_event_enable, event);
return;
}
raw_spin_lock_irq(&ctx->lock);
/* Already enabled: nothing to do. */
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;
/*
 * If the event is in error state, clear that first.
 * That way, if we see the event in error state below, we
 * know that it has gone back into error state, as distinct
 * from the task having been scheduled away before the
 * cross-call arrived.
 */
if (event->state == PERF_EVENT_STATE_ERROR)
event->state = PERF_EVENT_STATE_OFF;
retry:
/* Inactive context: mark enabled directly under the lock. */
if (!ctx->is_active) {
__perf_event_mark_enabled(event);
goto out;
}
raw_spin_unlock_irq(&ctx->lock);
if (!task_function_call(task, __perf_event_enable, event))
return;
raw_spin_lock_irq(&ctx->lock);
/*
 * If the context is active and the event is still off,
 * we need to retry the cross-call.
 */
if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
/*
 * task could have been flipped by a concurrent
 * perf_event_context_sched_out()
 */
task = ctx->task;
goto retry;
}
out:
raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_enable);
/*
 * Raise the event's overflow limit by @refresh additional events and
 * (re-)enable it.  Returns 0 on success, -EINVAL for inherited or
 * non-sampling events.
 */
int perf_event_refresh(struct perf_event *event, int refresh)
{
/*
 * not supported on inherited events
 */
if (event->attr.inherit || !is_sampling_event(event))
return -EINVAL;
atomic_add(refresh, &event->event_limit);
perf_event_enable(event);
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);
/*
 * Schedule out of @ctx the event groups selected by @event_type
 * (pinned, flexible, or both), clearing the corresponding is_active
 * bits.  Called with ctx->lock held.
 */
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
{
struct perf_event *event;
/* Snapshot which classes were active before clearing the bits. */
int is_active = ctx->is_active;
ctx->is_active &= ~event_type;
if (likely(!ctx->nr_events))
return;
update_context_time(ctx);
update_cgrp_time_from_cpuctx(cpuctx);
if (!ctx->nr_active)
return;
perf_pmu_disable(ctx->pmu);
/* Only touch classes that were active AND were requested. */
if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
perf_pmu_enable(ctx->pmu);
}
/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context.
 * Equivalent contexts carry interchangeable event sets: these are
 * inherited contexts, so individual events can't be reached via an fd;
 * they can only be enabled/disabled wholesale (prctl/ioctl), which has
 * the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	/* Both must be clones of the very same parent context ... */
	if (!ctx1->parent_ctx || ctx1->parent_ctx != ctx2->parent_ctx)
		return 0;
	/* ... from the same generation ... */
	if (ctx1->parent_gen != ctx2->parent_gen)
		return 0;
	/* ... and neither may be pinned. */
	return !ctx1->pin_count && !ctx2->pin_count;
}
/*
 * Swap the counts and accumulated times of one inherit_stat event pair
 * when their contexts are flipped, so per-task statistics stay attached
 * to the right task.  Runs with IRQs disabled during a context switch.
 */
static void __perf_event_sync_stat(struct perf_event *event,
struct perf_event *next_event)
{
u64 value;
if (!event->attr.inherit_stat)
return;
/*
 * Update the event value, we cannot use perf_event_read()
 * because we're in the middle of a context switch and have IRQs
 * disabled, which upsets smp_call_function_single(), however
 * we know the event must be on the current CPU, therefore we
 * don't need to use it.
 */
switch (event->state) {
case PERF_EVENT_STATE_ACTIVE:
event->pmu->read(event);
/* fall-through */
case PERF_EVENT_STATE_INACTIVE:
update_event_times(event);
break;
default:
break;
}
/*
 * In order to keep per-task stats reliable we need to flip the event
 * values when we flip the contexts.
 */
value = local64_read(&next_event->count);
value = local64_xchg(&event->count, value);
local64_set(&next_event->count, value);
swap(event->total_time_enabled, next_event->total_time_enabled);
swap(event->total_time_running, next_event->total_time_running);
/*
 * Since we swizzled the values, update the user visible data too.
 */
perf_event_update_userpage(event);
perf_event_update_userpage(next_event);
}
/*
 * Walk two (equivalent, cloned) contexts' event lists in lock step and
 * sync the statistics of each corresponding event pair.  A no-op unless
 * the context contains inherit_stat events (nr_stat).
 */
static void perf_event_sync_stat(struct perf_event_context *ctx,
struct perf_event_context *next_ctx)
{
struct perf_event *event, *next_event;
if (!ctx->nr_stat)
return;
update_context_time(ctx);
event = list_first_entry(&ctx->event_list,
struct perf_event, event_entry);
next_event = list_first_entry(&next_ctx->event_list,
struct perf_event, event_entry);
/* Stop as soon as either list is exhausted. */
while (&event->event_entry != &ctx->event_list &&
&next_event->event_entry != &next_ctx->event_list) {
__perf_event_sync_stat(event, next_event);
event = list_next_entry(event, event_entry);
next_event = list_next_entry(next_event, event_entry);
}
}
/*
 * Schedule out @task's context number @ctxn on a context switch to
 * @next.  If both tasks carry clones of the same parent context, swap
 * the context pointers instead of rescheduling all events (the cheap
 * path); otherwise fully schedule the context out.
 */
static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
struct task_struct *next)
{
struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
struct perf_event_context *next_ctx;
struct perf_event_context *parent;
struct perf_cpu_context *cpuctx;
int do_switch = 1;
if (likely(!ctx))
return;
cpuctx = __get_cpu_context(ctx);
if (!cpuctx->task_ctx)
return;
rcu_read_lock();
parent = rcu_dereference(ctx->parent_ctx);
next_ctx = next->perf_event_ctxp[ctxn];
if (parent && next_ctx &&
rcu_dereference(next_ctx->parent_ctx) == parent) {
/*
 * Looks like the two contexts are clones, so we might be
 * able to optimize the context switch. We lock both
 * contexts and check that they are clones under the
 * lock (including re-checking that neither has been
 * uncloned in the meantime). It doesn't matter which
 * order we take the locks because no other cpu could
 * be trying to lock both of these tasks.
 */
raw_spin_lock(&ctx->lock);
raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
/*
 * XXX do we need a memory barrier of sorts
 * wrt to rcu_dereference() of perf_event_ctxp
 */
task->perf_event_ctxp[ctxn] = next_ctx;
next->perf_event_ctxp[ctxn] = ctx;
ctx->task = next;
next_ctx->task = task;
do_switch = 0;
perf_event_sync_stat(ctx, next_ctx);
}
raw_spin_unlock(&next_ctx->lock);
raw_spin_unlock(&ctx->lock);
}
rcu_read_unlock();
/* Slow path: the contexts were not equivalent clones. */
if (do_switch) {
raw_spin_lock(&ctx->lock);
ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
raw_spin_unlock(&ctx->lock);
}
}
/* Iterate over all per-task context slots (0 .. perf_nr_task_contexts-1). */
#define for_each_task_context_nr(ctxn) \
for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
struct task_struct *next)
{
int ctxn;
for_each_task_context_nr(ctxn)
perf_event_context_sched_out(task, ctxn, next);
/*
 * if cgroup events exist on this CPU, then we need
 * to check if we have to switch out PMU state.
 * cgroup event are system-wide mode only
 */
if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
perf_cgroup_sched_out(task, next);
}
/*
 * Schedule the current task context completely off this cpu and clear
 * cpuctx->task_ctx.  A no-op when no task context is installed; warns
 * if @ctx is not the installed one.
 */
static void task_ctx_sched_out(struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
if (!cpuctx->task_ctx)
return;
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
ctx_sched_out(ctx, cpuctx, EVENT_ALL);
cpuctx->task_ctx = NULL;
}
/*
 * Called with IRQs disabled
 *
 * Thin wrapper: schedule the per-cpu context's events of @event_type out.
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
enum event_type_t event_type)
{
ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}
/*
 * Schedule in all eligible pinned groups of @ctx.  A pinned group that
 * cannot get on the PMU is moved to ERROR state: pinned means "always
 * counting or broken", never silently skipped.
 */
static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx)
{
struct perf_event *event;
list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
if (!event_filter_match(event))
continue;
/* may need to reset tstamp_enabled */
if (is_cgroup_event(event))
perf_cgroup_mark_enabled(event, ctx);
if (group_can_go_on(event, cpuctx, 1))
group_sched_in(event, cpuctx, ctx);
/*
 * If this pinned group hasn't been scheduled,
 * put it in error state.
 */
if (event->state == PERF_EVENT_STATE_INACTIVE) {
update_group_times(event);
event->state = PERF_EVENT_STATE_ERROR;
}
}
}
/*
 * Schedule in as many flexible groups of @ctx as fit.  Once one
 * hardware group fails to go on, no further hardware groups are tried
 * (can_add_hw drops to 0) to preserve list ordering fairness.
 */
static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx)
{
struct perf_event *event;
int can_add_hw = 1;
list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
/* Ignore events in OFF or ERROR state */
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
/*
 * Listen to the 'cpu' scheduling filter constraint
 * of events:
 */
if (!event_filter_match(event))
continue;
/* may need to reset tstamp_enabled */
if (is_cgroup_event(event))
perf_cgroup_mark_enabled(event, ctx);
if (group_can_go_on(event, cpuctx, can_add_hw)) {
if (group_sched_in(event, cpuctx, ctx))
can_add_hw = 0;
}
}
}
/*
 * Schedule @ctx's events of the requested @event_type in, pinned
 * groups first.  Classes that are already active (per the old
 * is_active bits) are not rescheduled.  Called with ctx->lock held.
 */
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task)
{
u64 now;
/* Snapshot which classes were already active before setting the bits. */
int is_active = ctx->is_active;
ctx->is_active |= event_type;
if (likely(!ctx->nr_events))
return;
now = perf_clock();
ctx->timestamp = now;
perf_cgroup_set_timestamp(task, ctx);
/*
 * First go through the list and put on any pinned groups
 * in order to give them the best chance of going on.
 */
if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
ctx_pinned_sched_in(ctx, cpuctx);
/* Then walk through the lower prio flexible groups */
if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
ctx_flexible_sched_in(ctx, cpuctx);
}
/* Thin wrapper: schedule the per-cpu context's events of @event_type in. */
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task)
{
struct perf_event_context *ctx = &cpuctx->ctx;
ctx_sched_in(ctx, cpuctx, event_type, task);
}
/*
 * Install @ctx as the task context on this cpu and schedule its events
 * in, interleaved with the cpu context so that pinned groups always
 * beat flexible ones for the hardware.
 */
static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
cpuctx = __get_cpu_context(ctx);
/* Already the installed task context: nothing to do. */
if (cpuctx->task_ctx == ctx)
return;
perf_ctx_lock(cpuctx, ctx);
perf_pmu_disable(ctx->pmu);
/*
 * We want to keep the following priority order:
 * cpu pinned (that don't need to move), task pinned,
 * cpu flexible, task flexible.
 */
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
if (ctx->nr_events)
cpuctx->task_ctx = ctx;
perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
/*
 * Since these rotations are per-cpu, we need to ensure the
 * cpu-context we got scheduled on is actually rotating.
 */
perf_pmu_rotate_start(ctx->pmu);
}
/*
 * When sampling the branch stack in system-wide, it may be necessary
 * to flush the stack on context switch. This happens when the branch
 * stack does not tag its entries with the pid of the current task.
 * Otherwise it becomes impossible to associate a branch entry with a
 * task. This ambiguity is more likely to appear when the branch stack
 * supports priv level filtering and the user sets it to monitor only
 * at the user level (which could be a useful measurement in system-wide
 * mode). In that case, the risk is high of having a branch stack with
 * branch from multiple tasks. Flushing may mean dropping the existing
 * entries or stashing them somewhere in the PMU specific code layer.
 *
 * This function provides the context switch callback to the lower code
 * layer. It is invoked ONLY when there is at least one system-wide context
 * with at least one active event using taken branch sampling.
 */
static void perf_branch_stack_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
struct pmu *pmu;
unsigned long flags;
/* no need to flush branch stack if not changing task */
if (prev == task)
return;
local_irq_save(flags);
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
/*
 * check if the context has at least one
 * event using PERF_SAMPLE_BRANCH_STACK
 */
if (cpuctx->ctx.nr_branch_stack > 0
&& pmu->flush_branch_stack) {
pmu = cpuctx->ctx.pmu;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(pmu);
pmu->flush_branch_stack();
perf_pmu_enable(pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
}
rcu_read_unlock();
local_irq_restore(flags);
}
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
struct perf_event_context *ctx;
int ctxn;
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (likely(!ctx))
continue;
perf_event_context_sched_in(ctx, task);
}
/*
 * if cgroup events exist on this CPU, then we need
 * to check if we have to switch in PMU state.
 * cgroup event are system-wide mode only
 */
if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
perf_cgroup_sched_in(prev, task);
/* check for system-wide branch_stack events */
if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
perf_branch_stack_sched_in(prev, task);
}
/*
 * Compute the sample period needed to hit attr.sample_freq given that
 * @count events were observed in @nsec nanoseconds, i.e.
 * period = (count * NSEC_PER_SEC) / (nsec * sample_freq), reducing the
 * operands bit-by-bit so the intermediate products fit in a u64.
 */
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
u64 frequency = event->attr.sample_freq;
u64 sec = NSEC_PER_SEC;
u64 divisor, dividend;
/* Bit widths (fls64) of each operand, used to bound the products. */
int count_fls, nsec_fls, frequency_fls, sec_fls;
count_fls = fls64(count);
nsec_fls = fls64(nsec);
frequency_fls = fls64(frequency);
sec_fls = 30;
/*
 * We got @count in @nsec, with a target of sample_freq HZ
 * the target period becomes:
 *
 * @count * 10^9
 * period = -------------------
 * @nsec * sample_freq
 *
 */
/*
 * Reduce accuracy by one bit such that @a and @b converge
 * to a similar magnitude.
 */
#define REDUCE_FLS(a, b) \
do { \
if (a##_fls > b##_fls) { \
a >>= 1; \
a##_fls--; \
} else { \
b >>= 1; \
b##_fls--; \
} \
} while (0)
/*
 * Reduce accuracy until either term fits in a u64, then proceed with
 * the other, so that finally we can do a u64/u64 division.
 */
while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
REDUCE_FLS(sec, count);
}
if (count_fls + sec_fls > 64) {
divisor = nsec * frequency;
while (count_fls + sec_fls > 64) {
REDUCE_FLS(count, sec);
divisor >>= 1;
}
dividend = count * sec;
} else {
dividend = count * sec;
while (nsec_fls + frequency_fls > 64) {
REDUCE_FLS(nsec, frequency);
dividend >>= 1;
}
divisor = nsec * frequency;
}
/* Avoid division by zero after all the shifting. */
if (!divisor)
return dividend;
return div64_u64(dividend, divisor);
}
/* Per-cpu throttling bookkeeping used by the unthrottle tick path. */
static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);
/*
 * Move the event's sample_period towards the value needed for its
 * target frequency, using a /8 low-pass filter on the delta.  When the
 * remaining period is far too large, reset it; @disable says whether
 * the event must be stopped/restarted around that reset (false when
 * the caller already stopped it).
 */
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{
struct hw_perf_event *hwc = &event->hw;
s64 period, sample_period;
s64 delta;
period = perf_calculate_period(event, nsec, count);
delta = (s64)(period - hwc->sample_period);
delta = (delta + 7) / 8; /* low pass filter */
sample_period = hwc->sample_period + delta;
/* A period of zero would never overflow; clamp to 1. */
if (!sample_period)
sample_period = 1;
hwc->sample_period = sample_period;
if (local64_read(&hwc->period_left) > 8*sample_period) {
if (disable)
event->pmu->stop(event, PERF_EF_UPDATE);
local64_set(&hwc->period_left, 0);
if (disable)
event->pmu->start(event, PERF_EF_RELOAD);
}
}
/*
 * combine freq adjustment with unthrottling to avoid two passes over the
 * events. At the same time, make sure, having freq events does not change
 * the rate of unthrottling as that would introduce bias.
 *
 * Called from the timer tick path; @needs_unthr says whether throttled
 * events on this cpu must be restarted this tick.
 */
static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
int needs_unthr)
{
struct perf_event *event;
struct hw_perf_event *hwc;
u64 now, period = TICK_NSEC;
s64 delta;
/*
 * only need to iterate over all events iff:
 * - context have events in frequency mode (needs freq adjust)
 * - there are events to unthrottle on this cpu
 */
if (!(ctx->nr_freq || needs_unthr))
return;
raw_spin_lock(&ctx->lock);
perf_pmu_disable(ctx->pmu);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;
if (!event_filter_match(event))
continue;
hwc = &event->hw;
/* Restart events that were throttled by the interrupt path. */
if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
hwc->interrupts = 0;
perf_log_throttle(event, 1);
event->pmu->start(event, 0);
}
if (!event->attr.freq || !event->attr.sample_freq)
continue;
/*
 * stop the event and update event->count
 */
event->pmu->stop(event, PERF_EF_UPDATE);
now = local64_read(&event->count);
delta = now - hwc->freq_count_stamp;
hwc->freq_count_stamp = now;
/*
 * restart the event
 * reload only if value has changed
 * we have stopped the event so tell that
 * to perf_adjust_period() to avoid stopping it
 * twice.
 */
if (delta > 0)
perf_adjust_period(event, period, delta, false);
event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
}
perf_pmu_enable(ctx->pmu);
raw_spin_unlock(&ctx->lock);
}
/*
 * Round-robin a context's flexible events: move the first non-pinned
 * group to the tail so a different group gets scheduled first next
 * time.  The inheritance code may temporarily disable rotation.
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	if (ctx->rotate_disable)
		return;
	list_rotate_left(&ctx->flexible_groups);
}
/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 *
 * Rotates the flexible groups of the cpu context and (if any) the current
 * task context so events that could not all fit on the PMU take turns.
 * Drops the cpu context from the rotation list once it has no events.
 */
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	struct perf_event_context *ctx = NULL;
	int rotate = 0, remove = 1;
	if (cpuctx->ctx.nr_events) {
		remove = 0;
		/* rotation only matters when not every event is on the PMU */
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;
	}
	ctx = cpuctx->task_ctx;
	if (ctx && ctx->nr_events) {
		remove = 0;
		if (ctx->nr_events != ctx->nr_active)
			rotate = 1;
	}
	if (!rotate)
		goto done;
	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);
	/* unschedule flexible groups, rotate the lists, reschedule in new order */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);
	perf_event_sched_in(cpuctx, ctx, current);
	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
	if (remove)
		list_del_init(&cpuctx->rotation_list);
}
#ifdef CONFIG_NO_HZ_FULL
/*
 * NO_HZ_FULL hook: the scheduler tick may be stopped iff no cpu
 * contexts are queued for rotation on this CPU.
 */
bool perf_event_can_stop_tick(void)
{
	return list_empty(&__get_cpu_var(rotation_list));
}
#endif
/*
 * Scheduler tick hook: for every cpu context on this CPU's rotation
 * list, run the freq-adjust/unthrottle pass and, every jiffies_interval
 * ticks, rotate the flexible events.  Runs with IRQs disabled.
 */
void perf_event_task_tick(void)
{
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;
	struct perf_event_context *ctx;
	int throttled;
	WARN_ON(!irqs_disabled());
	__this_cpu_inc(perf_throttled_seq);
	/* consume the throttle count accumulated since the previous tick */
	throttled = __this_cpu_xchg(perf_throttled_count, 0);
	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
		ctx = &cpuctx->ctx;
		perf_adjust_freq_unthr_context(ctx, throttled);
		ctx = cpuctx->task_ctx;
		if (ctx)
			perf_adjust_freq_unthr_context(ctx, throttled);
		if (cpuctx->jiffies_interval == 1 ||
		    !(jiffies % cpuctx->jiffies_interval))
			perf_rotate_context(cpuctx);
	}
}
/*
 * Honour attr.enable_on_exec for one event: consume the one-shot flag
 * and mark the event enabled if it is still below INACTIVE.  Returns 1
 * when the event's state actually changed, 0 otherwise.
 */
static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;
	event->attr.enable_on_exec = 0;
	if (event->state < PERF_EVENT_STATE_INACTIVE) {
		__perf_event_mark_enabled(event);
		return 1;
	}
	return 0;
}
/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.  Called with IRQs enabled; disables
 * them for the duration since we context-switch the task context out
 * and back in around the enable pass.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;
	local_irq_save(flags);
	if (!ctx || !ctx->nr_events)
		goto out;
	/*
	 * We must ctxsw out cgroup events to avoid conflict
	 * when invoking perf_task_event_sched_in() later on
	 * in this function. Otherwise we end up trying to
	 * ctxswin cgroup events which are already scheduled
	 * in.
	 */
	perf_cgroup_sched_out(current, NULL);
	raw_spin_lock(&ctx->lock);
	task_ctx_sched_out(ctx);
	list_for_each_entry(event, &ctx->event_list, event_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}
	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);
	raw_spin_unlock(&ctx->lock);
	/*
	 * Also calls ctxswin for cgroup events, if any:
	 */
	perf_event_context_sched_in(ctx, ctx->task);
out:
	local_irq_restore(flags);
}
/*
 * Cross CPU call to read the hardware event.  Runs via IPI on the CPU
 * the event is (believed to be) active on; refreshes the timing fields
 * and pulls the current hardware value into event->count.
 */
static void __perf_event_read(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived. In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;
	raw_spin_lock(&ctx->lock);
	if (ctx->is_active) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
	}
	update_event_times(event);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		event->pmu->read(event);
	raw_spin_unlock(&ctx->lock);
}
/*
 * Total value of an event: its own count plus everything already
 * accumulated from its (exited) children.
 */
static inline u64 perf_event_count(struct perf_event *event)
{
	u64 total = atomic64_read(&event->child_count);
	total += local64_read(&event->count);
	return total;
}
/*
 * Read the up-to-date value of the event.  If it is running on a
 * (possibly remote) CPU, read it there via IPI; if merely inactive,
 * just refresh the timing bookkeeping under the context lock.  Returns
 * the combined self + children count.
 */
static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;
		raw_spin_lock_irqsave(&ctx->lock, flags);
		/*
		 * may read while context is not active
		 * (e.g., thread is blocked), in that case
		 * we cannot update context time
		 */
		if (ctx->is_active) {
			update_context_time(ctx);
			update_cgrp_time_from_event(event);
		}
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return perf_event_count(event);
}
/*
 * One-time initialisation of a freshly allocated (zeroed)
 * perf_event context, as embedded in a task_struct or cpu context.
 */
static void __perf_event_init_context(struct perf_event_context *ctx)
{
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->event_list);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	atomic_set(&ctx->refcount, 1);
}
/*
 * Allocate and initialise a perf_event_context for @pmu.  When @task is
 * non-NULL the context pins a reference to that task.  Returns NULL on
 * allocation failure.
 */
static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *new_ctx;
	new_ctx = kzalloc(sizeof(*new_ctx), GFP_KERNEL);
	if (new_ctx) {
		__perf_event_init_context(new_ctx);
		new_ctx->pmu = pmu;
		if (task) {
			get_task_struct(task);
			new_ctx->task = task;
		}
	}
	return new_ctx;
}
/*
 * Resolve @vpid (0 means current) to a task we are allowed to attach
 * perf events to, taking a task reference.  Returns ERR_PTR(-ESRCH)
 * when no such task exists, ERR_PTR(-EACCES) when ptrace permission
 * checks deny access.
 */
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
	struct task_struct *task;
	int err;
	rcu_read_lock();
	if (!vpid)
		task = current;
	else
		task = find_task_by_vpid(vpid);
	/* pin the task before leaving the RCU read section */
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		return ERR_PTR(-ESRCH);
	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
		goto errout;
	return task;
errout:
	put_task_struct(task);
	return ERR_PTR(err);
}
/*
 * Returns a matching context with refcount and pincount.
 *
 * For @task == NULL this is the per-cpu context of @cpu (root only);
 * otherwise it is the task's context for @pmu's context class, created
 * on demand.  Returns an ERR_PTR on failure.
 */
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	unsigned long flags;
	int ctxn, err;
	if (!task) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);
		/*
		 * We could be clever and allow to attach a event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);
		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);
		++ctx->pin_count;
		return ctx;
	}
	err = -EINVAL;
	ctxn = pmu->task_ctx_nr;
	if (ctxn < 0)
		goto errout;
retry:
	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	} else {
		/* no context yet: allocate one and try to install it */
		ctx = alloc_perf_context(pmu, task);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		err = 0;
		mutex_lock(&task->perf_event_mutex);
		/*
		 * If it has already passed perf_event_exit_task().
		 * we must see PF_EXITING, it takes this mutex too.
		 */
		if (task->flags & PF_EXITING)
			err = -ESRCH;
		else if (task->perf_event_ctxp[ctxn])
			err = -EAGAIN;
		else {
			get_ctx(ctx);
			++ctx->pin_count;
			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
		}
		mutex_unlock(&task->perf_event_mutex);
		if (unlikely(err)) {
			put_ctx(ctx);
			/* lost the install race: another context appeared, retry */
			if (err == -EAGAIN)
				goto retry;
			goto errout;
		}
	}
	return ctx;
errout:
	return ERR_PTR(err);
}
static void perf_event_free_filter(struct perf_event *event);
/*
 * RCU callback: final teardown of a perf_event once the grace period
 * has elapsed and no readers can still reference it.
 */
static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}
static void ring_buffer_put(struct ring_buffer *rb);
static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
/*
 * Tear down an event whose refcount has reached zero: drop the global
 * bookkeeping (top-level events only), detach the ring buffer and the
 * cgroup, run the pmu destroy hook and free the struct via RCU.
 */
static void free_event(struct perf_event *event)
{
	irq_work_sync(&event->pending);
	if (!event->parent) {
		/* inherited children don't carry the global accounting */
		if (event->attach_state & PERF_ATTACH_TASK)
			static_key_slow_dec_deferred(&perf_sched_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
		if (is_cgroup_event(event)) {
			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
			static_key_slow_dec_deferred(&perf_sched_events);
		}
		if (has_branch_stack(event)) {
			static_key_slow_dec_deferred(&perf_sched_events);
			/* is system-wide event */
			if (!(event->attach_state & PERF_ATTACH_TASK)) {
				atomic_dec(&per_cpu(perf_branch_stack_events,
						    event->cpu));
			}
		}
	}
	if (event->rb) {
		struct ring_buffer *rb;
		/*
		 * Can happen when we close an event with re-directed output.
		 *
		 * Since we have a 0 refcount, perf_mmap_close() will skip
		 * over us; possibly making our ring_buffer_put() the last.
		 */
		mutex_lock(&event->mmap_mutex);
		rb = event->rb;
		if (rb) {
			rcu_assign_pointer(event->rb, NULL);
			ring_buffer_detach(event, rb);
			ring_buffer_put(rb); /* could be last */
		}
		mutex_unlock(&event->mmap_mutex);
	}
	if (is_cgroup_event(event))
		perf_detach_cgroup(event);
	if (event->destroy)
		event->destroy(event);
	if (event->ctx)
		put_ctx(event->ctx);
	call_rcu(&event->rcu_head, free_event_rcu);
}
/*
 * Detach @event from its context and free it.  Exported for in-kernel
 * users; also the tail of the fd-release path via put_event().
 */
int perf_event_release_kernel(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 * 1) there is a lock recursion from perf_event_exit_task
	 * see the comment there.
	 *
	 * 2) there is a lock-inversion with mmap_sem through
	 * perf_event_read_group(), which takes faults while
	 * holding ctx->mutex, however this is called after
	 * the last filedesc died, so there is no possibility
	 * to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
	perf_remove_from_context(event, true);
	mutex_unlock(&ctx->mutex);
	free_event(event);
	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
 * Called when the last reference to the file is gone.  Drops the
 * event's refcount and, on the final put, unlinks the event from its
 * owner task (racing carefully with perf_event_exit_task()) before
 * releasing it.
 */
static void put_event(struct perf_event *event)
{
	struct task_struct *owner;
	if (!atomic_long_dec_and_test(&event->refcount))
		return;
	rcu_read_lock();
	owner = ACCESS_ONCE(event->owner);
	/*
	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
	 * !owner it means the list deletion is complete and we can indeed
	 * free this event, otherwise we need to serialize on
	 * owner->perf_event_mutex.
	 */
	smp_read_barrier_depends();
	if (owner) {
		/*
		 * Since delayed_put_task_struct() also drops the last
		 * task reference we can safely take a new reference
		 * while holding the rcu_read_lock().
		 */
		get_task_struct(owner);
	}
	rcu_read_unlock();
	if (owner) {
		mutex_lock(&owner->perf_event_mutex);
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
		 * ensured they're done, and we can proceed with freeing the
		 * event.
		 */
		if (event->owner)
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
		put_task_struct(owner);
	}
	perf_event_release_kernel(event);
}
/* fops->release: drop the file's reference to its perf event. */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;
	put_event(event);
	return 0;
}
/*
 * Sum the event's count and enabled/running times over the event itself
 * and all of its inherited children.  Fills *enabled and *running and
 * returns the total count.  Serialised against child exit by
 * event->child_mutex.
 */
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event *child;
	u64 total = 0;
	*enabled = 0;
	*running = 0;
	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	/* child_total_* already folds in times of children that exited */
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);
	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
	mutex_unlock(&event->child_mutex);
	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
/*
 * PERF_FORMAT_GROUP read: copy one leader record (nr, optional times,
 * count, optional id) followed by one record per sibling into @buf.
 * values[5] holds the largest possible record.  Returns the number of
 * bytes written or -EFAULT.
 */
static int perf_event_read_group(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
	u64 values[5];
	u64 count, enabled, running;
	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);
	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);
	size = n * sizeof(u64);
	if (copy_to_user(buf, values, size))
		goto unlock;
	ret = size;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;
		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);
		size = n * sizeof(u64);
		if (copy_to_user(buf + ret, values, size)) {
			ret = -EFAULT;
			goto unlock;
		}
		ret += size;
	}
unlock:
	mutex_unlock(&ctx->mutex);
	return ret;
}
/*
 * Non-group read: copy count plus the optional time/id fields selected
 * by @read_format to user space.  Returns bytes written or -EFAULT.
 */
static int perf_event_read_one(struct perf_event *event,
			       u64 read_format, char __user *buf)
{
	u64 enabled, running;
	u64 out[4];
	int nr = 0;
	size_t bytes;
	out[nr++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		out[nr++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		out[nr++] = running;
	if (read_format & PERF_FORMAT_ID)
		out[nr++] = primary_event_id(event);
	bytes = nr * sizeof(u64);
	if (copy_to_user(buf, out, bytes))
		return -EFAULT;
	return bytes;
}
/*
 * Read the performance event - simple non blocking version for now.
 * Returns 0 (EOF) for events stuck in error state, -ENOSPC when the
 * user buffer cannot hold a full record, otherwise the bytes written.
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	/*
	 * Return end-of-file for a read on a event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;
	if (count < event->read_size)
		return -ENOSPC;
	WARN_ON_ONCE(event->ctx->parent_ctx);
	return (read_format & PERF_FORMAT_GROUP) ?
		perf_event_read_group(event, read_format, buf) :
		perf_event_read_one(event, read_format, buf);
}
/* fops->read: forward to the non-blocking hardware read helper. */
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	return perf_read_hw(file->private_data, buf, count);
}
/*
 * fops->poll: report the wakeup events the ring buffer has published.
 * With no buffer attached there is nothing to read, so signal hang-up.
 *
 * Fix: the default was POLL_HUP, which is a SIGPOLL/SIGIO si_code
 * (value 6 == POLLPRI|POLLOUT as a mask), not a poll(2) event mask;
 * the correct poll-mask spelling is POLLHUP.
 */
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct ring_buffer *rb;
	unsigned int events = POLLHUP;
	/*
	 * Pin the event->rb by taking event->mmap_mutex; otherwise
	 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
	 */
	mutex_lock(&event->mmap_mutex);
	rb = event->rb;
	if (rb)
		events = atomic_xchg(&rb->poll, 0);
	mutex_unlock(&event->mmap_mutex);
	poll_wait(file, &event->waitq, wait);
	return events;
}
/*
 * PERF_EVENT_IOC_RESET: fold the latest hardware value into the event
 * (via a read) before zeroing the accumulated count, then republish the
 * mmap'ed user page.
 */
static void perf_event_reset(struct perf_event *event)
{
	(void)perf_event_read(event);
	local64_set(&event->count, 0);
	perf_event_update_userpage(event);
}
/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
 */
static void perf_event_for_each_child(struct perf_event *event,
				      void (*func)(struct perf_event *))
{
	struct perf_event *child;
	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	/* apply @func to the event itself and to every inherited child */
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}
/*
 * Apply @func across the event's whole group: the group leader and
 * every sibling, each expanded over their inherited children.
 */
static void perf_event_for_each(struct perf_event *event,
				void (*func)(struct perf_event *))
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	event = event->group_leader;
	perf_event_for_each_child(event, func);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}
/*
 * PERF_EVENT_IOC_PERIOD: update the event's sample period — or its
 * sample frequency when attr.freq is set — from the user-supplied
 * value.  Returns 0, -EINVAL for non-sampling events / zero values /
 * over-limit frequencies, or -EFAULT on a bad user pointer.
 */
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct perf_event_context *ctx = event->ctx;
	int ret = 0;
	u64 value;
	if (!is_sampling_event(event))
		return -EINVAL;
	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;
	if (!value)
		return -EINVAL;
	raw_spin_lock_irq(&ctx->lock);
	if (event->attr.freq) {
		/* frequency mode is capped by the global sysctl */
		if (value > sysctl_perf_event_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}
		event->attr.sample_freq = value;
	} else {
		event->attr.sample_period = value;
		event->hw.sample_period = value;
	}
unlock:
	raw_spin_unlock_irq(&ctx->lock);
	return ret;
}
static const struct file_operations perf_fops;
/*
 * Resolve @fd, verifying it actually refers to a perf event file.  On
 * success stores the struct fd in *p (caller must fdput() it) and
 * returns 0; otherwise returns -EBADF.
 */
static inline int perf_fget_light(int fd, struct fd *p)
{
	struct fd f = fdget(fd);
	if (!f.file)
		return -EBADF;
	if (f.file->f_op == &perf_fops) {
		*p = f;
		return 0;
	}
	fdput(f);
	return -EBADF;
}
static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
/*
 * Main perf_event ioctl dispatcher.  For ENABLE/DISABLE/RESET the
 * PERF_IOC_FLAG_GROUP bit in @arg widens the operation from a single
 * event to its whole group.
 */
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;
	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
		break;
	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);
	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);
	case PERF_EVENT_IOC_SET_OUTPUT:
	{
		int ret;
		/* arg == -1 clears any output redirection */
		if (arg != -1) {
			struct perf_event *output_event;
			struct fd output;
			ret = perf_fget_light(arg, &output);
			if (ret)
				return ret;
			output_event = output.file->private_data;
			ret = perf_event_set_output(event, output_event);
			fdput(output);
		} else {
			ret = perf_event_set_output(event, NULL);
		}
		return ret;
	}
	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);
	default:
		return -ENOTTY;
	}
	if (flags & PERF_IOC_FLAG_GROUP)
		perf_event_for_each(event, func);
	else
		perf_event_for_each_child(event, func);
	return 0;
}
#ifdef CONFIG_COMPAT
/*
 * Compat (32-bit userland) ioctl entry.  SET_FILTER encodes a pointer
 * size in the ioctl number, so rewrite the size field to the native
 * pointer width before forwarding to the regular handler.
 */
static long perf_compat_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	switch (_IOC_NR(cmd)) {
	case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
		/* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
		if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
			cmd &= ~IOCSIZE_MASK;
			cmd |= sizeof(void *) << IOCSIZE_SHIFT;
		}
		break;
	}
	return perf_ioctl(file, cmd, arg);
}
#else
# define perf_compat_ioctl NULL
#endif
int perf_event_task_enable(void)
{
struct perf_event *event;
mutex_lock(¤t->perf_event_mutex);
list_for_each_entry(event, ¤t->perf_event_list, owner_entry)
perf_event_for_each_child(event, perf_event_enable);
mutex_unlock(¤t->perf_event_mutex);
return 0;
}
int perf_event_task_disable(void)
{
struct perf_event *event;
mutex_lock(¤t->perf_event_mutex);
list_for_each_entry(event, ¤t->perf_event_list, owner_entry)
perf_event_for_each_child(event, perf_event_disable);
mutex_unlock(¤t->perf_event_mutex);
return 0;
}
/*
 * Index published in the mmap'ed user page for userspace counter reads;
 * 0 means "no hardware index available" (event not active or stopped).
 */
static int perf_event_index(struct perf_event *event)
{
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;
	return event->pmu->event_idx(event);
}
/*
 * Compute the current timestamp and the derived enabled/running times
 * from the event's shadow context time, without taking any locks (safe
 * from NMI context; see perf_event_update_userpage()).
 */
static void calc_timer_values(struct perf_event *event,
			      u64 *now,
			      u64 *enabled,
			      u64 *running)
{
	u64 timestamp;
	*now = perf_clock();
	timestamp = *now + event->shadow_ctx_time;
	*enabled = timestamp - event->tstamp_enabled;
	*running = timestamp - event->tstamp_running;
}
/*
 * Weak arch hook invoked from perf_event_update_userpage(): lets the
 * architecture publish additional data in the mmap'ed user page.
 * Default implementation does nothing.
 */
void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
}
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_event_update_userpage(struct perf_event *event)
{
	struct perf_event_mmap_page *userpg;
	struct ring_buffer *rb;
	u64 enabled, running, now;
	rcu_read_lock();
	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * we cannot simply called update_context_time()
	 * because of locking issue as we can be called in
	 * NMI context
	 */
	calc_timer_values(event, &now, &enabled, &running);
	rb = rcu_dereference(event->rb);
	if (!rb)
		goto unlock;
	userpg = rb->user_page;
	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	/* odd ->lock value tells userspace an update is in flight (seqlock) */
	++userpg->lock;
	barrier();
	userpg->index = perf_event_index(event);
	userpg->offset = perf_event_count(event);
	/* with a live hw index, userspace adds the raw counter delta itself */
	if (userpg->index)
		userpg->offset -= local64_read(&event->hw.prev_count);
	userpg->time_enabled = enabled +
			atomic64_read(&event->child_total_time_enabled);
	userpg->time_running = running +
			atomic64_read(&event->child_total_time_running);
	arch_perf_update_userpage(userpg, now);
	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
/*
 * Fault handler for the perf mmap area: hands out the user control page
 * (pgoff 0) or a data page from the ring buffer.  Writes are permitted
 * only to the control page; anything else gets SIGBUS.
 */
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
	struct ring_buffer *rb;
	int ret = VM_FAULT_SIGBUS;
	/* page_mkwrite path: only the control page may be written */
	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}
	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (!rb)
		goto unlock;
	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
		goto unlock;
	vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
	if (!vmf->page)
		goto unlock;
	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;
	ret = 0;
unlock:
	rcu_read_unlock();
	return ret;
}
/*
 * Link @event onto @rb's wakeup list so ring_buffer_wakeup() reaches
 * it.  The unlocked emptiness check is only an optimisation; the real
 * decision is re-made under rb->event_lock.
 */
static void ring_buffer_attach(struct perf_event *event,
			       struct ring_buffer *rb)
{
	unsigned long flags;
	if (!list_empty(&event->rb_entry))
		return;
	spin_lock_irqsave(&rb->event_lock, flags);
	if (list_empty(&event->rb_entry))
		list_add(&event->rb_entry, &rb->event_list);
	spin_unlock_irqrestore(&rb->event_lock, flags);
}
/*
 * Unlink @event from its buffer's wakeup list and wake any sleepers so
 * they notice the buffer is going away.
 */
static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
{
	unsigned long flags;
	if (list_empty(&event->rb_entry))
		return;
	spin_lock_irqsave(&rb->event_lock, flags);
	list_del_init(&event->rb_entry);
	wake_up_all(&event->waitq);
	spin_unlock_irqrestore(&rb->event_lock, flags);
}
/*
 * Wake every event attached to this event's ring buffer (multiple
 * events can redirect their output into one buffer).  Note that @event
 * is reused as the RCU list iterator.
 */
static void ring_buffer_wakeup(struct perf_event *event)
{
	struct ring_buffer *rb;
	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (rb) {
		list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
			wake_up_all(&event->waitq);
	}
	rcu_read_unlock();
}
/* RCU callback performing the actual ring buffer free. */
static void rb_free_rcu(struct rcu_head *rcu_head)
{
	rb_free(container_of(rcu_head, struct ring_buffer, rcu_head));
}
/*
 * Take a reference on the event's ring buffer.  Returns NULL if the
 * buffer is gone or already on its way to being freed (refcount hit 0).
 */
static struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
	struct ring_buffer *rb;
	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (rb && !atomic_inc_not_zero(&rb->refcount))
		rb = NULL;
	rcu_read_unlock();
	return rb;
}
/*
 * Drop a buffer reference; the final put frees the buffer after an RCU
 * grace period since readers may still hold rcu-protected pointers.
 */
static void ring_buffer_put(struct ring_buffer *rb)
{
	if (!atomic_dec_and_test(&rb->refcount))
		return;
	/* nobody should still be attached at this point */
	WARN_ON_ONCE(!list_empty(&rb->event_list));
	call_rcu(&rb->rcu_head, rb_free_rcu);
}
/*
 * vm_ops->open: a VMA mapping the buffer was duplicated (fork/mremap);
 * track the additional mapping on both the event and its buffer.
 */
static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;
	atomic_inc(&event->rb->mmap_count);
	atomic_inc(&event->mmap_count);
}
/*
 * A buffer can be mmap()ed multiple times; either directly through the same
 * event, or through other events by use of perf_event_set_output().
 *
 * In order to undo the VM accounting done by perf_mmap() we need to destroy
 * the buffer here, where we still have a VM context. This means we need
 * to detach all events redirecting to us.
 */
static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;
	struct ring_buffer *rb = event->rb;
	struct user_struct *mmap_user = rb->mmap_user;
	int mmap_locked = rb->mmap_locked;
	unsigned long size = perf_data_size(rb);
	atomic_dec(&rb->mmap_count);
	/* not the event's last mapping: nothing more to do */
	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
		return;
	/* Detach current event from the buffer. */
	rcu_assign_pointer(event->rb, NULL);
	ring_buffer_detach(event, rb);
	mutex_unlock(&event->mmap_mutex);
	/* If there's still other mmap()s of this buffer, we're done. */
	if (atomic_read(&rb->mmap_count)) {
		ring_buffer_put(rb); /* can't be last */
		return;
	}
	/*
	 * No other mmap()s, detach from all other events that might redirect
	 * into the now unreachable buffer. Somewhat complicated by the
	 * fact that rb::event_lock otherwise nests inside mmap_mutex.
	 */
again:
	rcu_read_lock();
	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
		if (!atomic_long_inc_not_zero(&event->refcount)) {
			/*
			 * This event is en-route to free_event() which will
			 * detach it and remove it from the list.
			 */
			continue;
		}
		rcu_read_unlock();
		mutex_lock(&event->mmap_mutex);
		/*
		 * Check we didn't race with perf_event_set_output() which can
		 * swizzle the rb from under us while we were waiting to
		 * acquire mmap_mutex.
		 *
		 * If we find a different rb; ignore this event, a next
		 * iteration will no longer find it on the list. We have to
		 * still restart the iteration to make sure we're not now
		 * iterating the wrong list.
		 */
		if (event->rb == rb) {
			rcu_assign_pointer(event->rb, NULL);
			ring_buffer_detach(event, rb);
			ring_buffer_put(rb); /* can't be last, we still have one */
		}
		mutex_unlock(&event->mmap_mutex);
		put_event(event);
		/*
		 * Restart the iteration; either we're on the wrong list or
		 * destroyed its integrity by doing a deletion.
		 */
		goto again;
	}
	rcu_read_unlock();
	/*
	 * It could be there's still a few 0-ref events on the list; they'll
	 * get cleaned up by free_event() -- they'll also still have their
	 * ref on the rb and will free it whenever they are done with it.
	 *
	 * Aside from that, this buffer is 'fully' detached and unmapped,
	 * undo the VM accounting.
	 */
	atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
	vma->vm_mm->pinned_vm -= mmap_locked;
	free_uid(mmap_user);
	ring_buffer_put(rb); /* could be last */
}
/* VMA callbacks for the perf mmap region.  page_mkwrite shares the
 * fault handler, which only permits writes to the control page. */
static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};
/*
 * mmap() handler: allocate (or re-attach to) the event's ring buffer
 * and charge the pinned memory against the user's perf_event_mlock /
 * RLIMIT_MEMLOCK budget.  Layout is one control page plus 2^n data
 * pages; the VMA must be MAP_SHARED starting at offset 0.
 */
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	struct ring_buffer *rb;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0, flags = 0;
	/*
	 * Don't allow mmap() of inherited per-task counters. This would
	 * create a performance issue due to all children writing to the
	 * same rb.
	 */
	if (event->cpu == -1 && event->attr.inherit)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	vma_size = vma->vm_end - vma->vm_start;
	/* first page is the control page; the rest are data pages */
	nr_pages = (vma_size / PAGE_SIZE) - 1;
	/*
	 * If we have rb pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;
	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;
	if (vma->vm_pgoff != 0)
		return -EINVAL;
	WARN_ON_ONCE(event->ctx->parent_ctx);
again:
	mutex_lock(&event->mmap_mutex);
	if (event->rb) {
		/* re-mmap must match the existing buffer's size */
		if (event->rb->nr_pages != nr_pages) {
			ret = -EINVAL;
			goto unlock;
		}
		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
			/*
			 * Raced against perf_mmap_close() through
			 * perf_event_set_output(). Try again, hope for better
			 * luck.
			 */
			mutex_unlock(&event->mmap_mutex);
			goto again;
		}
		goto unlock;
	}
	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();
	user_locked = atomic_long_read(&user->locked_vm) + user_extra;
	extra = 0;
	/* pages beyond the per-user budget count against RLIMIT_MEMLOCK */
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->pinned_vm + extra;
	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}
	WARN_ON(event->rb);
	if (vma->vm_flags & VM_WRITE)
		flags |= RING_BUFFER_WRITABLE;
	rb = rb_alloc(nr_pages,
		event->attr.watermark ? event->attr.wakeup_watermark : 0,
		event->cpu, flags);
	if (!rb) {
		ret = -ENOMEM;
		goto unlock;
	}
	atomic_set(&rb->mmap_count, 1);
	rb->mmap_locked = extra;
	rb->mmap_user = get_current_user();
	atomic_long_add(user_extra, &user->locked_vm);
	vma->vm_mm->pinned_vm += extra;
	ring_buffer_attach(event, rb);
	rcu_assign_pointer(event->rb, rb);
	perf_event_update_userpage(event);
unlock:
	if (!ret)
		atomic_inc(&event->mmap_count);
	mutex_unlock(&event->mmap_mutex);
	/*
	 * Since pinned accounting is per vm we cannot allow fork() to copy our
	 * vma.
	 */
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &perf_mmap_vmops;
	return ret;
}
/*
 * fops->fasync: (un)register the file for SIGIO delivery, serialised by
 * the inode mutex.  Positive fasync_helper() results are mapped to 0.
 */
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = file_inode(filp);
	struct perf_event *event = filp->private_data;
	int err;
	mutex_lock(&inode->i_mutex);
	err = fasync_helper(fd, filp, on, &event->fasync);
	mutex_unlock(&inode->i_mutex);
	return err < 0 ? err : 0;
}
/* File operations backing a perf_event file descriptor. */
static const struct file_operations perf_fops = {
	.llseek			= no_llseek,
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl	= perf_ioctl,
	.compat_ioctl	= perf_compat_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};
/*
* Perf event wakeup
*
* If there's data, ensure we set the poll() state and publish everything
* to user-space before waking everybody up.
*/
/* fasync state lives on the parent event only; map children to it. */
static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
{
	struct perf_event *target = event->parent ? event->parent : event;
	return &target->fasync;
}
/*
 * Wake everything waiting on the event's ring buffer and deliver a
 * pending SIGIO (kill_fasync) if one was queued by the overflow path.
 */
void perf_event_wakeup(struct perf_event *event)
{
	ring_buffer_wakeup(event);
	if (event->pending_kill) {
		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}
/*
 * irq_work callback: runs shortly after an interrupt/NMI that could not
 * perform the work directly; handles deferred disable and wakeup
 * requests flagged on the event.
 */
static void perf_pending_event(struct irq_work *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);
	int rctx;
	rctx = perf_swevent_get_recursion_context();
	/*
	 * If we 'fail' here, that's OK, it means recursion is already disabled
	 * and we won't recurse 'further'.
	 */
	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
	}
	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}
	if (rctx >= 0)
		perf_swevent_put_recursion_context(rctx);
}
/*
* We assume there is only KVM supporting the callbacks.
* Later on, we might change it to a list if there is
* another virtualization implementation supporting the callbacks.
*/
struct perf_guest_info_callbacks *perf_guest_cbs;
int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = cbs;
return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
/*
 * Emit one u64 register value per bit set in @mask, in ascending bit
 * order, into the output handle.
 */
static void
perf_output_sample_regs(struct perf_output_handle *handle,
			struct pt_regs *regs, u64 mask)
{
	int bit;

	for_each_set_bit(bit, (const unsigned long *) &mask,
			 sizeof(mask) * BITS_PER_BYTE) {
		u64 val;

		val = perf_reg_value(regs, bit);
		perf_output_put(handle, val);
	}
}
/*
 * Resolve the user-level registers for a sample.  If we interrupted
 * kernel code in a user task, fall back to the task's saved user regs;
 * kernel threads (no mm) have none, in which case regs_user is left
 * untouched (presumably pre-initialized to ABI_NONE by the caller —
 * TODO confirm against perf_sample_data_init()).
 */
static void perf_sample_regs_user(struct perf_regs_user *regs_user,
				  struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}

	if (regs) {
		regs_user->regs = regs;
		regs_user->abi = perf_reg_abi(current);
	}
}
/*
 * Get remaining task size from user stack pointer.
 *
 * It'd be better to take stack vma map and limit this more
 * precisely, but there's no way to get it safely under interrupt,
 * so using TASK_SIZE as limit.
 */
static u64 perf_ustack_task_size(struct pt_regs *regs)
{
	unsigned long addr = perf_user_stack_pointer(regs);

	/* No user SP (or SP out of range): nothing to dump. */
	if (!addr || addr >= TASK_SIZE)
		return 0;

	return TASK_SIZE - addr;
}
/*
 * Clamp the requested user-stack dump size.  The returned byte count
 * is limited by the remaining task stack and by what still fits in
 * the (u16) sample header size without overflowing.  Returns 0 when
 * there are no user regs to anchor the dump.
 */
static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
			struct pt_regs *regs)
{
	u64 task_size;

	/* No regs, no stack pointer, no dump. */
	if (!regs)
		return 0;

	/*
	 * Check if we fit in with the requested stack size into the:
	 * - TASK_SIZE
	 *   If we don't, we limit the size to the TASK_SIZE.
	 *
	 * - remaining sample size
	 *   If we don't, we customize the stack size to
	 *   fit in to the remaining sample size.
	 */

	task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
	stack_size = min(stack_size, (u16) task_size);

	/* Current header size plus static size and dynamic size. */
	header_size += 2 * sizeof(u64);

	/* Do we fit in with the current stack dump size? */
	if ((u16) (header_size + stack_size) < header_size) {
		/*
		 * If we overflow the maximum size for the sample,
		 * we customize the stack dump size to fit in.
		 */
		stack_size = USHRT_MAX - header_size - sizeof(u64);
		stack_size = round_up(stack_size, sizeof(u64));
	}

	return stack_size;
}
/*
 * Dump @dump_size bytes of the user stack (starting at the user SP)
 * into the sample stream: static size, data, then the dynamic size
 * (how many bytes were actually copied before faulting).
 */
static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
			  struct pt_regs *regs)
{
	/* Case of a kernel thread, nothing to dump */
	if (!regs) {
		u64 size = 0;
		perf_output_put(handle, size);
	} else {
		unsigned long sp;
		unsigned int rem;
		u64 dyn_size;

		/*
		 * We dump:
		 * static size
		 *   - the size requested by user or the best one we can fit
		 *     in to the sample max size
		 * data
		 *   - user stack dump data
		 * dynamic size
		 *   - the actual dumped size
		 */

		/* Static size. */
		perf_output_put(handle, dump_size);

		/* Data. */
		sp = perf_user_stack_pointer(regs);
		rem = __output_copy_user(handle, (void *) sp, dump_size);
		dyn_size = dump_size - rem;

		/* Skip the bytes we could not copy so layout stays fixed. */
		perf_output_skip(handle, rem);

		/* Dynamic size. */
		perf_output_put(handle, dyn_size);
	}
}
/*
 * Populate the common sample_id fields (TID/TIME/ID/STREAM_ID/CPU)
 * selected by sample_type and grow header->size by the precomputed
 * id_header_size.
 */
static void __perf_event_header__init_id(struct perf_event_header *header,
					 struct perf_sample_data *data,
					 struct perf_event *event)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;
	header->size += event->id_header_size;

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);
	}

	if (sample_type & PERF_SAMPLE_TIME)
		data->time = perf_clock();

	if (sample_type & PERF_SAMPLE_ID)
		data->id = primary_event_id(event);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = event->id;

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu	 = raw_smp_processor_id();
		data->cpu_entry.reserved = 0;
	}
}
/* Conditional wrapper: only events with sample_id_all carry id fields. */
void perf_event_header__init_id(struct perf_event_header *header,
				struct perf_sample_data *data,
				struct perf_event *event)
{
	if (!event->attr.sample_id_all)
		return;

	__perf_event_header__init_id(header, data, event);
}
/*
 * Emit the sample_id trailer fields in the same fixed order they were
 * computed by __perf_event_header__init_id().
 */
static void __perf_event__output_id_sample(struct perf_output_handle *handle,
					   struct perf_sample_data *data)
{
	u64 sample_type = data->type;

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);
}
/* Conditional wrapper: only events with sample_id_all emit the trailer. */
void perf_event__output_id_sample(struct perf_event *event,
				  struct perf_output_handle *handle,
				  struct perf_sample_data *sample)
{
	if (!event->attr.sample_id_all)
		return;

	__perf_event__output_id_sample(handle, sample);
}
/*
 * Emit the read_format payload for a single (non-group) event:
 * count, optional enabled/running times, optional event id.
 * values[4] = worst case: count + two times + id.
 */
static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event,
				 u64 enabled, u64 running)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_count(event);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	__output_copy(handle, values, n * sizeof(u64));
}
/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
/*
 * Emit the PERF_FORMAT_GROUP read payload: nr, optional times, then
 * (count[, id]) for the leader followed by every sibling.  Siblings
 * other than @event are read (PMU ->read) to refresh their counts.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
			    struct perf_event *event,
			    u64 enabled, u64 running)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = perf_event_count(leader);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	__output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = perf_event_count(sub);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		__output_copy(handle, values, n * sizeof(u64));
	}
}
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
				 PERF_FORMAT_TOTAL_TIME_RUNNING)

/*
 * Emit the PERF_SAMPLE_READ payload, dispatching to the single-event
 * or group encoder based on read_format.
 */
static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	u64 enabled = 0, running = 0, now;
	u64 read_format = event->attr.read_format;

	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * we cannot simply call update_context_time()
	 * because of locking issue as we are called in
	 * NMI context
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
		calc_timer_values(event, &now, &enabled, &running);

	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
	else
		perf_output_read_one(handle, event, enabled, running);
}
/*
 * Write one PERF_RECORD_SAMPLE payload.  The field order is dictated
 * by the sample_type bits and must match the size computed by
 * perf_prepare_sample() exactly.
 */
void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		/* callchain was resolved in perf_prepare_sample() */
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			__output_copy(handle, data->callchain, size);
		} else {
			/* always emit at least the entry count (0) */
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			__output_copy(handle, data->raw->data,
					   data->raw->size);
		} else {
			/* keep u64 alignment: u32 size + u32 padding */
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}

	/*
	 * Wakeup accounting: without a watermark, count emitted samples
	 * and request a wakeup every attr.wakeup_events samples.
	 */
	if (!event->attr.watermark) {
		int wakeup_events = event->attr.wakeup_events;

		if (wakeup_events) {
			struct ring_buffer *rb = handle->rb;
			int events = local_inc_return(&rb->events);

			if (events >= wakeup_events) {
				local_sub(wakeup_events, &rb->events);
				local_inc(&rb->wakeup);
			}
		}
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		if (data->br_stack) {
			size_t size;

			size = data->br_stack->nr
			     * sizeof(struct perf_branch_entry);

			perf_output_put(handle, data->br_stack->nr);
			perf_output_copy(handle, data->br_stack->entries, size);
		} else {
			/*
			 * we always store at least the value of nr
			 */
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_REGS_USER) {
		u64 abi = data->regs_user.abi;

		/*
		 * If there are no regs to dump, notice it through
		 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
		 */
		perf_output_put(handle, abi);

		if (abi) {
			u64 mask = event->attr.sample_regs_user;
			perf_output_sample_regs(handle,
						data->regs_user.regs,
						mask);
		}
	}

	if (sample_type & PERF_SAMPLE_STACK_USER)
		perf_output_sample_ustack(handle,
					  data->stack_user_size,
					  data->regs_user.regs);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		perf_output_put(handle, data->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		perf_output_put(handle, data->data_src.val);
}
/*
 * Precompute header->size (and cache expensive data such as the
 * callchain) for a sample.  Must stay in lock-step with the layout
 * emitted by perf_output_sample().
 */
void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	__perf_event_header__init_id(header, data, event);

	if (sample_type & PERF_SAMPLE_IP)
		data->ip = perf_instruction_pointer(regs);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;	/* one u64 for nr */

		data->callchain = perf_callchain(event, regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);	/* padding, keeps u64 alignment */

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		int size = sizeof(u64); /* nr */
		if (data->br_stack) {
			size += data->br_stack->nr
			      * sizeof(struct perf_branch_entry);
		}
		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_REGS_USER) {
		/* regs dump ABI info */
		int size = sizeof(u64);

		perf_sample_regs_user(&data->regs_user, regs);

		if (data->regs_user.regs) {
			u64 mask = event->attr.sample_regs_user;
			size += hweight64(mask) * sizeof(u64);
		}

		header->size += size;
	}

	if (sample_type & PERF_SAMPLE_STACK_USER) {
		/*
		 * Either we need PERF_SAMPLE_STACK_USER bit to be allways
		 * processed as the last one or have additional check added
		 * in case new sample type is added, because we could eat
		 * up the rest of the sample size.
		 */
		struct perf_regs_user *uregs = &data->regs_user;
		u16 stack_size = event->attr.sample_stack_user;
		u16 size = sizeof(u64);

		/* resolve user regs if REGS_USER didn't already do it */
		if (!uregs->abi)
			perf_sample_regs_user(uregs, regs);

		stack_size = perf_sample_ustack_size(stack_size, header->size,
						     uregs->regs);

		/*
		 * If there is something to dump, add space for the dump
		 * itself and for the field that tells the dynamic size,
		 * which is how many have been actually dumped.
		 */
		if (stack_size)
			size += sizeof(u64) + stack_size;

		data->stack_user_size = stack_size;
		header->size += size;
	}
}
/*
 * Produce a complete PERF_RECORD_SAMPLE into the event's ring buffer:
 * size the record, reserve space, emit, commit.
 */
static void perf_event_output(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	/* protect the callchain buffers */
	rcu_read_lock();

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size))
		goto exit;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);

exit:
	rcu_read_unlock();
}
/*
 * read event_id
 */

/* Fixed prefix of a PERF_RECORD_READ record; the variable-size
 * read_format payload follows (see perf_output_read()). */
struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};
/*
 * Emit a PERF_RECORD_READ for @task into @event's buffer.
 */
static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			/* fixed part + precomputed read_format size */
			.size = sizeof(read_event) + event->read_size,
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	perf_event_header__init_id(&read_event.header, &sample, event);
	ret = perf_output_begin(&handle, event, read_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);
	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
}
/* Predicate / emitter callback types for side-band record fan-out. */
typedef int  (perf_event_aux_match_cb)(struct perf_event *event, void *data);
typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);

/*
 * Walk one context's event list (RCU) and invoke @output on every
 * active, matching event.
 */
static void
perf_event_aux_ctx(struct perf_event_context *ctx,
		   perf_event_aux_match_cb match,
		   perf_event_aux_output_cb output,
		   void *data)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state < PERF_EVENT_STATE_INACTIVE)
			continue;
		if (!event_filter_match(event))
			continue;
		if (match(event, data))
			output(event, data);
	}
}
/*
 * Fan a side-band record (task/comm/mmap) out to every interested
 * event: each PMU's CPU context, the current task's per-PMU contexts,
 * and (if given) an explicit @task_ctx.
 */
static void
perf_event_aux(perf_event_aux_match_cb match,
	       perf_event_aux_output_cb output,
	       void *data,
	       struct perf_event_context *task_ctx)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int ctxn;

	rcu_read_lock();
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
		/* software PMUs share a cpu context; visit it only once */
		if (cpuctx->unique_pmu != pmu)
			goto next;
		perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
		/* with an explicit task_ctx, skip the implicit task walk */
		if (task_ctx)
			goto next;
		ctxn = pmu->task_ctx_nr;
		if (ctxn < 0)
			goto next;
		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
		if (ctx)
			perf_event_aux_ctx(ctx, match, output, data);
next:
		put_cpu_ptr(pmu->pmu_cpu_context);
	}

	if (task_ctx) {
		preempt_disable();
		perf_event_aux_ctx(task_ctx, match, output, data);
		preempt_enable();
	}
	rcu_read_unlock();
}
/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
 */

/* Work item describing one PERF_RECORD_FORK / PERF_RECORD_EXIT. */
struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};
/*
 * Emit one task (fork/exit) record into a single matching event's
 * buffer.  The pid/tid fields are resolved per-event (pid namespaces),
 * and the header size is restored afterwards since event_id is shared
 * across all receivers.
 */
static void perf_event_task_output(struct perf_event *event,
				   void *data)
{
	struct perf_task_event *task_event = data;
	struct perf_output_handle handle;
	struct perf_sample_data	sample;
	struct task_struct *task = task_event->task;
	int ret, size = task_event->event_id.header.size;

	perf_event_header__init_id(&task_event->event_id.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				task_event->event_id.header.size);
	if (ret)
		goto out;

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	task_event->event_id.header.size = size;
}
/* An event wants task records if it tracks any side-band info. */
static int perf_event_task_match(struct perf_event *event,
				 void *data __maybe_unused)
{
	struct perf_event_attr *attr = &event->attr;

	return attr->comm || attr->mmap || attr->mmap_data || attr->task;
}
/*
 * Broadcast a PERF_RECORD_FORK (@new) or PERF_RECORD_EXIT for @task
 * to all interested events; bails out early when nobody listens.
 */
static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_aux(perf_event_task_match,
		       perf_event_task_output,
		       &task_event,
		       task_ctx);
}
/* Emit a PERF_RECORD_FORK for a freshly created task. */
void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}
/*
 * comm tracking
 */

/* Work item describing one PERF_RECORD_COMM (task rename). */
struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;	/* u64-padded copy of task->comm */
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};
/*
 * Emit one comm record into a single matching event's buffer;
 * pid/tid are resolved per-event and the shared header size is
 * restored afterwards.
 */
static void perf_event_comm_output(struct perf_event *event,
				   void *data)
{
	struct perf_comm_event *comm_event = data;
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = comm_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				comm_event->event_id.header.size);

	if (ret)
		goto out;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	__output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	comm_event->event_id.header.size = size;
}
/* Only events that asked for comm tracking receive comm records. */
static int perf_event_comm_match(struct perf_event *event,
				 void *data __maybe_unused)
{
	return event->attr.comm != 0;
}
/*
 * Snapshot the task's comm into a zero-padded, u64-aligned buffer and
 * fan the record out to all interested events.
 */
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	char comm[TASK_COMM_LEN];
	unsigned int size;

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	/* round up so the record stays u64-aligned */
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

	perf_event_aux(perf_event_comm_match,
		       perf_event_comm_output,
		       comm_event,
		       NULL);
}
/*
 * Hook called on exec/prctl(PR_SET_NAME): re-enable enable_on_exec
 * events in every task context, then emit PERF_RECORD_COMM if anyone
 * is listening.
 */
void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;
	struct perf_event_context *ctx;
	int ctxn;

	rcu_read_lock();
	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		perf_event_enable_on_exec(ctx);
	}
	rcu_read_unlock();

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}
/*
 * mmap tracking
 */

/* Work item describing one PERF_RECORD_MMAP. */
struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;	/* u64-padded path / label */
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};
/*
 * Emit one mmap record into a single matching event's buffer;
 * pid/tid are resolved per-event and the shared header size is
 * restored afterwards.
 */
static void perf_event_mmap_output(struct perf_event *event,
				   void *data)
{
	struct perf_mmap_event *mmap_event = data;
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int size = mmap_event->event_id.header.size;
	int ret;

	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
	ret = perf_output_begin(&handle, event,
				mmap_event->event_id.header.size);
	if (ret)
		goto out;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	__output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);

	perf_event__output_id_sample(event, &handle, &sample);

	perf_output_end(&handle);
out:
	mmap_event->event_id.header.size = size;
}
/* Executable mappings need attr.mmap; data mappings need attr.mmap_data. */
static int perf_event_mmap_match(struct perf_event *event,
				 void *data)
{
	struct perf_mmap_event *mmap_event = data;
	int exec = mmap_event->vma->vm_flags & VM_EXEC;

	if (exec)
		return event->attr.mmap;

	return event->attr.mmap_data;
}
/*
 * Resolve a human-readable name for @vma (file path, arch name, or a
 * synthetic label such as [vdso]/[heap]/[stack]), then fan the mmap
 * record out to all interested events.
 *
 * Fix: arch_vma_name() was evaluated twice for the same vma; hoist it
 * into a local so it is called exactly once.
 */
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the rb backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		const char *vname = arch_vma_name(vma);

		if (vname) {
			name = strncpy(tmp, vname, sizeof(tmp) - 1);
			/* strncpy does not guarantee termination */
			tmp[sizeof(tmp) - 1] = '\0';
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_brk &&
				vma->vm_end >= vma->vm_mm->brk) {
			name = strncpy(tmp, "[heap]", sizeof(tmp));
			goto got_name;
		} else if (vma->vm_start <= vma->vm_mm->start_stack &&
				vma->vm_end >= vma->vm_mm->start_stack) {
			name = strncpy(tmp, "[stack]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	/* round up so the record stays u64-aligned */
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	if (!(vma->vm_flags & VM_EXEC))
		mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	perf_event_aux(perf_event_mmap_match,
		       perf_event_mmap_output,
		       mmap_event,
		       NULL);

	kfree(buf);
}
/*
 * Hook called when a new mapping is installed: emit PERF_RECORD_MMAP
 * if anyone is listening.
 */
void perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}
/*
 * IRQ throttle logging
 */

/* Emit PERF_RECORD_THROTTLE (or UNTHROTTLE when @enable) for @event. */
static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}
/*
 * Generic event overflow handling, sampling.
 */

/*
 * Handle one counter overflow: account interrupts for throttling,
 * re-adjust the period for freq events, honour the event_limit, and
 * hand the sample to the overflow handler (or the default output
 * path).  Returns non-zero when the event should be stopped
 * (throttled or limit reached).
 */
static int __perf_event_overflow(struct perf_event *event,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	u64 seq;
	int ret = 0;

	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters, ignore those.
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

	/* interrupt counting restarts whenever the global throttle seq ticks */
	seq = __this_cpu_read(perf_throttled_seq);
	if (seq != hwc->interrupts_seq) {
		hwc->interrupts_seq = seq;
		hwc->interrupts = 1;
	} else {
		hwc->interrupts++;
		if (unlikely(throttle
			     && hwc->interrupts >= max_samples_per_tick)) {
			__this_cpu_inc(perf_throttled_count);
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period, true);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		event->pending_disable = 1;
		irq_work_queue(&event->pending);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, data, regs);
	else
		perf_event_output(event, data, regs);

	/* deliver SIGIO asynchronously; we may be in NMI context here */
	if (*perf_event_fasync(event) && event->pending_kill) {
		event->pending_wakeup = 1;
		irq_work_queue(&event->pending);
	}

	return ret;
}
/* Public overflow entry point: throttling is always enabled here. */
int perf_event_overflow(struct perf_event *event,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, 1, data, regs);
}
/*
 * Generic software event infrastructure
 */

/* Per-CPU hash-table state for software events. */
struct swevent_htable {
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];

	/* Keeps track of cpu being initialized/exited */
	bool				online;
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

/*
 * Advance period_left by whole periods (lock-free cmpxchg loop) and
 * return the number of periods that elapsed — i.e. how many overflows
 * to report.  Returns 0 while period_left is still negative.
 */
static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	/* retry if another CPU/NMI updated period_left underneath us */
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
/*
 * Report @overflow period expirations (computing them from
 * period_left when 0 is passed), stopping early if the event gets
 * throttled.  The first report is unthrottled; subsequent ones in the
 * same burst may throttle.
 */
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}
/*
 * Account @nr occurrences on a software event and generate samples
 * when the sampling period expires.  Fast paths: PERIOD sampling
 * without freq reports immediately; period==1 counts every hit;
 * otherwise period_left bookkeeping decides.
 */
static void perf_swevent_event(struct perf_event *event, u64 nr,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!is_sampling_event(event))
		return;

	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
		data->period = nr;
		return perf_swevent_overflow(event, 1, data, regs);
	} else
		data->period = event->hw.last_period;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, data, regs);

	/* period not yet exhausted: just accumulate */
	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, data, regs);
}
/* Non-zero when this sample must be dropped for @event. */
static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 1;

	if (!regs)
		return 0;

	if (user_mode(regs))
		return event->attr.exclude_user;

	return event->attr.exclude_kernel;
}
/* True when @event should receive this (type, event_id) occurrence. */
static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	return event->attr.type == type &&
	       event->attr.config == event_id &&
	       !perf_exclude_event(event, regs);
}
/* Hash (type, event_id) into a bucket index for the swevent hlist. */
static inline u64 swevent_hash(u64 type, u32 event_id)
{
	return hash_64((type << 32) | event_id, SWEVENT_HLIST_BITS);
}
/* Bucket lookup on an already-dereferenced hlist. */
static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	return &hlist->heads[swevent_hash(type, event_id)];
}
/* For the read side: events when they trigger */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist = rcu_dereference(swhash->swevent_hlist);

	return hlist ? __find_swevent_head(hlist, type, event_id) : NULL;
}
/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
	struct swevent_hlist *hlist;
	u32 event_id = event->attr.config;
	u64 type = event->attr.type;

	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */
	hlist = rcu_dereference_protected(swhash->swevent_hlist,
					  lockdep_is_held(&event->ctx->lock));
	if (!hlist)
		return NULL;

	return __find_swevent_head(hlist, type, event_id);
}
/*
 * Deliver one software-event occurrence to every matching event on
 * this CPU's hash bucket (RCU read side).
 */
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct perf_event *event;
	struct hlist_head *head;

	rcu_read_lock();
	head = find_swevent_head_rcu(swhash, type, event_id);
	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_event(event, nr, data, regs);
	}
end:
	rcu_read_unlock();
}
/*
 * Claim this CPU's software-event recursion slot for the current
 * context level; returns the slot index, or negative when already
 * taken (i.e. we are recursing).
 */
int perf_swevent_get_recursion_context(void)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

/* Release a slot previously returned by the getter above. */
inline void perf_swevent_put_recursion_context(int rctx)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	put_recursion_context(swhash->recursion, rctx);
}
/*
 * Generic software-event trigger point.
 *
 * Fix: the early return on a failed recursion context previously
 * skipped preempt_enable_notrace(), leaking a preempt count and
 * leaving preemption disabled forever on that CPU.  Route all exits
 * through the enable (same shape as the upstream fix).
 */
void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (unlikely(rctx < 0))
		goto fail;

	perf_sample_data_init(&data, addr, 0);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);

	perf_swevent_put_recursion_context(rctx);
fail:
	preempt_enable_notrace();
}
/* Nothing to do: software events update event->count at trigger time. */
static void perf_swevent_read(struct perf_event *event)
{
}
/*
 * PMU ->add: schedule a software event onto this CPU by inserting it
 * into the per-CPU hash list.
 */
static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

	if (is_sampling_event(event)) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (!head) {
		/*
		 * We can race with cpu hotplug code. Do not
		 * WARN if the cpu just got unplugged.
		 */
		WARN_ON_ONCE(swhash->online);
		return -EINVAL;
	}

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}
/* PMU ->del: unschedule by removing from the per-CPU hash list. */
static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}

/* PMU ->start: clear the stopped state. */
static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}

/* PMU ->stop: mark stopped; perf_exclude_event() then drops samples. */
static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}
/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	/* callers must hold swhash->hlist_mutex */
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}
/*
 * Unpublish and free the hash list (caller holds hlist_mutex).
 * RCU_INIT_POINTER() is the documented idiom for publishing NULL:
 * no memory barrier is needed since there is nothing to order against.
 */
static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
	kfree_rcu(hlist, rcu_head);
}
/* Drop one reference on @cpu's hash list; free it on the last put. */
static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}
/*
 * Drop the hash-list references taken by swevent_hlist_get():
 * one CPU for a bound event, all possible CPUs otherwise.
 */
static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}
/*
 * Take a reference on @cpu's hash list, allocating it on first use
 * for an online CPU.  Returns 0 or -ENOMEM.
 */
static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}
/*
 * Take hash-list references for the event: a single CPU when bound,
 * otherwise every possible CPU (rolled back on partial failure).
 */
static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	/* undo the references taken before the failing CPU */
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}
/* One static key per software event id; gates the swevent fast paths. */
struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
/*
 * Teardown for a software event: drop its static-key enable count and
 * its hash-table references.  Only ever set on top-level events, hence
 * the WARN_ON(parent).
 */
static void sw_perf_event_destroy(struct perf_event *event)
{
u64 event_id = event->attr.config;
WARN_ON(event->parent);
static_key_slow_dec(&perf_swevent_enabled[event_id]);
swevent_hlist_put(event);
}
/*
 * ->event_init for the software pmu.  Claims PERF_TYPE_SOFTWARE events,
 * except the cpu/task clock ids which have dedicated pmus below.
 * For top-level events, takes hash-table refs and enables the static key;
 * inherited children share the parent's accounting.
 */
static int perf_swevent_init(struct perf_event *event)
{
u64 event_id = event->attr.config;
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
/*
 * no branch sampling for software events
 */
if (has_branch_stack(event))
return -EOPNOTSUPP;
switch (event_id) {
case PERF_COUNT_SW_CPU_CLOCK:
case PERF_COUNT_SW_TASK_CLOCK:
/* handled by perf_cpu_clock / perf_task_clock instead */
return -ENOENT;
default:
break;
}
/* event_id indexes perf_swevent_enabled[]; reject out-of-range configs */
if (event_id >= PERF_COUNT_SW_MAX)
return -ENOENT;
if (!event->parent) {
int err;
err = swevent_hlist_get(event);
if (err)
return err;
static_key_slow_inc(&perf_swevent_enabled[event_id]);
event->destroy = sw_perf_event_destroy;
}
return 0;
}
/* Software events have no user-page counter index; always report 0. */
static int perf_swevent_event_idx(struct perf_event *event)
{
return 0;
}
/* pmu definition for generic software events (PERF_TYPE_SOFTWARE). */
static struct pmu perf_swevent = {
.task_ctx_nr = perf_sw_context,
.event_init = perf_swevent_init,
.add = perf_swevent_add,
.del = perf_swevent_del,
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
.event_idx = perf_swevent_event_idx,
};
#ifdef CONFIG_EVENT_TRACING
/*
 * Evaluate the event's tracepoint filter against the raw record.
 * Filters are attached to top-level events only, so inherited children
 * consult their parent.  Returns 1 on match (or no filter), else 0.
 */
static int perf_tp_filter_match(struct perf_event *event,
struct perf_sample_data *data)
{
void *record = data->raw->data;
/* only top level events have filters set */
if (event->parent)
event = event->parent;
if (likely(!event->filter) || filter_match_preds(event->filter, record))
return 1;
return 0;
}
/*
 * Should this tracepoint hit be counted by @event?  Rejects stopped
 * events, events that exclude kernel samples, and filter mismatches.
 */
static int perf_tp_event_match(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
if (event->hw.state & PERF_HES_STOPPED)
return 0;
/*
 * All tracepoints are from kernel-space.
 */
if (event->attr.exclude_kernel)
return 0;
if (!perf_tp_filter_match(event, data))
return 0;
return 1;
}
/*
 * Tracepoint hit entry point: deliver one raw sample to every matching
 * event on @head, and optionally to matching tracepoint events in
 * @task's software context.  @rctx is the recursion context to release.
 */
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
struct pt_regs *regs, struct hlist_head *head, int rctx,
struct task_struct *task)
{
struct perf_sample_data data;
struct perf_event *event;
struct perf_raw_record raw = {
.size = entry_size,
.data = record,
};
perf_sample_data_init(&data, addr, 0);
data.raw = &raw;
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
/*
 * If we got specified a target task, also iterate its context and
 * deliver this event there too.
 */
if (task && task != current) {
struct perf_event_context *ctx;
struct trace_entry *entry = record;
rcu_read_lock();
ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
if (!ctx)
goto unlock;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->attr.type != PERF_TYPE_TRACEPOINT)
continue;
/* only events watching this exact tracepoint id */
if (event->attr.config != entry->type)
continue;
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
unlock:
rcu_read_unlock();
}
perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
/* Teardown hook for tracepoint events; undoes perf_trace_init(). */
static void tp_perf_event_destroy(struct perf_event *event)
{
perf_trace_destroy(event);
}
/*
 * ->event_init for the tracepoint pmu.  Claims PERF_TYPE_TRACEPOINT
 * events, hooks them into the trace subsystem, and installs the
 * matching destroy callback.
 */
static int perf_tp_event_init(struct perf_event *event)
{
int err;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
return -ENOENT;
/*
 * no branch sampling for tracepoint events
 */
if (has_branch_stack(event))
return -EOPNOTSUPP;
err = perf_trace_init(event);
if (err)
return err;
event->destroy = tp_perf_event_destroy;
return 0;
}
/* pmu definition for tracepoint events; shares the swevent start/stop/read. */
static struct pmu perf_tracepoint = {
.task_ctx_nr = perf_sw_context,
.event_init = perf_tp_event_init,
.add = perf_trace_add,
.del = perf_trace_del,
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
.event_idx = perf_swevent_event_idx,
};
/* Register the tracepoint pmu under the fixed PERF_TYPE_TRACEPOINT id. */
static inline void perf_tp_register(void)
{
perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}
/*
 * ioctl(PERF_EVENT_IOC_SET_FILTER) backend: copy the filter string from
 * userspace (bounded by PAGE_SIZE) and hand it to ftrace.
 */
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
char *filter_str;
int ret;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
return -EINVAL;
filter_str = strndup_user(arg, PAGE_SIZE);
if (IS_ERR(filter_str))
return PTR_ERR(filter_str);
ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
kfree(filter_str);
return ret;
}
/* Release any ftrace filter attached to the event. */
static void perf_event_free_filter(struct perf_event *event)
{
ftrace_profile_free_filter(event);
}
#else
/* Stubs for !CONFIG_EVENT_TRACING builds: no tracepoint pmu, no filters. */
static inline void perf_tp_register(void)
{
}
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
return -ENOENT;
}
static void perf_event_free_filter(struct perf_event *event)
{
}
#endif /* CONFIG_EVENT_TRACING */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Hardware-breakpoint hit: deliver one count of 1 to @bp unless it is
 * stopped or excluded for the current register context.  @data is the
 * pt_regs at the trap site.
 */
void perf_bp_event(struct perf_event *bp, void *data)
{
struct perf_sample_data sample;
struct pt_regs *regs = data;
perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
if (!bp->hw.state && !perf_exclude_event(bp, regs))
perf_swevent_event(bp, 1, &sample, regs);
}
#endif
/*
 * hrtimer based swevent callback
 *
 * Fires once per sample period for hrtimer-sampled software events
 * (cpu-clock / task-clock).  Reads the counter, emits an overflow
 * sample when appropriate, and re-arms itself unless the overflow
 * handler asked to stop or the event went inactive.
 */
static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
enum hrtimer_restart ret = HRTIMER_RESTART;
struct perf_sample_data data;
struct pt_regs *regs;
struct perf_event *event;
u64 period;
event = container_of(hrtimer, struct perf_event, hw.hrtimer);
if (event->state != PERF_EVENT_STATE_ACTIVE)
return HRTIMER_NORESTART;
event->pmu->read(event);
perf_sample_data_init(&data, 0, event->hw.last_period);
regs = get_irq_regs();
if (regs && !perf_exclude_event(event, regs)) {
/* honour exclude_idle: don't sample while the idle task runs */
if (!(event->attr.exclude_idle && is_idle_task(current)))
if (__perf_event_overflow(event, 1, &data, regs))
ret = HRTIMER_NORESTART;
}
/* clamp the period to >= 10us so the timer cannot storm */
period = max_t(u64, 10000, event->hw.sample_period);
hrtimer_forward_now(hrtimer, ns_to_ktime(period));
return ret;
}
/*
 * Arm the sampling hrtimer.  Resumes a previously-cancelled period if
 * period_left holds a remainder; otherwise starts a fresh period
 * (clamped to >= 10us, matching the callback).
 */
static void perf_swevent_start_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
s64 period;
if (!is_sampling_event(event))
return;
period = local64_read(&hwc->period_left);
if (period) {
if (period < 0)
period = 10000;
local64_set(&hwc->period_left, 0);
} else {
period = max_t(u64, 10000, hwc->sample_period);
}
__hrtimer_start_range_ns(&hwc->hrtimer,
ns_to_ktime(period), 0,
HRTIMER_MODE_REL_PINNED, 0);
}
/*
 * Cancel the sampling hrtimer, saving the unexpired remainder in
 * period_left so a later start can resume mid-period.
 */
static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (is_sampling_event(event)) {
ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
local64_set(&hwc->period_left, ktime_to_ns(remaining));
hrtimer_cancel(&hwc->hrtimer);
}
}
/*
 * One-time hrtimer setup for a sampling software event.  Frequency-based
 * events are converted to a fixed period up front (hrtimers tick at a
 * fixed rate, so no dynamic period adjustment is needed).
 */
static void perf_swevent_init_hrtimer(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
if (!is_sampling_event(event))
return;
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
/*
 * Since hrtimers have a fixed rate, we can do a static freq->period
 * mapping and avoid the whole period adjust feedback stuff.
 */
if (event->attr.freq) {
long freq = event->attr.sample_freq;
event->attr.sample_period = NSEC_PER_SEC / freq;
hwc->sample_period = event->attr.sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
hwc->last_period = hwc->sample_period;
event->attr.freq = 0;
}
}
/*
 * Software event: cpu wall time clock
 */
/* Accumulate elapsed local_clock() time since the last update. */
static void cpu_clock_event_update(struct perf_event *event)
{
s64 prev;
u64 now;
now = local_clock();
prev = local64_xchg(&event->hw.prev_count, now);
local64_add(now - prev, &event->count);
}
/* ->start: snapshot the clock and arm the sampling timer. */
static void cpu_clock_event_start(struct perf_event *event, int flags)
{
local64_set(&event->hw.prev_count, local_clock());
perf_swevent_start_hrtimer(event);
}
/* ->stop: cancel the timer and fold in the final delta. */
static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
perf_swevent_cancel_hrtimer(event);
cpu_clock_event_update(event);
}
/* ->add: software clocks cannot fail to schedule; start if requested. */
static int cpu_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
cpu_clock_event_start(event, flags);
return 0;
}
static void cpu_clock_event_del(struct perf_event *event, int flags)
{
cpu_clock_event_stop(event, flags);
}
static void cpu_clock_event_read(struct perf_event *event)
{
cpu_clock_event_update(event);
}
/* ->event_init: claims only PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_CLOCK. */
static int cpu_clock_event_init(struct perf_event *event)
{
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
return -ENOENT;
/*
 * no branch sampling for software events
 */
if (has_branch_stack(event))
return -EOPNOTSUPP;
perf_swevent_init_hrtimer(event);
return 0;
}
/* pmu definition for the cpu-clock software event. */
static struct pmu perf_cpu_clock = {
.task_ctx_nr = perf_sw_context,
.event_init = cpu_clock_event_init,
.add = cpu_clock_event_add,
.del = cpu_clock_event_del,
.start = cpu_clock_event_start,
.stop = cpu_clock_event_stop,
.read = cpu_clock_event_read,
.event_idx = perf_swevent_event_idx,
};
/*
 * Software event: task time clock
 */
/* Accumulate the delta between @now and the previous snapshot. */
static void task_clock_event_update(struct perf_event *event, u64 now)
{
u64 prev;
s64 delta;
prev = local64_xchg(&event->hw.prev_count, now);
delta = now - prev;
local64_add(delta, &event->count);
}
/* ->start: snapshot the context's task time and arm the sampling timer. */
static void task_clock_event_start(struct perf_event *event, int flags)
{
local64_set(&event->hw.prev_count, event->ctx->time);
perf_swevent_start_hrtimer(event);
}
static void task_clock_event_stop(struct perf_event *event, int flags)
{
perf_swevent_cancel_hrtimer(event);
task_clock_event_update(event, event->ctx->time);
}
/* ->add: software clocks cannot fail to schedule; start if requested. */
static int task_clock_event_add(struct perf_event *event, int flags)
{
if (flags & PERF_EF_START)
task_clock_event_start(event, flags);
return 0;
}
static void task_clock_event_del(struct perf_event *event, int flags)
{
task_clock_event_stop(event, PERF_EF_UPDATE);
}
/*
 * ->read: ctx->time is only advanced while the context is scheduled in,
 * so extrapolate from ctx->timestamp to "now" before updating.
 */
static void task_clock_event_read(struct perf_event *event)
{
u64 now = perf_clock();
u64 delta = now - event->ctx->timestamp;
u64 time = event->ctx->time + delta;
task_clock_event_update(event, time);
}
/* ->event_init: claims only PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK. */
static int task_clock_event_init(struct perf_event *event)
{
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
return -ENOENT;
/*
 * no branch sampling for software events
 */
if (has_branch_stack(event))
return -EOPNOTSUPP;
perf_swevent_init_hrtimer(event);
return 0;
}
/* pmu definition for the task-clock software event. */
static struct pmu perf_task_clock = {
.task_ctx_nr = perf_sw_context,
.event_init = task_clock_event_init,
.add = task_clock_event_add,
.del = task_clock_event_del,
.start = task_clock_event_start,
.stop = task_clock_event_stop,
.read = task_clock_event_read,
.event_idx = perf_swevent_event_idx,
};
/* Default no-op callbacks installed for pmus that leave hooks unset. */
static void perf_pmu_nop_void(struct pmu *pmu)
{
}
static int perf_pmu_nop_int(struct pmu *pmu)
{
return 0;
}
/*
 * Default transaction stubs for pmus with pmu_enable/pmu_disable:
 * bracket the group add with a disable/enable pair to batch hardware
 * accesses.
 */
static void perf_pmu_start_txn(struct pmu *pmu)
{
perf_pmu_disable(pmu);
}
static int perf_pmu_commit_txn(struct pmu *pmu)
{
perf_pmu_enable(pmu);
return 0;
}
static void perf_pmu_cancel_txn(struct pmu *pmu)
{
perf_pmu_enable(pmu);
}
/* Default ->event_idx: 1-based hardware counter index for the user page. */
static int perf_event_idx_default(struct perf_event *event)
{
return event->hw.idx + 1;
}
/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */
/* Find an already-registered pmu sharing @ctxn and return its cpu context. */
static void *find_pmu_context(int ctxn)
{
struct pmu *pmu;
if (ctxn < 0)
return NULL;
list_for_each_entry(pmu, &pmus, entry) {
if (pmu->task_ctx_nr == ctxn)
return pmu->pmu_cpu_context;
}
return NULL;
}
/*
 * Re-point unique_pmu away from @old_pmu on every CPU of the shared
 * cpu-context, used when the current unique owner is being unregistered.
 */
static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
int cpu;
for_each_possible_cpu(cpu) {
struct perf_cpu_context *cpuctx;
cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
if (cpuctx->unique_pmu == old_pmu)
cpuctx->unique_pmu = pmu;
}
}
/*
 * Free @pmu's per-cpu context unless another registered pmu still
 * shares it, in which case just hand off unique_pmu ownership.
 */
static void free_pmu_context(struct pmu *pmu)
{
struct pmu *i;
mutex_lock(&pmus_lock);
/*
 * Like a real lame refcount.
 */
list_for_each_entry(i, &pmus, entry) {
if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
update_pmu_context(i, pmu);
goto out;
}
}
free_percpu(pmu->pmu_cpu_context);
out:
mutex_unlock(&pmus_lock);
}
/* Maps dynamically-allocated pmu type ids to their struct pmu. */
static struct idr pmu_idr;
/* sysfs "type" attribute: exposes the pmu's numeric type id. */
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
struct pmu *pmu = dev_get_drvdata(dev);
return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}
static struct device_attribute pmu_dev_attrs[] = {
__ATTR_RO(type),
__ATTR_NULL,
};
/* Set once the driver core is up; pmu devices are only created after that. */
static int pmu_bus_running;
/* /sys/bus/event_source: one device per named pmu. */
static struct bus_type pmu_bus = {
.name = "event_source",
.dev_attrs = pmu_dev_attrs,
};
static void pmu_dev_release(struct device *dev)
{
kfree(dev);
}
/*
 * Allocate and register the sysfs device for a named pmu on the
 * event_source bus.  On failure after device_initialize(), put_device()
 * releases the allocation via pmu_dev_release().  Returns 0 or -errno.
 */
static int pmu_dev_alloc(struct pmu *pmu)
{
int ret = -ENOMEM;
pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!pmu->dev)
goto out;
pmu->dev->groups = pmu->attr_groups;
device_initialize(pmu->dev);
ret = dev_set_name(pmu->dev, "%s", pmu->name);
if (ret)
goto free_dev;
dev_set_drvdata(pmu->dev, pmu);
pmu->dev->bus = &pmu_bus;
pmu->dev->release = pmu_dev_release;
ret = device_add(pmu->dev);
if (ret)
goto free_dev;
out:
return ret;
free_dev:
put_device(pmu->dev);
goto out;
}
static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;
/*
 * Register a pmu with the perf core.
 *
 * @name: sysfs name, or NULL for an anonymous pmu (no device, no idr id).
 * @type: fixed PERF_TYPE_* id, or negative to allocate one dynamically.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix vs. original: the free_dev error label unconditionally called
 * device_del()/put_device() on pmu->dev.  That label is reachable from
 * the skip_type path (anonymous pmu, or pmu_bus not yet running) where
 * pmu->dev was never allocated, so a percpu allocation failure would
 * dereference a NULL device.  Guard the teardown on pmu->dev.
 */
int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
	int cpu, ret;

	mutex_lock(&pmus_lock);
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;

	pmu->type = -1;
	if (!name)
		goto skip_type;
	pmu->name = name;

	if (type < 0) {
		/* dynamic id, allocated above the fixed PERF_TYPE_* range */
		type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
		if (type < 0) {
			ret = type;
			goto free_pdc;
		}
	}
	pmu->type = type;

	if (pmu_bus_running) {
		ret = pmu_dev_alloc(pmu);
		if (ret)
			goto free_idr;
	}

skip_type:
	/* share a cpu-context with any pmu that has the same task_ctx_nr */
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;

	ret = -ENOMEM;
	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
		goto free_dev;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;
		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx);
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
		cpuctx->ctx.type = cpu_context;
		cpuctx->ctx.pmu = pmu;
		cpuctx->jiffies_interval = 1;
		INIT_LIST_HEAD(&cpuctx->rotation_list);
		cpuctx->unique_pmu = pmu;
	}

got_cpu_context:
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	if (!pmu->event_idx)
		pmu->event_idx = perf_event_idx_default;

	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
unlock:
	mutex_unlock(&pmus_lock);

	return ret;

free_dev:
	/*
	 * Reachable via skip_type with no device allocated (anonymous
	 * pmu, or pmu_bus not running); only tear down a real device.
	 */
	if (pmu->dev) {
		device_del(pmu->dev);
		put_device(pmu->dev);
	}
	/* fall through: also release a dynamic idr id and the percpu count */

free_idr:
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
}
/*
 * Unregister a pmu: unlink it from the global list, wait for all
 * readers, then release its percpu state, dynamic type id, sysfs
 * device, and (possibly shared) cpu-context.
 */
void perf_pmu_unregister(struct pmu *pmu)
{
mutex_lock(&pmus_lock);
list_del_rcu(&pmu->entry);
mutex_unlock(&pmus_lock);
/*
 * We dereference the pmu list under both SRCU and regular RCU, so
 * synchronize against both of those.
 */
synchronize_srcu(&pmus_srcu);
synchronize_rcu();
free_percpu(pmu->pmu_disable_count);
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
device_del(pmu->dev);
put_device(pmu->dev);
free_pmu_context(pmu);
}
/*
 * Find a pmu willing to handle @event: first a direct idr lookup by
 * attr.type, then a scan of all registered pmus until one's
 * ->event_init() accepts (0) or hard-fails (!= -ENOENT).
 * Returns the pmu, or an ERR_PTR on failure.
 */
struct pmu *perf_init_event(struct perf_event *event)
{
struct pmu *pmu = NULL;
int idx;
int ret;
/* hold SRCU so pmus can't be unregistered under us */
idx = srcu_read_lock(&pmus_srcu);
rcu_read_lock();
pmu = idr_find(&pmu_idr, event->attr.type);
rcu_read_unlock();
if (pmu) {
/* set event->pmu before init; some ->event_init() paths use it */
event->pmu = pmu;
ret = pmu->event_init(event);
if (ret)
pmu = ERR_PTR(ret);
goto unlock;
}
list_for_each_entry_rcu(pmu, &pmus, entry) {
event->pmu = pmu;
ret = pmu->event_init(event);
if (!ret)
goto unlock;
/* -ENOENT means "not mine, try the next pmu"; anything else is fatal */
if (ret != -ENOENT) {
pmu = ERR_PTR(ret);
goto unlock;
}
}
pmu = ERR_PTR(-ENOENT);
unlock:
srcu_read_unlock(&pmus_srcu, idx);
return pmu;
}
/*
 * Allocate and initialize a event structure.
 *
 * @attr: user-supplied (already validated) event attributes
 * @cpu: cpu to bind to, or -1
 * @task: target task, or NULL for per-cpu events
 * @group_leader: group leader, or NULL (event leads itself)
 * @parent_event: parent when cloning for inheritance, else NULL
 * @overflow_handler/@context: optional overflow callback (inherited from
 * the parent when NULL)
 *
 * Returns the event or an ERR_PTR.  On success the caller owns the
 * event and must eventually free_event() it.
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
struct perf_event *group_leader,
struct perf_event *parent_event,
perf_overflow_handler_t overflow_handler,
void *context)
{
struct pmu *pmu;
struct perf_event *event;
struct hw_perf_event *hwc;
long err;
/* cpu out of range is only allowed as the -1 "any cpu" for task events */
if ((unsigned)cpu >= nr_cpu_ids) {
if (!task || cpu != -1)
return ERR_PTR(-EINVAL);
}
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return ERR_PTR(-ENOMEM);
/*
 * Single events are their own group leaders, with an
 * empty sibling list:
 */
if (!group_leader)
group_leader = event;
mutex_init(&event->child_mutex);
INIT_LIST_HEAD(&event->child_list);
INIT_LIST_HEAD(&event->group_entry);
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
INIT_LIST_HEAD(&event->rb_entry);
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending, perf_pending_event);
mutex_init(&event->mmap_mutex);
/* initial reference, dropped by put_event() */
atomic_long_set(&event->refcount, 1);
event->cpu = cpu;
event->attr = *attr;
event->group_leader = group_leader;
event->pmu = NULL;
event->oncpu = -1;
event->parent = parent_event;
event->ns = get_pid_ns(task_active_pid_ns(current));
event->id = atomic64_inc_return(&perf_event_id);
event->state = PERF_EVENT_STATE_INACTIVE;
if (task) {
event->attach_state = PERF_ATTACH_TASK;
if (attr->type == PERF_TYPE_TRACEPOINT)
event->hw.tp_target = task;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * hw_breakpoint is a bit difficult here..
 */
else if (attr->type == PERF_TYPE_BREAKPOINT)
event->hw.bp_target = task;
#endif
}
/* inherit the parent's overflow handler when none was supplied */
if (!overflow_handler && parent_event) {
overflow_handler = parent_event->overflow_handler;
context = parent_event->overflow_handler_context;
}
event->overflow_handler = overflow_handler;
event->overflow_handler_context = context;
perf_event__state_init(event);
pmu = NULL;
hwc = &event->hw;
hwc->sample_period = attr->sample_period;
/* freq mode: start with period 1 and let the core adjust */
if (attr->freq && attr->sample_freq)
hwc->sample_period = 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
/*
 * we currently do not support PERF_FORMAT_GROUP on inherited events
 */
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
goto done;
pmu = perf_init_event(event);
done:
err = 0;
if (!pmu)
err = -EINVAL;
else if (IS_ERR(pmu))
err = PTR_ERR(pmu);
if (err) {
if (event->ns)
put_pid_ns(event->ns);
kfree(event);
return ERR_PTR(err);
}
/* accounting below is for top-level events only; children share it */
if (!event->parent) {
if (event->attach_state & PERF_ATTACH_TASK)
static_key_slow_inc(&perf_sched_events.key);
if (event->attr.mmap || event->attr.mmap_data)
atomic_inc(&nr_mmap_events);
if (event->attr.comm)
atomic_inc(&nr_comm_events);
if (event->attr.task)
atomic_inc(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
err = get_callchain_buffers();
if (err) {
free_event(event);
return ERR_PTR(err);
}
}
if (has_branch_stack(event)) {
static_key_slow_inc(&perf_sched_events.key);
if (!(event->attach_state & PERF_ATTACH_TASK))
atomic_inc(&per_cpu(perf_branch_stack_events,
event->cpu));
}
}
return event;
}
/*
 * Copy a perf_event_attr from userspace with forward/backward ABI
 * compatibility: a shorter struct is zero-extended, a longer one is
 * accepted only if every byte beyond our known size is zero.  Also
 * validates reserved bits, sample/read formats, branch-stack and
 * user regs/stack sampling options.  Returns 0 or -errno; on -E2BIG
 * the kernel's expected size is written back to uattr->size.
 */
static int perf_copy_attr(struct perf_event_attr __user *uattr,
struct perf_event_attr *attr)
{
u32 size;
int ret;
if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
return -EFAULT;
/*
 * zero the full structure, so that a short copy will be nice.
 */
memset(attr, 0, sizeof(*attr));
ret = get_user(size, &uattr->size);
if (ret)
return ret;
if (size > PAGE_SIZE) /* silly large */
goto err_size;
if (!size) /* abi compat */
size = PERF_ATTR_SIZE_VER0;
if (size < PERF_ATTR_SIZE_VER0)
goto err_size;
/*
 * If we're handed a bigger struct than we know of,
 * ensure all the unknown bits are 0 - i.e. new
 * user-space does not rely on any kernel feature
 * extensions we dont know about yet.
 */
if (size > sizeof(*attr)) {
unsigned char __user *addr;
unsigned char __user *end;
unsigned char val;
addr = (void __user *)uattr + sizeof(*attr);
end = (void __user *)uattr + size;
for (; addr < end; addr++) {
ret = get_user(val, addr);
if (ret)
return ret;
if (val)
goto err_size;
}
/* tail is all-zero: treat it as our known layout */
size = sizeof(*attr);
}
ret = copy_from_user(attr, uattr, size);
if (ret)
return -EFAULT;
if (attr->__reserved_1)
return -EINVAL;
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
return -EINVAL;
if (attr->read_format & ~(PERF_FORMAT_MAX-1))
return -EINVAL;
if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
u64 mask = attr->branch_sample_type;
/* only using defined bits */
if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
return -EINVAL;
/* at least one branch bit must be set */
if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
return -EINVAL;
/* kernel level capture: check permissions */
if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
&& perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;
/* propagate priv level, when not set for branch */
if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
/* exclude_kernel checked on syscall entry */
if (!attr->exclude_kernel)
mask |= PERF_SAMPLE_BRANCH_KERNEL;
if (!attr->exclude_user)
mask |= PERF_SAMPLE_BRANCH_USER;
if (!attr->exclude_hv)
mask |= PERF_SAMPLE_BRANCH_HV;
/*
 * adjust user setting (for HW filter setup)
 */
attr->branch_sample_type = mask;
}
}
if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
ret = perf_reg_validate(attr->sample_regs_user);
if (ret)
return ret;
}
if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
if (!arch_perf_have_user_stack_dump())
return -ENOSYS;
/*
 * We have __u32 type for the size, but so far
 * we can only use __u16 as maximum due to the
 * __u16 sample size limit.
 */
if (attr->sample_stack_user >= USHRT_MAX)
ret = -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
ret = -EINVAL;
}
out:
return ret;
err_size:
/* tell userspace what size we expected */
put_user(sizeof(*attr), &uattr->size);
ret = -E2BIG;
goto out;
}
/*
 * Redirect @event's output into @output_event's ring buffer (or detach
 * when @output_event is NULL).  Rejects self-redirection, cross-cpu
 * buffers, and cross-context task buffers; fails while the event has
 * an active mmap().  Returns 0 or -EINVAL.
 */
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
struct ring_buffer *rb = NULL, *old_rb = NULL;
int ret = -EINVAL;
if (!output_event)
goto set;
/* don't allow circular references */
if (event == output_event)
goto out;
/*
 * Don't allow cross-cpu buffers
 */
if (output_event->cpu != event->cpu)
goto out;
/*
 * If its not a per-cpu rb, it must be the same task.
 */
if (output_event->cpu == -1 && output_event->ctx != event->ctx)
goto out;
set:
mutex_lock(&event->mmap_mutex);
/* Can't redirect output if we've got an active mmap() */
if (atomic_read(&event->mmap_count))
goto unlock;
old_rb = event->rb;
if (output_event) {
/* get the rb we want to redirect to */
rb = ring_buffer_get(output_event);
if (!rb)
goto unlock;
}
if (old_rb)
ring_buffer_detach(event, old_rb);
if (rb)
ring_buffer_attach(event, rb);
rcu_assign_pointer(event->rb, rb);
if (old_rb) {
ring_buffer_put(old_rb);
/*
 * Since we detached before setting the new rb, so that we
 * could attach the new rb, we could have missed a wakeup.
 * Provide it now.
 */
wake_up_all(&event->waitq);
}
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
out:
return ret;
}
/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr: event_id type attributes for monitoring/sampling
 * @pid: target pid (or cgroup fd with PERF_FLAG_PID_CGROUP)
 * @cpu: target cpu (-1 for "any cpu" on task events)
 * @group_fd: group leader event fd, or -1
 * @flags: PERF_FLAG_* bits
 *
 * Returns a new event file descriptor, or a negative errno.
 *
 * Fix vs. original: the three statements taking current->perf_event_mutex
 * and adding to current->perf_event_list had been corrupted by an HTML
 * entity mangling ("&curren" rendered as the currency sign), which does
 * not compile; the "&current->..." expressions are restored.
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct fd group = {NULL, 0};
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int move_group = 0;
	int err;

	/* for future expandability... */
	if (flags & ~PERF_FLAG_ALL)
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	/* kernel-space profiling may be restricted to CAP_SYS_ADMIN */
	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	} else {
		/* sample_period must fit in a signed value */
		if (attr.sample_period & (1ULL << 63))
			return -EINVAL;
	}

	/*
	 * In cgroup mode, the pid argument is used to pass the fd
	 * opened to the cgroup directory in cgroupfs. The cpu argument
	 * designates the cpu on which to monitor threads from that
	 * cgroup.
	 */
	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
		return -EINVAL;

	event_fd = get_unused_fd();
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		err = perf_fget_light(group_fd, &group);
		if (err)
			goto err_fd;
		group_leader = group.file->private_data;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	get_online_cpus();

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
				 NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_task;
	}

	if (flags & PERF_FLAG_PID_CGROUP) {
		err = perf_cgroup_connect(pid, event, &attr, group_leader);
		if (err)
			goto err_alloc;
		/*
		 * one more event:
		 * - that has cgroup constraint on event->cpu
		 * - that may need work on context switch
		 */
		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
		static_key_slow_inc(&perf_sched_events.key);
	}

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both a software
			 * event, and event is, then group leader is not.
			 *
			 * Allow the addition of software events to !software
			 * groups, this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, event->cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;
	}

	if (task) {
		put_task_struct(task);
		task = NULL;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (move_group) {
			if (group_leader->ctx->type != ctx->type)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	if (move_group) {
		struct perf_event_context *gctx = group_leader->ctx;

		mutex_lock(&gctx->mutex);
		perf_remove_from_context(group_leader, false);

		/*
		 * Removing from the context ends up with disabled
		 * event. What we want here is event in the initial
		 * startup state, ready to be add into new context.
		 */
		perf_event__state_init(group_leader);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_remove_from_context(sibling, false);
			perf_event__state_init(sibling);
			put_ctx(gctx);
		}
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
	}

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);

	if (move_group) {
		synchronize_rcu();
		perf_install_in_context(ctx, group_leader, group_leader->cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_install_in_context(ctx, sibling, sibling->cpu);
			get_ctx(ctx);
		}
	}

	perf_install_in_context(ctx, event, event->cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	put_online_cpus();

	event->owner = current;

	/* link the event into the opener's per-task owner list */
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fdput(group);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	perf_unpin_context(ctx);
	put_ctx(ctx);
err_alloc:
	free_event(event);
err_task:
	put_online_cpus();
	if (task)
		put_task_struct(task);
err_group_fd:
	fdput(group);
err_fd:
	put_unused_fd(event_fd);
	return err;
}
/**
 * perf_event_create_kernel_counter - open an in-kernel perf event
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu in which the counter is bound
 * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback invoked on counter overflow (may be NULL)
 * @context: opaque cookie passed to @overflow_handler
 *
 * Kernel-internal counterpart of sys_perf_event_open(): no fd, no
 * grouping.  Returns the event or an ERR_PTR.
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
struct task_struct *task,
perf_overflow_handler_t overflow_handler,
void *context)
{
struct perf_event_context *ctx;
struct perf_event *event;
int err;
/*
 * Get the target context (task or percpu):
 */
event = perf_event_alloc(attr, cpu, task, NULL, NULL,
overflow_handler, context);
if (IS_ERR(event)) {
err = PTR_ERR(event);
goto err;
}
ctx = find_get_context(event->pmu, task, cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_free;
}
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
perf_install_in_context(ctx, event, cpu);
++ctx->generation;
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
return event;
err_free:
free_event(event);
err:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
/*
 * Move all of @pmu's events from @src_cpu's context to @dst_cpu's
 * (used e.g. when an uncore pmu's managing cpu goes away).  Events are
 * detached onto a private list, an RCU grace period lets in-flight
 * users drain, then they are re-installed on the destination and
 * OFF-state events are reset to INACTIVE so they can run again.
 */
void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
struct perf_event_context *src_ctx;
struct perf_event_context *dst_ctx;
struct perf_event *event, *tmp;
LIST_HEAD(events);
src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
mutex_lock(&src_ctx->mutex);
list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
event_entry) {
perf_remove_from_context(event, false);
/* drop the source context's reference held for this event */
put_ctx(src_ctx);
list_add(&event->event_entry, &events);
}
mutex_unlock(&src_ctx->mutex);
synchronize_rcu();
mutex_lock(&dst_ctx->mutex);
list_for_each_entry_safe(event, tmp, &events, event_entry) {
list_del(&event->event_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_INACTIVE;
perf_install_in_context(dst_ctx, event, dst_cpu);
/* destination context takes a reference per event */
get_ctx(dst_ctx);
}
mutex_unlock(&dst_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
/*
 * Fold a dying inherited child event's counts and times back into its
 * parent, unlink it from the parent's child list, and drop the child's
 * reference on the parent.
 */
static void sync_child_event(struct perf_event *child_event,
struct task_struct *child)
{
struct perf_event *parent_event = child_event->parent;
u64 child_val;
if (child_event->attr.inherit_stat)
perf_event_read_event(child_event, child);
child_val = perf_event_count(child_event);
/*
 * Add back the child's count to the parent's count:
 */
atomic64_add(child_val, &parent_event->child_count);
atomic64_add(child_event->total_time_enabled,
&parent_event->child_total_time_enabled);
atomic64_add(child_event->total_time_running,
&parent_event->child_total_time_running);
/*
 * Remove this event from the parent's list
 */
WARN_ON_ONCE(parent_event->ctx->parent_ctx);
mutex_lock(&parent_event->child_mutex);
list_del_init(&child_event->child_list);
mutex_unlock(&parent_event->child_mutex);
/*
 * Release the parent event, if this was the last
 * reference to it.
 */
put_event(parent_event);
}
/*
 * Detach one event from an exiting task's context.  Inherited events
 * are additionally synced into their parent and freed; non-inherited
 * events stay alive for their fd owner.
 */
static void
__perf_event_exit_task(struct perf_event *child_event,
struct perf_event_context *child_ctx,
struct task_struct *child)
{
perf_remove_from_context(child_event, !!child_event->parent);
/*
 * It can happen that the parent exits first, and has events
 * that are still around due to the child reference. These
 * events need to be zapped.
 */
if (child_event->parent) {
sync_child_event(child_event, child);
free_event(child_event);
}
}
/*
 * Tear down one of an exiting task's perf contexts (ctxn): unschedule it,
 * detach it from the task, report PERF_RECORD_EXIT, then detach/free every
 * event in it. Drops the context reference at the end.
 */
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	/* Fast path: no context of this type - still emit the task event. */
	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	task_ctx_sched_out(child_ctx);
	child->perf_event_ctxp[ctxn] = NULL;
	/*
	 * If this context is a clone; unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	/* Pairs with the local_irq_save() above. */
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       put_event()
	 *         mutex_lock(&ctx->mutex)
	 *
	 * But since its the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);
again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);
	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);
	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;
	mutex_unlock(&child_ctx->mutex);
	put_ctx(child_ctx);
}
/*
 * When a child task exits, feed back event values to parent events.
 *
 * Disowns every event owned by the task, then tears down each of its
 * per-type perf contexts via perf_event_exit_task_context().
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);
		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner, closes a race against perf_release() where
		 * we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}
/*
 * Free an inherited event that was never exposed to user space (fork
 * failure path). Unlinks it from its parent's child list, drops the
 * parent reference, detaches it from its group/context and frees it.
 */
static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	/* Only inherited events reach here; a missing parent is a bug. */
	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	put_event(parent);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}
/*
 * free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of fail.
 *
 * Iterates every per-type context of @task, freeing all pinned and
 * flexible events before dropping the context reference.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);
		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);
		/*
		 * Freeing a group leader can append its siblings to the
		 * lists; re-scan until both lists drain completely.
		 */
		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;
		mutex_unlock(&ctx->mutex);
		put_ctx(ctx);
	}
}
/*
 * Sanity check run at delayed task-struct release time: by now every
 * per-type perf context pointer of @task must already be NULL.
 */
void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}
/*
 * inherit a event from parent task to child task:
 *
 * Allocates a child copy of @parent_event in @child_ctx, links it back to
 * the toplevel parent, mirrors the parent's enable state and frequency
 * settings, and adds it to the child context and the parent's child list.
 *
 * Returns the new event, an ERR_PTR() from perf_event_alloc(), or NULL
 * when the parent event is already being torn down (refcount hit zero).
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL, NULL);
	if (IS_ERR(child_event))
		return child_event;

	/* Parent may be mid-teardown; bail out if we can't pin it. */
	if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	/* Freq events restart sampling with the parent's current period. */
	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}
/*
 * Inherit a whole event group: clone the leader into @child_ctx, then
 * clone each sibling with the new leader as its group leader.
 *
 * Returns 0 on success or a negative errno from inherit_event().
 * NOTE(review): a NULL return from inherit_event() (parent mid-teardown)
 * is not IS_ERR() and is treated as success here - presumably intentional
 * best-effort behavior; confirm against upstream history.
 */
static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
/*
 * Conditionally inherit one parent event group into the child task,
 * allocating the child's context for @ctxn on first use.
 *
 * Clears *inherited_all when the event is not marked inheritable or
 * when inheritance fails, so the caller knows the child context is not
 * a full clone of the parent context.
 */
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */
		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}
/*
 * Initialize the perf_event context in task_struct
 *
 * Clones the parent's context of type @ctxn into @child at fork time:
 * inherits all pinned groups, then (with rotation disabled) all flexible
 * groups. If everything was inherited, the child context is marked as a
 * clone of the parent context so the two can later be lazily swapped.
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	/* Nothing to inherit if the parent has no context of this type. */
	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We dont have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/* Re-enable rotation; the lock is held through the clone marking. */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock avoids it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}
/*
 * Initialize the perf_event context in task_struct
 *
 * Fork-time entry point: resets the child's perf bookkeeping and clones
 * every per-type context from the parent. On failure, frees any contexts
 * already created so fork can fail cleanly.
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret) {
			perf_event_free_task(child);
			return ret;
		}
	}

	return 0;
}
/*
 * Boot-time init: prepare the software-event hash table mutex and the
 * context-rotation list for every possible CPU.
 */
static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}
/*
 * CPU-hotplug online path: mark the CPU's swevent hash table online and,
 * if software events are already registered (refcount > 0), allocate its
 * hash list on the CPU's own NUMA node.
 */
static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		/* Allocation failure is tolerated but flagged. */
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
/*
 * Remove this CPU's context for @pmu from the rotation list; must run
 * with interrupts disabled (checked) since rotation runs from irq context.
 */
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}
/*
 * IPI callback for CPU-offline: stop rotation for the context's PMU and
 * remove every event from the context (without detaching groups).
 * Runs on the target CPU via smp_call_function_single().
 */
static void __perf_event_exit_context(void *__info)
{
	struct remove_event re = { .detach_group = false };
	struct perf_event_context *ctx = __info;

	perf_pmu_rotate_stop(ctx->pmu);

	rcu_read_lock();
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);
	rcu_read_unlock();
}
/*
 * For every registered PMU, run __perf_event_exit_context() on @cpu to
 * drain its per-CPU context before the CPU goes offline. The PMU list is
 * walked under SRCU; each context is serialized via its mutex.
 */
static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}
/*
 * CPU-hotplug offline path: drain all per-CPU contexts, then mark the
 * CPU's swevent hash table offline and release its hash list.
 */
static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
/*
 * Reboot notifier: tear down perf on every online CPU so no events fire
 * across the reboot. Always reports NOTIFY_OK.
 */
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}
/*
* Run the perf reboot notifier at the very last possible moment so that
* the generic watchdog code runs as long as possible.
*/
static struct notifier_block perf_reboot_notifier = {
.notifier_call = perf_reboot,
.priority = INT_MIN,
};
/*
 * CPU hotplug notifier: bring perf state up on CPU online (or a failed
 * offline) and tear it down on offline (or a failed online). The
 * TASKS_FROZEN flag is masked so suspend/resume takes the same paths.
 */
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}
/*
 * Core perf boot-time initialization: per-CPU state, the PMU idr/SRCU,
 * the built-in software/clock/tracepoint PMUs, hotplug and reboot
 * notifiers, and hardware breakpoint support.
 */
void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location. IOW, validation we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}
/*
 * Register the pmu bus and create a sysfs device for every already
 * registered, named PMU; later registrations see pmu_bus_running set.
 * Per-PMU device allocation failures are warned about but not fatal.
 */
static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		/* Skip anonymous PMUs and those without an assigned type. */
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);
#ifdef CONFIG_CGROUP_PERF
/*
 * cgroup css_alloc callback: allocate a perf_cgroup and its per-CPU
 * info area. Returns ERR_PTR(-ENOMEM) on either allocation failure.
 */
static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
{
	struct perf_cgroup *jc;

	jc = kzalloc(sizeof(*jc), GFP_KERNEL);
	if (!jc)
		return ERR_PTR(-ENOMEM);

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info) {
		kfree(jc);
		return ERR_PTR(-ENOMEM);
	}

	return &jc->css;
}
/*
 * cgroup css_free callback: release the per-CPU info area and the
 * perf_cgroup allocated by perf_cgroup_css_alloc().
 */
static void perf_cgroup_css_free(struct cgroup *cont)
{
	struct perf_cgroup *jc;
	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
			  struct perf_cgroup, css);
	free_percpu(jc->info);
	kfree(jc);
}
/*
 * task_function_call() payload: switch the task's cgroup events out of
 * the old cgroup and into the new one, on the task's own CPU.
 */
static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}
/*
 * cgroup attach callback: re-evaluate cgroup events for every task
 * being moved into @cgrp, executing the switch on each task's CPU.
 */
static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset)
		task_function_call(task, __perf_cgroup_move, task);
}
/*
 * cgroup exit callback: switch a genuinely exiting task's cgroup events.
 * Tasks without PF_EXITING are the copy_process() failure path and are
 * skipped (see comment below).
 */
static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
			     struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't ran yet, this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}
struct cgroup_subsys perf_subsys = {
.name = "perf_event",
.subsys_id = perf_subsys_id,
.css_alloc = perf_cgroup_css_alloc,
.css_free = perf_cgroup_css_free,
.exit = perf_cgroup_exit,
.attach = perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
|
6065199b923b940e1dacb4ecb3fbf1ab26bf4643
|
fbe68d84e97262d6d26dd65c704a7b50af2b3943
|
/third_party/virtualbox/src/VBox/Devices/Graphics/shaderlib/wine/include/http.h
|
4124e5ed56a7f4871c06429100572b5b3a6f9c15
|
[
"LGPL-2.0-or-later",
"LGPL-2.1-or-later",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"LGPL-2.0-only",
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference",
"CDDL-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"MPL-1.0",
"LicenseRef-scancode-generic-exception",
"Apache-2.0",
"OpenSSL",
"MIT"
] |
permissive
|
thalium/icebox
|
c4e6573f2b4f0973b6c7bb0bf068fe9e795fdcfb
|
6f78952d58da52ea4f0e55b2ab297f28e80c1160
|
refs/heads/master
| 2022-08-14T00:19:36.984579
| 2022-02-22T13:10:31
| 2022-02-22T13:10:31
| 190,019,914
| 585
| 109
|
MIT
| 2022-01-13T20:58:15
| 2019-06-03T14:18:12
|
C++
|
UTF-8
|
C
| false
| false
| 2,599
|
h
|
http.h
|
/*
* HTTP Server API definitions
*
* Copyright (C) 2009 Andrey Turkin
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/
/*
* Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Oracle elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#ifndef __WINE_HTTP_H
#define __WINE_HTTP_H
#include <winsock2.h>
#include <ws2tcpip.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Version of the HTTP Server API the caller was built against; passed
 * to HttpInitialize(). Use the HTTPAPI_VERSION_1/2 initializers below.
 */
typedef struct _HTTPAPI_VERSION
{
    USHORT HttpApiMajorVersion;  /* major version, e.g. 1 or 2 */
    USHORT HttpApiMinorVersion;  /* minor version, e.g. 0 */
} HTTPAPI_VERSION, *PHTTPAPI_VERSION;
#define HTTPAPI_VERSION_1 {1,0}
#define HTTPAPI_VERSION_2 {2,0}
/* HttpInitialize and HttpTerminate flags */
#define HTTP_INITIALIZE_SERVER 0x00000001
#define HTTP_INITIALIZE_CONFIG 0x00000002
/*
 * Selects which persistent service configuration record the
 * Http{Set,Query,Delete}ServiceConfiguration() calls operate on.
 */
typedef enum _HTTP_SERVICE_CONFIG_ID
{
    HttpServiceConfigIPListenList,  /* IP listen-address list */
    HttpServiceConfigSSLCertInfo,   /* SSL certificate bindings */
    HttpServiceConfigUrlAclInfo,    /* URL reservation ACLs */
    HttpServiceConfigTimeout,       /* connection timeout limits */
    HttpServiceConfigMax            /* sentinel: number of config ids */
} HTTP_SERVICE_CONFIG_ID, *PHTTP_SERVICE_CONFIG_ID;
ULONG WINAPI HttpInitialize(HTTPAPI_VERSION,ULONG,PVOID);
ULONG WINAPI HttpTerminate(ULONG,PVOID);
ULONG WINAPI HttpAddUrl(HANDLE,PCWSTR,PVOID);
ULONG WINAPI HttpCreateHttpHandle(PHANDLE,ULONG);
ULONG WINAPI HttpDeleteServiceConfiguration(HANDLE,HTTP_SERVICE_CONFIG_ID,PVOID,ULONG,LPOVERLAPPED);
ULONG WINAPI HttpQueryServiceConfiguration(HANDLE,HTTP_SERVICE_CONFIG_ID,PVOID,ULONG,PVOID,ULONG,PULONG,LPOVERLAPPED);
ULONG WINAPI HttpSetServiceConfiguration(HANDLE,HTTP_SERVICE_CONFIG_ID,PVOID,ULONG,LPOVERLAPPED);
#ifdef __cplusplus
}
#endif
#endif /* __WINE_HTTP_H */
|
2b65810076bd128b49c4d12550390b3f3cd8c88f
|
b8969d48bbb69227db21f8446a3be4955363b694
|
/ntvdmpatch/experimental/haxm/v86/haxm/i386/spcstubs.c
|
d2e7b1bffee1a09eabda88118e72e6f2a9291849
|
[] |
no_license
|
leecher1337/ntvdmx64
|
dc0e6428e531b661836dd9543016016d98bf7b24
|
0f7f33e295024b81681d506cc86b8606960ef713
|
refs/heads/master
| 2023-07-26T13:52:27.549799
| 2023-07-09T14:33:29
| 2023-07-09T14:33:29
| 60,649,860
| 696
| 91
| null | 2020-08-23T19:41:36
| 2016-06-07T22:09:12
|
C
|
UTF-8
|
C
| false
| false
| 1,256
|
c
|
spcstubs.c
|
#include "monitorp.h"
#include "host_def.h"
static LONG stub_q_ev_count = 0; // holder for below
/* Monitor controlled code will call quick event code immediately so the
* following needn't be at all accurate.
*/
/*
 * Store the quick-event counter value. Monitor-controlled code runs
 * quick events immediately, so this holder needn't be accurate.
 *
 * Modernized from the obsolescent K&R-style definition to a prototype
 * definition (identical ABI for a LONG parameter; K&R definitions are
 * removed in C23).
 */
void host_q_ev_set_count(LONG value)
{
    stub_q_ev_count = value;
}
/*
 * Return the last value stored by host_q_ev_set_count().
 *
 * The empty parameter list was changed to (void): an empty list in a
 * definition declares an unprototyped function (obsolescent, removed
 * in C23) and disables argument checking at call sites.
 */
LONG host_q_ev_get_count(void)
{
    return stub_q_ev_count;
}
/*
 * Stub conversion from a time value to an instruction count: the
 * identity mapping.
 *
 * NOTE(review): returns a LONG as int - on LLP64/ILP32 targets LONG and
 * int are the same width so this is lossless there; confirm no 64-bit
 * LONG target exists before relying on the full range.
 */
int host_calc_q_ev_inst_for_time(LONG time)
{
    return(time);
}
/*
 * Stub: no numeric coprocessor (NPX) state exists in this build, so a
 * reset is a no-op. Parameter list changed from the obsolescent empty
 * () to (void) to declare a proper prototype.
 */
void npx_reset(void)
{
    return;
}
/*
 * Stub: no numeric coprocessor (NPX) to initialise in this build.
 * Parameter list changed from the obsolescent empty () to (void) to
 * declare a proper prototype.
 */
void initialise_npx(void)
{
    return;
}
/*
 * Stub: nothing to do - hardware interrupt clearing is handled
 * elsewhere in this configuration. Parameter list changed from the
 * obsolescent empty () to (void) to declare a proper prototype.
 */
void cpu_clearHwInt(void)
{
}
#ifndef YODA
/*
 * Stub disassembler: performs no dis-assembly in non-YODA builds.
 *
 * BUG FIX: the original body was empty, so any caller that used the
 * returned USHORT read an indeterminate value (undefined behavior for
 * a value-returning function that falls off its end). Return 0
 * explicitly and silence unused-parameter warnings.
 */
USHORT dasm_internal(
char * txt,       /* Buffer to hold dis-assembly text (-1 means not required) */
USHORT seg,       /* Segment for xxxx:... text in dis-assembly */
ULONG off,        /* ditto offset */
BYTE default_size,/* 16BIT or 32BIT code segment */
ULONG p,          /* linear address of start of instruction */
PVOID byte_at,    /* like sas_hw_at() to use to read intel
                   * but will return -1 if there is an error
                   */
char * fmt,       /* sprintf format for first line seg:offset */
char * newline)   /* strcat text to separate lines */
{
    (void)txt; (void)seg; (void)off; (void)default_size;
    (void)p; (void)byte_at; (void)fmt; (void)newline;
    return 0;
}
BOOL do_condition_checks = FALSE;
#endif
/*
 * Stub: intentionally ignores the supplied context pointer - no global
 * simulate-context is maintained in this configuration.
 */
void SET_GLOBAL_SimulateContext(BYTE *localSimulateContext)
{
}
|
6e174954d0debabb12963a581ec39cf45b3a1eb6
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/lang/tclX/files/patch-generic_tclXdup.c
|
895d40ac97735fa3410a334657f648431ac5dbf1
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
C
| false
| false
| 383
|
c
|
patch-generic_tclXdup.c
|
--- generic/tclXdup.c.orig 2014-07-30 16:44:06.000000000 +0200
+++ generic/tclXdup.c 2014-07-30 16:45:30.000000000 +0200
@@ -128,7 +128,7 @@
char *targetChannelId;
{
Tcl_Channel srcChannel, newChannel = NULL;
- Tcl_ChannelType *channelType;
+ const Tcl_ChannelType *channelType;
int mode;
srcChannel = Tcl_GetChannel (interp, srcChannelId, &mode);
|
0d35be66dfe54febe5e980e249dee3abbec01cfe
|
bc010403013ffe60d43950224c1921982d83cbc1
|
/inc/appl/tpg_test_http_1_1_app.h
|
ef3d5a08890877d1093e2345c5ebc7aab8926969
|
[
"BSD-3-Clause"
] |
permissive
|
Juniper/warp17
|
69f11fbd1a9b8c7aa9c2eb9291a8e0e108193a6e
|
f51cc6b8f1da7ca9703c7fbb951a638ae4a0e9dc
|
refs/heads/dev/common
| 2023-09-04T12:59:33.140329
| 2020-09-28T10:49:23
| 2020-09-28T10:49:23
| 59,734,760
| 412
| 88
|
BSD-3-Clause
| 2022-10-02T04:27:14
| 2016-05-26T08:47:40
|
C
|
UTF-8
|
C
| false
| false
| 9,508
|
h
|
tpg_test_http_1_1_app.h
|
/*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
*
* Copyright (c) 2016, Juniper Networks, Inc. All rights reserved.
*
*
* The contents of this file are subject to the terms of the BSD 3 clause
* License (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at
* https://github.com/Juniper/warp17/blob/master/LICENSE.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* File name:
* tpg_test_http_1_1_app.h
*
* Description:
* HTTP 1.1 application state storage.
*
* Author:
* Dumitru Ceara, Eelco Chaudron
*
* Initial Created:
* 02/22/2016
*
* Notes:
*
*/
/*****************************************************************************
* Multiple include protection
****************************************************************************/
#ifndef _H_TPG_TEST_HTTP_1_1_APP_
#define _H_TPG_TEST_HTTP_1_1_APP_
/*****************************************************************************
* HTTP state machine.
* See tpg_test_http_1_1_sm.dot for the diagram. (xdot dot/tpg_test_http_1_1_sm.dot)
****************************************************************************/
/* States of the HTTP 1.1 app state machine (see tpg_test_http_1_1_sm.dot). */
typedef enum {

    HTTPS_CL_SEND_REQ,       /* client: sending the request */
    HTTPS_CL_PARSE_RESP_HDR, /* client: parsing response headers */
    HTTPS_CL_RECV_RESP_BODY, /* client: receiving the response body */

    HTTPS_SRV_PARSE_REQ_HDR, /* server: parsing request headers */
    HTTPS_SRV_RECV_REQ_BODY, /* server: receiving the request body */
    HTTPS_SRV_SEND_RESP,     /* server: sending the response */

    HTTPS_CLOSED,            /* connection closed */

    HTTPS_MAX_STATE          /* sentinel: number of states */

} http_state_t;
/*****************************************************************************
* HTTP application client/server definitions
****************************************************************************/
/* Per-connection HTTP 1.1 application state. */
typedef struct http_app_s {

    /* Client and server roles never need both callbacks at once,
     * hence the anonymous union. */
    union {
        app_recv_ptr_t ha_recv;
        app_send_ptr_t ha_send;
    };

    uint32_t       ha_content_length; /* presumably remaining body bytes - confirm with .c */
    uint16_t       ha_req_cnt;        /* requests counted on this connection */
    uint16_t       ha_resp_cnt;       /* responses counted on this connection */
    http_state_t   ha_state;          /* current http_state_t machine state */

} http_app_t;
/*
 * HTTP test case shared storage (i.e., the mbuf holding the req/response).
 */
typedef struct http_storage_s {

    struct rte_mbuf *http_mbuf; /* prebuilt request/response template mbuf */

} http_storage_t;
/*****************************************************************************
* HTTP global stats
****************************************************************************/
/* Global HTTP error counters. */
typedef struct http_statistics_s {

    uint32_t hts_req_err;  /* request-side errors */
    uint32_t hts_resp_err; /* response-side errors */

} http_statistics_t;
/*****************************************************************************
* HTTP flag definitions
****************************************************************************/
#define HTTP_FLAG_NONE 0x00000000
/* TODO: this should be done by the imix infrastructure but until then... */
#define HTTP_FLAG_IMIX 0x80000000
typedef uint32_t http_flags_type_t;
#define HTTP_IMIX_ISSET(cfg) \
((cfg) & HTTP_FLAG_IMIX)
/*****************************************************************************
* HTTP externals.
****************************************************************************/
extern void http_client_default_cfg(tpg_test_case_t *cfg);
extern void http_server_default_cfg(tpg_test_case_t *cfg);
extern bool http_client_validate_cfg(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg,
printer_arg_t *printer_arg);
extern bool http_server_validate_cfg(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg,
printer_arg_t *printer_arg);
extern void http_client_print_cfg(const tpg_app_t *app_cfg,
printer_arg_t *printer_arg);
extern void http_server_print_cfg(const tpg_app_t *app_cfg,
printer_arg_t *printer_arg);
extern void http_client_add_cfg(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg);
extern void http_server_add_cfg(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg);
extern void http_client_delete_cfg(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg);
extern void http_server_delete_cfg(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg);
extern uint32_t http_client_pkts_per_send(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg,
uint32_t max_pkt_size);
extern uint32_t http_server_pkts_per_send(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg,
uint32_t max_pkt_size);
extern void http_client_server_init(app_data_t *app_data,
const tpg_app_t *app_cfg);
extern void http_client_tc_start(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg,
app_storage_t *app_storage);
extern void http_server_tc_start(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg,
app_storage_t *app_storage);
extern void http_client_tc_stop(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg,
app_storage_t *app_storage);
extern void http_server_tc_stop(const tpg_test_case_t *cfg,
const tpg_app_t *app_cfg,
app_storage_t *app_storage);
extern void http_client_conn_up(l4_control_block_t *l4, app_data_t *app_data,
tpg_app_stats_t *stats);
extern void http_server_conn_up(l4_control_block_t *l4, app_data_t *app_data,
tpg_app_stats_t *stats);
extern void http_client_server_conn_down(l4_control_block_t *l4,
app_data_t *app_data,
tpg_app_stats_t *stats);
extern uint32_t http_client_deliver_data(l4_control_block_t *l4,
app_data_t *app_data,
tpg_app_stats_t *stats,
struct rte_mbuf *rx_data,
uint64_t rx_tstamp);
extern uint32_t http_server_deliver_data(l4_control_block_t *l4,
app_data_t *app_data,
tpg_app_stats_t *stats,
struct rte_mbuf *rx_data,
uint64_t rx_tstamp);
extern struct rte_mbuf *http_client_send_data(l4_control_block_t *l4,
app_data_t *app_data,
tpg_app_stats_t *stats,
uint32_t max_tx_size);
extern struct rte_mbuf *http_server_send_data(l4_control_block_t *l4,
app_data_t *app_data,
tpg_app_stats_t *stats,
uint32_t max_tx_size);
extern bool http_client_data_sent(l4_control_block_t *l4, app_data_t *app_data,
tpg_app_stats_t *stats,
uint32_t bytes_sent);
extern bool http_server_data_sent(l4_control_block_t *l4, app_data_t *app_data,
tpg_app_stats_t *stats,
uint32_t bytes_sent);
extern void http_stats_init(const tpg_app_t *app_cfg, tpg_app_stats_t *stats);
extern void http_stats_copy(tpg_app_stats_t *dest, const tpg_app_stats_t *src);
extern void http_stats_add(tpg_app_stats_t *total,
const tpg_app_stats_t *elem);
extern void http_stats_print(const tpg_app_stats_t *stats,
printer_arg_t *printer_arg);
extern bool http_init(void);
extern void http_lcore_init(uint32_t lcore_id);
#endif /* _H_TPG_TEST_HTTP_1_1_APP_ */
|
a74eb8984483b99fc0994c5160f97ca18915689e
|
07327b5e8b2831b12352bf7c6426bfda60129da7
|
/Include/10.0.16299.0/um/Iadmw.h
|
872524371ab965af0119142d81d0b297e079696c
|
[] |
no_license
|
tpn/winsdk-10
|
ca279df0fce03f92036e90fb04196d6282a264b7
|
9b69fd26ac0c7d0b83d378dba01080e93349c2ed
|
refs/heads/master
| 2021-01-10T01:56:18.586459
| 2018-02-19T21:26:31
| 2018-02-19T21:29:50
| 44,352,845
| 218
| 432
| null | null | null | null |
UTF-8
|
C
| false
| false
| 93,370
|
h
|
Iadmw.h
|
/* this ALWAYS GENERATED file contains the definitions for the interfaces */
/* File created by MIDL compiler version 8.01.0622 */
/* @@MIDL_FILE_HEADING( ) */
#pragma warning( disable: 4049 ) /* more than 64k source lines */
/* verify that the <rpcndr.h> version is high enough to compile this file*/
#ifndef __REQUIRED_RPCNDR_H_VERSION__
#define __REQUIRED_RPCNDR_H_VERSION__ 475
#endif
/* verify that the <rpcsal.h> version is high enough to compile this file*/
#ifndef __REQUIRED_RPCSAL_H_VERSION__
#define __REQUIRED_RPCSAL_H_VERSION__ 100
#endif
#include "rpc.h"
#include "rpcndr.h"
#ifndef __RPCNDR_H_VERSION__
#error this stub requires an updated version of <rpcndr.h>
#endif /* __RPCNDR_H_VERSION__ */
#ifndef COM_NO_WINDOWS_H
#include "windows.h"
#include "ole2.h"
#endif /*COM_NO_WINDOWS_H*/
#ifndef __iadmw_h__
#define __iadmw_h__
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
#pragma once
#endif
/* Forward Declarations */
#ifndef __IMSAdminBaseW_FWD_DEFINED__
#define __IMSAdminBaseW_FWD_DEFINED__
typedef interface IMSAdminBaseW IMSAdminBaseW;
#endif /* __IMSAdminBaseW_FWD_DEFINED__ */
#ifndef __IMSAdminBase2W_FWD_DEFINED__
#define __IMSAdminBase2W_FWD_DEFINED__
typedef interface IMSAdminBase2W IMSAdminBase2W;
#endif /* __IMSAdminBase2W_FWD_DEFINED__ */
#ifndef __IMSAdminBase3W_FWD_DEFINED__
#define __IMSAdminBase3W_FWD_DEFINED__
typedef interface IMSAdminBase3W IMSAdminBase3W;
#endif /* __IMSAdminBase3W_FWD_DEFINED__ */
#ifndef __IMSImpExpHelpW_FWD_DEFINED__
#define __IMSImpExpHelpW_FWD_DEFINED__
typedef interface IMSImpExpHelpW IMSImpExpHelpW;
#endif /* __IMSImpExpHelpW_FWD_DEFINED__ */
#ifndef __IMSAdminBaseSinkW_FWD_DEFINED__
#define __IMSAdminBaseSinkW_FWD_DEFINED__
typedef interface IMSAdminBaseSinkW IMSAdminBaseSinkW;
#endif /* __IMSAdminBaseSinkW_FWD_DEFINED__ */
#ifndef __AsyncIMSAdminBaseSinkW_FWD_DEFINED__
#define __AsyncIMSAdminBaseSinkW_FWD_DEFINED__
typedef interface AsyncIMSAdminBaseSinkW AsyncIMSAdminBaseSinkW;
#endif /* __AsyncIMSAdminBaseSinkW_FWD_DEFINED__ */
/* header files for imported files */
#include "mddefw.h"
#include "objidl.h"
#include "ocidl.h"
#ifdef __cplusplus
extern "C"{
#endif
/* interface __MIDL_itf_iadmw_0000_0000 */
/* [local] */
/*++
Copyright (c) 1997-1999 Microsoft Corporation
Module Name: iadmw.h
Admin Objects Interfaces
--*/
#ifndef _ADM_IADMW_
#define _ADM_IADMW_
#include <winapifamily.h>
#pragma region Desktop Family
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
#include <mdcommsg.h>
#include <mdmsg.h>
/*
Error Codes
Admin APIs all return HRESULTs. Since internal results are either
Win32 errors or metadata-specific return codes (see mdmsg.h), they are
converted to HRESULTs using the RETURNCODETOHRESULT macro (see
mdcommsg.h).
*/
/*
Max Name Length
The maximum number of characters in the length of a metaobject name,
including the terminating NULL. This refers to each node in the tree,
not the entire path.
eg. strlen("Root") < ADMINDATA_MAX_NAME_LEN
*/
#define ADMINDATA_MAX_NAME_LEN 256
#define CLSID_MSAdminBase CLSID_MSAdminBase_W
#define IID_IMSAdminBase IID_IMSAdminBase_W
#define IMSAdminBase IMSAdminBaseW
#define IID_IMSAdminBase2 IID_IMSAdminBase2_W
#define IMSAdminBase2 IMSAdminBase2W
#define IID_IMSAdminBase3 IID_IMSAdminBase3_W
#define IMSAdminBase3 IMSAdminBase3W
#define IMSAdminBaseSink IMSAdminBaseSinkW
#define IID_IMSAdminBaseSink IID_IMSAdminBaseSink_W
#define IMSImpExpHelp IMSImpExpHelpW
#define IID_IMSImpExpHelp IID_IMSImpExpHelp_W
#define GETAdminBaseCLSID GETAdminBaseCLSIDW
#define AsyncIMSAdminBaseSink AsyncIMSAdminBaseSinkW
#define IID_AsyncIMSAdminBaseSink IID_AsyncIMSAdminBaseSink_W
DEFINE_GUID(CLSID_MSAdminBase_W, 0xa9e69610, 0xb80d, 0x11d0, 0xb9, 0xb9, 0x00, 0xa0, 0xc9, 0x22, 0xe7, 0x50);
DEFINE_GUID(IID_IMSAdminBase_W, 0x70b51430, 0xb6ca, 0x11d0, 0xb9, 0xb9, 0x00, 0xa0, 0xc9, 0x22, 0xe7, 0x50);
DEFINE_GUID(IID_IMSAdminBase2_W, 0x8298d101, 0xf992, 0x43b7, 0x8e, 0xca, 0x50, 0x52, 0xd8, 0x85, 0xb9, 0x95);
DEFINE_GUID(IID_IMSAdminBase3_W, 0xf612954d, 0x3b0b, 0x4c56, 0x95, 0x63, 0x22, 0x7b, 0x7b, 0xe6, 0x24, 0xb4);
DEFINE_GUID(IID_IMSImpExpHelp_W, 0x29ff67ff, 0x8050, 0x480f, 0x9f, 0x30, 0xcc, 0x41, 0x63, 0x5f, 0x2f, 0x9d);
DEFINE_GUID(IID_IMSAdminBaseSink_W, 0xa9e69612, 0xb80d, 0x11d0, 0xb9, 0xb9, 0x00, 0xa0, 0xc9, 0x22, 0xe7, 0x50);
DEFINE_GUID(IID_AsyncIMSAdminBaseSink_W, 0xa9e69613, 0xb80d, 0x11d0, 0xb9, 0xb9, 0x00, 0xa0, 0xc9, 0x22, 0xe7, 0x50);
#define GETAdminBaseCLSIDW(IsService) CLSID_MSAdminBase_W
/*
The Main Interface, UNICODE
*/
extern RPC_IF_HANDLE __MIDL_itf_iadmw_0000_0000_v0_0_c_ifspec;
extern RPC_IF_HANDLE __MIDL_itf_iadmw_0000_0000_v0_0_s_ifspec;
#ifndef __IMSAdminBaseW_INTERFACE_DEFINED__
#define __IMSAdminBaseW_INTERFACE_DEFINED__
/* interface IMSAdminBaseW */
/* [unique][uuid][object] */
EXTERN_C const IID IID_IMSAdminBaseW;
#if defined(__cplusplus) && !defined(CINTERFACE)
/*
IMSAdminBaseW - C++ (Unicode) declaration of the IIS metabase
administration base interface.  This file is MIDL-generated; do not
hand-edit method signatures, since the R_* proxy/stub routines and the
C vtable binding in the #else branch below must stay in sync with them.
All methods are pure virtual; the implementation is the admin base
object identified by CLSID_MSAdminBase_W.
*/
MIDL_INTERFACE("70B51430-B6CA-11d0-B9B9-00A0C922E750")
IMSAdminBaseW : public IUnknown
{
public:
/* --- Key (tree node) operations.  Paths are relative to an open
       METADATA_HANDLE; see ADMINDATA_MAX_NAME_LEN for per-node name
       limits. --- */
virtual HRESULT STDMETHODCALLTYPE AddKey(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath) = 0;
virtual HRESULT STDMETHODCALLTYPE DeleteKey(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath) = 0;
virtual HRESULT STDMETHODCALLTYPE DeleteChildKeys(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath) = 0;
virtual HRESULT STDMETHODCALLTYPE EnumKeys(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [size_is][out] */ __RPC__out_ecount_full(256) LPWSTR pszMDName,
/* [in] */ DWORD dwMDEnumObjectIndex) = 0;
virtual HRESULT STDMETHODCALLTYPE CopyKey(
/* [in] */ METADATA_HANDLE hMDSourceHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDSourcePath,
/* [in] */ METADATA_HANDLE hMDDestHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDDestPath,
/* [in] */ BOOL bMDOverwriteFlag,
/* [in] */ BOOL bMDCopyFlag) = 0;
virtual HRESULT STDMETHODCALLTYPE RenameKey(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDNewName) = 0;
/* --- Property (data) operations.  Methods marked [local] are not
       marshalled directly; they are remoted through the matching
       IMSAdminBaseW_R_*_Proxy/_Stub routines declared later in this
       header. --- */
virtual /* [local] */ HRESULT STDMETHODCALLTYPE SetData(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [in] */ PMETADATA_RECORD pmdrMDData) = 0;
virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetData(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [out][in] */ PMETADATA_RECORD pmdrMDData,
/* [out] */ DWORD *pdwMDRequiredDataLen) = 0;
virtual HRESULT STDMETHODCALLTYPE DeleteData(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDIdentifier,
/* [in] */ DWORD dwMDDataType) = 0;
virtual /* [local] */ HRESULT STDMETHODCALLTYPE EnumData(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [out][in] */ PMETADATA_RECORD pmdrMDData,
/* [in] */ DWORD dwMDEnumDataIndex,
/* [out] */ DWORD *pdwMDRequiredDataLen) = 0;
virtual /* [local] */ HRESULT STDMETHODCALLTYPE GetAllData(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [out] */ DWORD *pdwMDNumDataEntries,
/* [out] */ DWORD *pdwMDDataSetNumber,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][out] */ unsigned char *pbMDBuffer,
/* [out] */ DWORD *pdwMDRequiredBufferSize) = 0;
virtual HRESULT STDMETHODCALLTYPE DeleteAllData(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType) = 0;
virtual HRESULT STDMETHODCALLTYPE CopyData(
/* [in] */ METADATA_HANDLE hMDSourceHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDSourcePath,
/* [in] */ METADATA_HANDLE hMDDestHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDDestPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [in] */ BOOL bMDCopyFlag) = 0;
virtual HRESULT STDMETHODCALLTYPE GetDataPaths(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDIdentifier,
/* [in] */ DWORD dwMDDataType,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][out] */ __RPC__out_ecount_full(dwMDBufferSize) WCHAR *pszBuffer,
/* [out] */ __RPC__out DWORD *pdwMDRequiredBufferSize) = 0;
/* --- Handle management, persistence and change tracking --- */
virtual HRESULT STDMETHODCALLTYPE OpenKey(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAccessRequested,
/* [in] */ DWORD dwMDTimeOut,
/* [out] */ __RPC__out PMETADATA_HANDLE phMDNewHandle) = 0;
virtual HRESULT STDMETHODCALLTYPE CloseKey(
/* [in] */ METADATA_HANDLE hMDHandle) = 0;
virtual HRESULT STDMETHODCALLTYPE ChangePermissions(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [in] */ DWORD dwMDTimeOut,
/* [in] */ DWORD dwMDAccessRequested) = 0;
virtual HRESULT STDMETHODCALLTYPE SaveData( void) = 0;
virtual HRESULT STDMETHODCALLTYPE GetHandleInfo(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [out] */ __RPC__out PMETADATA_HANDLE_INFO pmdhiInfo) = 0;
virtual HRESULT STDMETHODCALLTYPE GetSystemChangeNumber(
/* [out] */ __RPC__out DWORD *pdwSystemChangeNumber) = 0;
virtual HRESULT STDMETHODCALLTYPE GetDataSetNumber(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out] */ __RPC__out DWORD *pdwMDDataSetNumber) = 0;
virtual HRESULT STDMETHODCALLTYPE SetLastChangeTime(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ __RPC__in PFILETIME pftMDLastChangeTime,
/* [in] */ BOOL bLocalTime) = 0;
virtual HRESULT STDMETHODCALLTYPE GetLastChangeTime(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out] */ __RPC__out PFILETIME pftMDLastChangeTime,
/* [in] */ BOOL bLocalTime) = 0;
/* [restricted][local]: remoted via the R_KeyExchangePhase* proxies
   declared later in this header. */
virtual /* [restricted][local] */ HRESULT STDMETHODCALLTYPE KeyExchangePhase1( void) = 0;
virtual /* [restricted][local] */ HRESULT STDMETHODCALLTYPE KeyExchangePhase2( void) = 0;
/* --- Backup / restore of the metabase --- */
virtual HRESULT STDMETHODCALLTYPE Backup(
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags) = 0;
virtual HRESULT STDMETHODCALLTYPE Restore(
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags) = 0;
virtual HRESULT STDMETHODCALLTYPE EnumBackups(
/* [size_is][out][in] */ __RPC__inout_ecount_full(256) LPWSTR pszMDBackupLocation,
/* [out] */ __RPC__out DWORD *pdwMDVersion,
/* [out] */ __RPC__out PFILETIME pftMDBackupTime,
/* [in] */ DWORD dwMDEnumIndex) = 0;
virtual HRESULT STDMETHODCALLTYPE DeleteBackup(
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion) = 0;
virtual HRESULT STDMETHODCALLTYPE UnmarshalInterface(
/* [out] */ __RPC__deref_out_opt IMSAdminBaseW **piadmbwInterface) = 0;
virtual /* [restricted][local] */ HRESULT STDMETHODCALLTYPE GetServerGuid( void) = 0;
};
#else /* C style interface */
/*
C-style binding of IMSAdminBaseW: an explicit vtable of function
pointers, each taking the interface pointer ("This") as its first
argument.  The method order and signatures must match the C++
declaration in the #if branch above exactly, starting with the three
inherited IUnknown methods.  MIDL-generated; do not hand-edit.
*/
typedef struct IMSAdminBaseWVtbl
{
BEGIN_INTERFACE
/* IUnknown */
HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ __RPC__in REFIID riid,
/* [annotation][iid_is][out] */
_COM_Outptr_ void **ppvObject);
ULONG ( STDMETHODCALLTYPE *AddRef )(
__RPC__in IMSAdminBaseW * This);
ULONG ( STDMETHODCALLTYPE *Release )(
__RPC__in IMSAdminBaseW * This);
/* IMSAdminBaseW methods */
HRESULT ( STDMETHODCALLTYPE *AddKey )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath);
HRESULT ( STDMETHODCALLTYPE *DeleteKey )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath);
HRESULT ( STDMETHODCALLTYPE *DeleteChildKeys )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath);
HRESULT ( STDMETHODCALLTYPE *EnumKeys )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [size_is][out] */ __RPC__out_ecount_full(256) LPWSTR pszMDName,
/* [in] */ DWORD dwMDEnumObjectIndex);
HRESULT ( STDMETHODCALLTYPE *CopyKey )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDSourceHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDSourcePath,
/* [in] */ METADATA_HANDLE hMDDestHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDDestPath,
/* [in] */ BOOL bMDOverwriteFlag,
/* [in] */ BOOL bMDCopyFlag);
HRESULT ( STDMETHODCALLTYPE *RenameKey )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDNewName);
/* [local] entries carry no __RPC__ annotations; they are remoted via
   the IMSAdminBaseW_R_* proxy/stub routines declared later. */
/* [local] */ HRESULT ( STDMETHODCALLTYPE *SetData )(
IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [in] */ PMETADATA_RECORD pmdrMDData);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *GetData )(
IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [out][in] */ PMETADATA_RECORD pmdrMDData,
/* [out] */ DWORD *pdwMDRequiredDataLen);
HRESULT ( STDMETHODCALLTYPE *DeleteData )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDIdentifier,
/* [in] */ DWORD dwMDDataType);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *EnumData )(
IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [out][in] */ PMETADATA_RECORD pmdrMDData,
/* [in] */ DWORD dwMDEnumDataIndex,
/* [out] */ DWORD *pdwMDRequiredDataLen);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *GetAllData )(
IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [out] */ DWORD *pdwMDNumDataEntries,
/* [out] */ DWORD *pdwMDDataSetNumber,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][out] */ unsigned char *pbMDBuffer,
/* [out] */ DWORD *pdwMDRequiredBufferSize);
HRESULT ( STDMETHODCALLTYPE *DeleteAllData )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType);
HRESULT ( STDMETHODCALLTYPE *CopyData )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDSourceHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDSourcePath,
/* [in] */ METADATA_HANDLE hMDDestHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDDestPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [in] */ BOOL bMDCopyFlag);
HRESULT ( STDMETHODCALLTYPE *GetDataPaths )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDIdentifier,
/* [in] */ DWORD dwMDDataType,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][out] */ __RPC__out_ecount_full(dwMDBufferSize) WCHAR *pszBuffer,
/* [out] */ __RPC__out DWORD *pdwMDRequiredBufferSize);
HRESULT ( STDMETHODCALLTYPE *OpenKey )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAccessRequested,
/* [in] */ DWORD dwMDTimeOut,
/* [out] */ __RPC__out PMETADATA_HANDLE phMDNewHandle);
HRESULT ( STDMETHODCALLTYPE *CloseKey )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle);
HRESULT ( STDMETHODCALLTYPE *ChangePermissions )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [in] */ DWORD dwMDTimeOut,
/* [in] */ DWORD dwMDAccessRequested);
HRESULT ( STDMETHODCALLTYPE *SaveData )(
__RPC__in IMSAdminBaseW * This);
HRESULT ( STDMETHODCALLTYPE *GetHandleInfo )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [out] */ __RPC__out PMETADATA_HANDLE_INFO pmdhiInfo);
HRESULT ( STDMETHODCALLTYPE *GetSystemChangeNumber )(
__RPC__in IMSAdminBaseW * This,
/* [out] */ __RPC__out DWORD *pdwSystemChangeNumber);
HRESULT ( STDMETHODCALLTYPE *GetDataSetNumber )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out] */ __RPC__out DWORD *pdwMDDataSetNumber);
HRESULT ( STDMETHODCALLTYPE *SetLastChangeTime )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ __RPC__in PFILETIME pftMDLastChangeTime,
/* [in] */ BOOL bLocalTime);
HRESULT ( STDMETHODCALLTYPE *GetLastChangeTime )(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out] */ __RPC__out PFILETIME pftMDLastChangeTime,
/* [in] */ BOOL bLocalTime);
/* [restricted][local] */ HRESULT ( STDMETHODCALLTYPE *KeyExchangePhase1 )(
IMSAdminBaseW * This);
/* [restricted][local] */ HRESULT ( STDMETHODCALLTYPE *KeyExchangePhase2 )(
IMSAdminBaseW * This);
HRESULT ( STDMETHODCALLTYPE *Backup )(
__RPC__in IMSAdminBaseW * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *Restore )(
__RPC__in IMSAdminBaseW * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *EnumBackups )(
__RPC__in IMSAdminBaseW * This,
/* [size_is][out][in] */ __RPC__inout_ecount_full(256) LPWSTR pszMDBackupLocation,
/* [out] */ __RPC__out DWORD *pdwMDVersion,
/* [out] */ __RPC__out PFILETIME pftMDBackupTime,
/* [in] */ DWORD dwMDEnumIndex);
HRESULT ( STDMETHODCALLTYPE *DeleteBackup )(
__RPC__in IMSAdminBaseW * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion);
HRESULT ( STDMETHODCALLTYPE *UnmarshalInterface )(
__RPC__in IMSAdminBaseW * This,
/* [out] */ __RPC__deref_out_opt IMSAdminBaseW **piadmbwInterface);
/* [restricted][local] */ HRESULT ( STDMETHODCALLTYPE *GetServerGuid )(
IMSAdminBaseW * This);
END_INTERFACE
} IMSAdminBaseWVtbl;
/* In the C binding, the interface object is just a pointer to the
   IMSAdminBaseWVtbl function table declared above. */
interface IMSAdminBaseW
{
CONST_VTBL struct IMSAdminBaseWVtbl *lpVtbl;
};
#ifdef COBJMACROS
/* Convenience macros for C callers: IMSAdminBaseW_Method(p, ...) expands
   to a call through the vtable, passing the interface pointer as This,
   so callers need not dereference lpVtbl by hand.  One macro per method,
   in vtable order. */
#define IMSAdminBaseW_QueryInterface(This,riid,ppvObject) \
( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define IMSAdminBaseW_AddRef(This) \
( (This)->lpVtbl -> AddRef(This) )
#define IMSAdminBaseW_Release(This) \
( (This)->lpVtbl -> Release(This) )
#define IMSAdminBaseW_AddKey(This,hMDHandle,pszMDPath) \
( (This)->lpVtbl -> AddKey(This,hMDHandle,pszMDPath) )
#define IMSAdminBaseW_DeleteKey(This,hMDHandle,pszMDPath) \
( (This)->lpVtbl -> DeleteKey(This,hMDHandle,pszMDPath) )
#define IMSAdminBaseW_DeleteChildKeys(This,hMDHandle,pszMDPath) \
( (This)->lpVtbl -> DeleteChildKeys(This,hMDHandle,pszMDPath) )
#define IMSAdminBaseW_EnumKeys(This,hMDHandle,pszMDPath,pszMDName,dwMDEnumObjectIndex) \
( (This)->lpVtbl -> EnumKeys(This,hMDHandle,pszMDPath,pszMDName,dwMDEnumObjectIndex) )
#define IMSAdminBaseW_CopyKey(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,bMDOverwriteFlag,bMDCopyFlag) \
( (This)->lpVtbl -> CopyKey(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,bMDOverwriteFlag,bMDCopyFlag) )
#define IMSAdminBaseW_RenameKey(This,hMDHandle,pszMDPath,pszMDNewName) \
( (This)->lpVtbl -> RenameKey(This,hMDHandle,pszMDPath,pszMDNewName) )
#define IMSAdminBaseW_SetData(This,hMDHandle,pszMDPath,pmdrMDData) \
( (This)->lpVtbl -> SetData(This,hMDHandle,pszMDPath,pmdrMDData) )
#define IMSAdminBaseW_GetData(This,hMDHandle,pszMDPath,pmdrMDData,pdwMDRequiredDataLen) \
( (This)->lpVtbl -> GetData(This,hMDHandle,pszMDPath,pmdrMDData,pdwMDRequiredDataLen) )
#define IMSAdminBaseW_DeleteData(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType) \
( (This)->lpVtbl -> DeleteData(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType) )
#define IMSAdminBaseW_EnumData(This,hMDHandle,pszMDPath,pmdrMDData,dwMDEnumDataIndex,pdwMDRequiredDataLen) \
( (This)->lpVtbl -> EnumData(This,hMDHandle,pszMDPath,pmdrMDData,dwMDEnumDataIndex,pdwMDRequiredDataLen) )
#define IMSAdminBaseW_GetAllData(This,hMDHandle,pszMDPath,dwMDAttributes,dwMDUserType,dwMDDataType,pdwMDNumDataEntries,pdwMDDataSetNumber,dwMDBufferSize,pbMDBuffer,pdwMDRequiredBufferSize) \
( (This)->lpVtbl -> GetAllData(This,hMDHandle,pszMDPath,dwMDAttributes,dwMDUserType,dwMDDataType,pdwMDNumDataEntries,pdwMDDataSetNumber,dwMDBufferSize,pbMDBuffer,pdwMDRequiredBufferSize) )
#define IMSAdminBaseW_DeleteAllData(This,hMDHandle,pszMDPath,dwMDUserType,dwMDDataType) \
( (This)->lpVtbl -> DeleteAllData(This,hMDHandle,pszMDPath,dwMDUserType,dwMDDataType) )
#define IMSAdminBaseW_CopyData(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,dwMDAttributes,dwMDUserType,dwMDDataType,bMDCopyFlag) \
( (This)->lpVtbl -> CopyData(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,dwMDAttributes,dwMDUserType,dwMDDataType,bMDCopyFlag) )
#define IMSAdminBaseW_GetDataPaths(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType,dwMDBufferSize,pszBuffer,pdwMDRequiredBufferSize) \
( (This)->lpVtbl -> GetDataPaths(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType,dwMDBufferSize,pszBuffer,pdwMDRequiredBufferSize) )
#define IMSAdminBaseW_OpenKey(This,hMDHandle,pszMDPath,dwMDAccessRequested,dwMDTimeOut,phMDNewHandle) \
( (This)->lpVtbl -> OpenKey(This,hMDHandle,pszMDPath,dwMDAccessRequested,dwMDTimeOut,phMDNewHandle) )
#define IMSAdminBaseW_CloseKey(This,hMDHandle) \
( (This)->lpVtbl -> CloseKey(This,hMDHandle) )
#define IMSAdminBaseW_ChangePermissions(This,hMDHandle,dwMDTimeOut,dwMDAccessRequested) \
( (This)->lpVtbl -> ChangePermissions(This,hMDHandle,dwMDTimeOut,dwMDAccessRequested) )
#define IMSAdminBaseW_SaveData(This) \
( (This)->lpVtbl -> SaveData(This) )
#define IMSAdminBaseW_GetHandleInfo(This,hMDHandle,pmdhiInfo) \
( (This)->lpVtbl -> GetHandleInfo(This,hMDHandle,pmdhiInfo) )
#define IMSAdminBaseW_GetSystemChangeNumber(This,pdwSystemChangeNumber) \
( (This)->lpVtbl -> GetSystemChangeNumber(This,pdwSystemChangeNumber) )
#define IMSAdminBaseW_GetDataSetNumber(This,hMDHandle,pszMDPath,pdwMDDataSetNumber) \
( (This)->lpVtbl -> GetDataSetNumber(This,hMDHandle,pszMDPath,pdwMDDataSetNumber) )
#define IMSAdminBaseW_SetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) \
( (This)->lpVtbl -> SetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) )
#define IMSAdminBaseW_GetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) \
( (This)->lpVtbl -> GetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) )
#define IMSAdminBaseW_KeyExchangePhase1(This) \
( (This)->lpVtbl -> KeyExchangePhase1(This) )
#define IMSAdminBaseW_KeyExchangePhase2(This) \
( (This)->lpVtbl -> KeyExchangePhase2(This) )
#define IMSAdminBaseW_Backup(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) \
( (This)->lpVtbl -> Backup(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) )
#define IMSAdminBaseW_Restore(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) \
( (This)->lpVtbl -> Restore(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) )
#define IMSAdminBaseW_EnumBackups(This,pszMDBackupLocation,pdwMDVersion,pftMDBackupTime,dwMDEnumIndex) \
( (This)->lpVtbl -> EnumBackups(This,pszMDBackupLocation,pdwMDVersion,pftMDBackupTime,dwMDEnumIndex) )
#define IMSAdminBaseW_DeleteBackup(This,pszMDBackupLocation,dwMDVersion) \
( (This)->lpVtbl -> DeleteBackup(This,pszMDBackupLocation,dwMDVersion) )
#define IMSAdminBaseW_UnmarshalInterface(This,piadmbwInterface) \
( (This)->lpVtbl -> UnmarshalInterface(This,piadmbwInterface) )
#define IMSAdminBaseW_GetServerGuid(This) \
( (This)->lpVtbl -> GetServerGuid(This) )
#endif /* COBJMACROS */
#endif /* C style interface */
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_R_SetData_Proxy(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ __RPC__in PMETADATA_RECORD pmdrMDData);
void __RPC_STUB IMSAdminBaseW_R_SetData_Stub(
IRpcStubBuffer *This,
IRpcChannelBuffer *_pRpcChannelBuffer,
PRPC_MESSAGE _pRpcMessage,
DWORD *_pdwStubPhase);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_R_GetData_Proxy(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out][in] */ __RPC__inout PMETADATA_RECORD pmdrMDData,
/* [out] */ __RPC__out DWORD *pdwMDRequiredDataLen,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppDataBlob);
void __RPC_STUB IMSAdminBaseW_R_GetData_Stub(
IRpcStubBuffer *This,
IRpcChannelBuffer *_pRpcChannelBuffer,
PRPC_MESSAGE _pRpcMessage,
DWORD *_pdwStubPhase);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_R_EnumData_Proxy(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out][in] */ __RPC__inout PMETADATA_RECORD pmdrMDData,
/* [in] */ DWORD dwMDEnumDataIndex,
/* [out] */ __RPC__out DWORD *pdwMDRequiredDataLen,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppDataBlob);
void __RPC_STUB IMSAdminBaseW_R_EnumData_Stub(
IRpcStubBuffer *This,
IRpcChannelBuffer *_pRpcChannelBuffer,
PRPC_MESSAGE _pRpcMessage,
DWORD *_pdwStubPhase);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_R_GetAllData_Proxy(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [out] */ __RPC__out DWORD *pdwMDNumDataEntries,
/* [out] */ __RPC__out DWORD *pdwMDDataSetNumber,
/* [in] */ DWORD dwMDBufferSize,
/* [out] */ __RPC__out DWORD *pdwMDRequiredBufferSize,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppDataBlob);
void __RPC_STUB IMSAdminBaseW_R_GetAllData_Stub(
IRpcStubBuffer *This,
IRpcChannelBuffer *_pRpcChannelBuffer,
PRPC_MESSAGE _pRpcMessage,
DWORD *_pdwStubPhase);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_R_KeyExchangePhase1_Proxy(
__RPC__in IMSAdminBaseW * This,
/* [in][unique] */ __RPC__in_opt struct _IIS_CRYPTO_BLOB *pClientKeyExchangeKeyBlob,
/* [in][unique] */ __RPC__in_opt struct _IIS_CRYPTO_BLOB *pClientSignatureKeyBlob,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppServerKeyExchangeKeyBlob,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppServerSignatureKeyBlob,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppServerSessionKeyBlob);
void __RPC_STUB IMSAdminBaseW_R_KeyExchangePhase1_Stub(
IRpcStubBuffer *This,
IRpcChannelBuffer *_pRpcChannelBuffer,
PRPC_MESSAGE _pRpcMessage,
DWORD *_pdwStubPhase);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_R_KeyExchangePhase2_Proxy(
__RPC__in IMSAdminBaseW * This,
/* [in][unique] */ __RPC__in_opt struct _IIS_CRYPTO_BLOB *pClientSessionKeyBlob,
/* [in][unique] */ __RPC__in_opt struct _IIS_CRYPTO_BLOB *pClientHashBlob,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppServerHashBlob);
void __RPC_STUB IMSAdminBaseW_R_KeyExchangePhase2_Stub(
IRpcStubBuffer *This,
IRpcChannelBuffer *_pRpcChannelBuffer,
PRPC_MESSAGE _pRpcMessage,
DWORD *_pdwStubPhase);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_R_GetServerGuid_Proxy(
__RPC__in IMSAdminBaseW * This,
/* [out] */ __RPC__out GUID *pServerGuid);
void __RPC_STUB IMSAdminBaseW_R_GetServerGuid_Stub(
IRpcStubBuffer *This,
IRpcChannelBuffer *_pRpcChannelBuffer,
PRPC_MESSAGE _pRpcMessage,
DWORD *_pdwStubPhase);
#endif /* __IMSAdminBaseW_INTERFACE_DEFINED__ */
#ifndef __IMSAdminBase2W_INTERFACE_DEFINED__
#define __IMSAdminBase2W_INTERFACE_DEFINED__
/* interface IMSAdminBase2W */
/* [unique][uuid][object] */
EXTERN_C const IID IID_IMSAdminBase2W;
#if defined(__cplusplus) && !defined(CINTERFACE)
/*
IMSAdminBase2W - C++ (Unicode) declaration of the extended admin base
interface.  Derives from IMSAdminBaseW and adds password-protected
backup/restore, import/export, and history enumeration/restore methods.
MIDL-generated; do not hand-edit signatures.
*/
MIDL_INTERFACE("8298d101-f992-43b7-8eca-5052d885b995")
IMSAdminBase2W : public IMSAdminBaseW
{
public:
virtual HRESULT STDMETHODCALLTYPE BackupWithPasswd(
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd) = 0;
virtual HRESULT STDMETHODCALLTYPE RestoreWithPasswd(
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd) = 0;
virtual HRESULT STDMETHODCALLTYPE Export(
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszFileName,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszSourcePath,
/* [in] */ DWORD dwMDFlags) = 0;
virtual HRESULT STDMETHODCALLTYPE Import(
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszFileName,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszSourcePath,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszDestPath,
/* [in] */ DWORD dwMDFlags) = 0;
virtual HRESULT STDMETHODCALLTYPE RestoreHistory(
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDHistoryLocation,
/* [in] */ DWORD dwMDMajorVersion,
/* [in] */ DWORD dwMDMinorVersion,
/* [in] */ DWORD dwMDFlags) = 0;
virtual HRESULT STDMETHODCALLTYPE EnumHistory(
/* [size_is][out][in] */ __RPC__inout_ecount_full(256) LPWSTR pszMDHistoryLocation,
/* [out] */ __RPC__out DWORD *pdwMDMajorVersion,
/* [out] */ __RPC__out DWORD *pdwMDMinorVersion,
/* [out] */ __RPC__out PFILETIME pftMDHistoryTime,
/* [in] */ DWORD dwMDEnumIndex) = 0;
};
#else /* C style interface */
/* C-style binding for IMSAdminBase2W: an explicit COM vtable.
 * MIDL-generated — the slot order below IS the binary interface and must
 * match the C++ declaration exactly; never reorder or insert entries.
 * Entries marked [local] are not remoted directly (they have call-as
 * proxies); [restricted] entries are placeholders not meant for callers. */
typedef struct IMSAdminBase2WVtbl
{
BEGIN_INTERFACE
/* ---- IUnknown slots ---- */
HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ __RPC__in REFIID riid,
/* [annotation][iid_is][out] */
_COM_Outptr_ void **ppvObject);
ULONG ( STDMETHODCALLTYPE *AddRef )(
__RPC__in IMSAdminBase2W * This);
ULONG ( STDMETHODCALLTYPE *Release )(
__RPC__in IMSAdminBase2W * This);
/* ---- IMSAdminBaseW slots (inherited) ---- */
HRESULT ( STDMETHODCALLTYPE *AddKey )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath);
HRESULT ( STDMETHODCALLTYPE *DeleteKey )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath);
HRESULT ( STDMETHODCALLTYPE *DeleteChildKeys )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath);
HRESULT ( STDMETHODCALLTYPE *EnumKeys )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [size_is][out] */ __RPC__out_ecount_full(256) LPWSTR pszMDName,
/* [in] */ DWORD dwMDEnumObjectIndex);
HRESULT ( STDMETHODCALLTYPE *CopyKey )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDSourceHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDSourcePath,
/* [in] */ METADATA_HANDLE hMDDestHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDDestPath,
/* [in] */ BOOL bMDOverwriteFlag,
/* [in] */ BOOL bMDCopyFlag);
HRESULT ( STDMETHODCALLTYPE *RenameKey )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDNewName);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *SetData )(
IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [in] */ PMETADATA_RECORD pmdrMDData);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *GetData )(
IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [out][in] */ PMETADATA_RECORD pmdrMDData,
/* [out] */ DWORD *pdwMDRequiredDataLen);
HRESULT ( STDMETHODCALLTYPE *DeleteData )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDIdentifier,
/* [in] */ DWORD dwMDDataType);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *EnumData )(
IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [out][in] */ PMETADATA_RECORD pmdrMDData,
/* [in] */ DWORD dwMDEnumDataIndex,
/* [out] */ DWORD *pdwMDRequiredDataLen);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *GetAllData )(
IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [out] */ DWORD *pdwMDNumDataEntries,
/* [out] */ DWORD *pdwMDDataSetNumber,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][out] */ unsigned char *pbMDBuffer,
/* [out] */ DWORD *pdwMDRequiredBufferSize);
HRESULT ( STDMETHODCALLTYPE *DeleteAllData )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType);
HRESULT ( STDMETHODCALLTYPE *CopyData )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDSourceHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDSourcePath,
/* [in] */ METADATA_HANDLE hMDDestHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDDestPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [in] */ BOOL bMDCopyFlag);
HRESULT ( STDMETHODCALLTYPE *GetDataPaths )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDIdentifier,
/* [in] */ DWORD dwMDDataType,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][out] */ __RPC__out_ecount_full(dwMDBufferSize) WCHAR *pszBuffer,
/* [out] */ __RPC__out DWORD *pdwMDRequiredBufferSize);
HRESULT ( STDMETHODCALLTYPE *OpenKey )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAccessRequested,
/* [in] */ DWORD dwMDTimeOut,
/* [out] */ __RPC__out PMETADATA_HANDLE phMDNewHandle);
HRESULT ( STDMETHODCALLTYPE *CloseKey )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle);
HRESULT ( STDMETHODCALLTYPE *ChangePermissions )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [in] */ DWORD dwMDTimeOut,
/* [in] */ DWORD dwMDAccessRequested);
HRESULT ( STDMETHODCALLTYPE *SaveData )(
__RPC__in IMSAdminBase2W * This);
HRESULT ( STDMETHODCALLTYPE *GetHandleInfo )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [out] */ __RPC__out PMETADATA_HANDLE_INFO pmdhiInfo);
HRESULT ( STDMETHODCALLTYPE *GetSystemChangeNumber )(
__RPC__in IMSAdminBase2W * This,
/* [out] */ __RPC__out DWORD *pdwSystemChangeNumber);
HRESULT ( STDMETHODCALLTYPE *GetDataSetNumber )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out] */ __RPC__out DWORD *pdwMDDataSetNumber);
HRESULT ( STDMETHODCALLTYPE *SetLastChangeTime )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ __RPC__in PFILETIME pftMDLastChangeTime,
/* [in] */ BOOL bLocalTime);
HRESULT ( STDMETHODCALLTYPE *GetLastChangeTime )(
__RPC__in IMSAdminBase2W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out] */ __RPC__out PFILETIME pftMDLastChangeTime,
/* [in] */ BOOL bLocalTime);
/* [restricted][local] */ HRESULT ( STDMETHODCALLTYPE *KeyExchangePhase1 )(
IMSAdminBase2W * This);
/* [restricted][local] */ HRESULT ( STDMETHODCALLTYPE *KeyExchangePhase2 )(
IMSAdminBase2W * This);
HRESULT ( STDMETHODCALLTYPE *Backup )(
__RPC__in IMSAdminBase2W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *Restore )(
__RPC__in IMSAdminBase2W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *EnumBackups )(
__RPC__in IMSAdminBase2W * This,
/* [size_is][out][in] */ __RPC__inout_ecount_full(256) LPWSTR pszMDBackupLocation,
/* [out] */ __RPC__out DWORD *pdwMDVersion,
/* [out] */ __RPC__out PFILETIME pftMDBackupTime,
/* [in] */ DWORD dwMDEnumIndex);
HRESULT ( STDMETHODCALLTYPE *DeleteBackup )(
__RPC__in IMSAdminBase2W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion);
HRESULT ( STDMETHODCALLTYPE *UnmarshalInterface )(
__RPC__in IMSAdminBase2W * This,
/* [out] */ __RPC__deref_out_opt IMSAdminBaseW **piadmbwInterface);
/* [restricted][local] */ HRESULT ( STDMETHODCALLTYPE *GetServerGuid )(
IMSAdminBase2W * This);
/* ---- IMSAdminBase2W additions ---- */
HRESULT ( STDMETHODCALLTYPE *BackupWithPasswd )(
__RPC__in IMSAdminBase2W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd);
HRESULT ( STDMETHODCALLTYPE *RestoreWithPasswd )(
__RPC__in IMSAdminBase2W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd);
HRESULT ( STDMETHODCALLTYPE *Export )(
__RPC__in IMSAdminBase2W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszFileName,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszSourcePath,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *Import )(
__RPC__in IMSAdminBase2W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszFileName,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszSourcePath,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszDestPath,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *RestoreHistory )(
__RPC__in IMSAdminBase2W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDHistoryLocation,
/* [in] */ DWORD dwMDMajorVersion,
/* [in] */ DWORD dwMDMinorVersion,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *EnumHistory )(
__RPC__in IMSAdminBase2W * This,
/* [size_is][out][in] */ __RPC__inout_ecount_full(256) LPWSTR pszMDHistoryLocation,
/* [out] */ __RPC__out DWORD *pdwMDMajorVersion,
/* [out] */ __RPC__out DWORD *pdwMDMinorVersion,
/* [out] */ __RPC__out PFILETIME pftMDHistoryTime,
/* [in] */ DWORD dwMDEnumIndex);
END_INTERFACE
} IMSAdminBase2WVtbl;
/* C-style interface object: just a pointer to the vtable above.
 * MIDL-generated; all calls go through lpVtbl (see the COBJMACROS
 * wrappers that follow). */
interface IMSAdminBase2W
{
CONST_VTBL struct IMSAdminBase2WVtbl *lpVtbl;
};
#ifdef COBJMACROS
/* COBJMACROS convenience wrappers for C callers: each macro forwards its
 * arguments through the object's vtable slot of the same name.
 * MIDL-generated; one macro per IMSAdminBase2W method. */
#define IMSAdminBase2W_QueryInterface(This,riid,ppvObject) \
( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define IMSAdminBase2W_AddRef(This) \
( (This)->lpVtbl -> AddRef(This) )
#define IMSAdminBase2W_Release(This) \
( (This)->lpVtbl -> Release(This) )
#define IMSAdminBase2W_AddKey(This,hMDHandle,pszMDPath) \
( (This)->lpVtbl -> AddKey(This,hMDHandle,pszMDPath) )
#define IMSAdminBase2W_DeleteKey(This,hMDHandle,pszMDPath) \
( (This)->lpVtbl -> DeleteKey(This,hMDHandle,pszMDPath) )
#define IMSAdminBase2W_DeleteChildKeys(This,hMDHandle,pszMDPath) \
( (This)->lpVtbl -> DeleteChildKeys(This,hMDHandle,pszMDPath) )
#define IMSAdminBase2W_EnumKeys(This,hMDHandle,pszMDPath,pszMDName,dwMDEnumObjectIndex) \
( (This)->lpVtbl -> EnumKeys(This,hMDHandle,pszMDPath,pszMDName,dwMDEnumObjectIndex) )
#define IMSAdminBase2W_CopyKey(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,bMDOverwriteFlag,bMDCopyFlag) \
( (This)->lpVtbl -> CopyKey(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,bMDOverwriteFlag,bMDCopyFlag) )
#define IMSAdminBase2W_RenameKey(This,hMDHandle,pszMDPath,pszMDNewName) \
( (This)->lpVtbl -> RenameKey(This,hMDHandle,pszMDPath,pszMDNewName) )
#define IMSAdminBase2W_SetData(This,hMDHandle,pszMDPath,pmdrMDData) \
( (This)->lpVtbl -> SetData(This,hMDHandle,pszMDPath,pmdrMDData) )
#define IMSAdminBase2W_GetData(This,hMDHandle,pszMDPath,pmdrMDData,pdwMDRequiredDataLen) \
( (This)->lpVtbl -> GetData(This,hMDHandle,pszMDPath,pmdrMDData,pdwMDRequiredDataLen) )
#define IMSAdminBase2W_DeleteData(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType) \
( (This)->lpVtbl -> DeleteData(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType) )
#define IMSAdminBase2W_EnumData(This,hMDHandle,pszMDPath,pmdrMDData,dwMDEnumDataIndex,pdwMDRequiredDataLen) \
( (This)->lpVtbl -> EnumData(This,hMDHandle,pszMDPath,pmdrMDData,dwMDEnumDataIndex,pdwMDRequiredDataLen) )
#define IMSAdminBase2W_GetAllData(This,hMDHandle,pszMDPath,dwMDAttributes,dwMDUserType,dwMDDataType,pdwMDNumDataEntries,pdwMDDataSetNumber,dwMDBufferSize,pbMDBuffer,pdwMDRequiredBufferSize) \
( (This)->lpVtbl -> GetAllData(This,hMDHandle,pszMDPath,dwMDAttributes,dwMDUserType,dwMDDataType,pdwMDNumDataEntries,pdwMDDataSetNumber,dwMDBufferSize,pbMDBuffer,pdwMDRequiredBufferSize) )
#define IMSAdminBase2W_DeleteAllData(This,hMDHandle,pszMDPath,dwMDUserType,dwMDDataType) \
( (This)->lpVtbl -> DeleteAllData(This,hMDHandle,pszMDPath,dwMDUserType,dwMDDataType) )
#define IMSAdminBase2W_CopyData(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,dwMDAttributes,dwMDUserType,dwMDDataType,bMDCopyFlag) \
( (This)->lpVtbl -> CopyData(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,dwMDAttributes,dwMDUserType,dwMDDataType,bMDCopyFlag) )
#define IMSAdminBase2W_GetDataPaths(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType,dwMDBufferSize,pszBuffer,pdwMDRequiredBufferSize) \
( (This)->lpVtbl -> GetDataPaths(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType,dwMDBufferSize,pszBuffer,pdwMDRequiredBufferSize) )
#define IMSAdminBase2W_OpenKey(This,hMDHandle,pszMDPath,dwMDAccessRequested,dwMDTimeOut,phMDNewHandle) \
( (This)->lpVtbl -> OpenKey(This,hMDHandle,pszMDPath,dwMDAccessRequested,dwMDTimeOut,phMDNewHandle) )
#define IMSAdminBase2W_CloseKey(This,hMDHandle) \
( (This)->lpVtbl -> CloseKey(This,hMDHandle) )
#define IMSAdminBase2W_ChangePermissions(This,hMDHandle,dwMDTimeOut,dwMDAccessRequested) \
( (This)->lpVtbl -> ChangePermissions(This,hMDHandle,dwMDTimeOut,dwMDAccessRequested) )
#define IMSAdminBase2W_SaveData(This) \
( (This)->lpVtbl -> SaveData(This) )
#define IMSAdminBase2W_GetHandleInfo(This,hMDHandle,pmdhiInfo) \
( (This)->lpVtbl -> GetHandleInfo(This,hMDHandle,pmdhiInfo) )
#define IMSAdminBase2W_GetSystemChangeNumber(This,pdwSystemChangeNumber) \
( (This)->lpVtbl -> GetSystemChangeNumber(This,pdwSystemChangeNumber) )
#define IMSAdminBase2W_GetDataSetNumber(This,hMDHandle,pszMDPath,pdwMDDataSetNumber) \
( (This)->lpVtbl -> GetDataSetNumber(This,hMDHandle,pszMDPath,pdwMDDataSetNumber) )
#define IMSAdminBase2W_SetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) \
( (This)->lpVtbl -> SetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) )
#define IMSAdminBase2W_GetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) \
( (This)->lpVtbl -> GetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) )
#define IMSAdminBase2W_KeyExchangePhase1(This) \
( (This)->lpVtbl -> KeyExchangePhase1(This) )
#define IMSAdminBase2W_KeyExchangePhase2(This) \
( (This)->lpVtbl -> KeyExchangePhase2(This) )
#define IMSAdminBase2W_Backup(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) \
( (This)->lpVtbl -> Backup(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) )
#define IMSAdminBase2W_Restore(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) \
( (This)->lpVtbl -> Restore(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) )
#define IMSAdminBase2W_EnumBackups(This,pszMDBackupLocation,pdwMDVersion,pftMDBackupTime,dwMDEnumIndex) \
( (This)->lpVtbl -> EnumBackups(This,pszMDBackupLocation,pdwMDVersion,pftMDBackupTime,dwMDEnumIndex) )
#define IMSAdminBase2W_DeleteBackup(This,pszMDBackupLocation,dwMDVersion) \
( (This)->lpVtbl -> DeleteBackup(This,pszMDBackupLocation,dwMDVersion) )
#define IMSAdminBase2W_UnmarshalInterface(This,piadmbwInterface) \
( (This)->lpVtbl -> UnmarshalInterface(This,piadmbwInterface) )
#define IMSAdminBase2W_GetServerGuid(This) \
( (This)->lpVtbl -> GetServerGuid(This) )
#define IMSAdminBase2W_BackupWithPasswd(This,pszMDBackupLocation,dwMDVersion,dwMDFlags,pszPasswd) \
( (This)->lpVtbl -> BackupWithPasswd(This,pszMDBackupLocation,dwMDVersion,dwMDFlags,pszPasswd) )
#define IMSAdminBase2W_RestoreWithPasswd(This,pszMDBackupLocation,dwMDVersion,dwMDFlags,pszPasswd) \
( (This)->lpVtbl -> RestoreWithPasswd(This,pszMDBackupLocation,dwMDVersion,dwMDFlags,pszPasswd) )
#define IMSAdminBase2W_Export(This,pszPasswd,pszFileName,pszSourcePath,dwMDFlags) \
( (This)->lpVtbl -> Export(This,pszPasswd,pszFileName,pszSourcePath,dwMDFlags) )
#define IMSAdminBase2W_Import(This,pszPasswd,pszFileName,pszSourcePath,pszDestPath,dwMDFlags) \
( (This)->lpVtbl -> Import(This,pszPasswd,pszFileName,pszSourcePath,pszDestPath,dwMDFlags) )
#define IMSAdminBase2W_RestoreHistory(This,pszMDHistoryLocation,dwMDMajorVersion,dwMDMinorVersion,dwMDFlags) \
( (This)->lpVtbl -> RestoreHistory(This,pszMDHistoryLocation,dwMDMajorVersion,dwMDMinorVersion,dwMDFlags) )
#define IMSAdminBase2W_EnumHistory(This,pszMDHistoryLocation,pdwMDMajorVersion,pdwMDMinorVersion,pftMDHistoryTime,dwMDEnumIndex) \
( (This)->lpVtbl -> EnumHistory(This,pszMDHistoryLocation,pdwMDMajorVersion,pdwMDMinorVersion,pftMDHistoryTime,dwMDEnumIndex) )
#endif /* COBJMACROS */
#endif /* C style interface */
#endif /* __IMSAdminBase2W_INTERFACE_DEFINED__ */
#ifndef __IMSAdminBase3W_INTERFACE_DEFINED__
#define __IMSAdminBase3W_INTERFACE_DEFINED__
/* interface IMSAdminBase3W */
/* [unique][uuid][object] */
EXTERN_C const IID IID_IMSAdminBase3W;
#if defined(__cplusplus) && !defined(CINTERFACE)
/* IMSAdminBase3W (C++ binding): MIDL-generated IIS metabase admin
 * interface, version 3.  Extends IMSAdminBase2W with a single new
 * method, GetChildPaths.  Generated code — do not edit by hand. */
MIDL_INTERFACE("f612954d-3b0b-4c56-9563-227b7be624b4")
IMSAdminBase3W : public IMSAdminBase2W
{
public:
/* Retrieve the child paths of the key at pszMDPath into pszBuffer
 * (caller-sized via cchMDBufferSize); *pcchMDRequiredBufferSize, when
 * supplied, receives the required buffer length.  Both buffer pointers
 * are optional per the [unique]/_opt annotations. */
virtual HRESULT STDMETHODCALLTYPE GetChildPaths(
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD cchMDBufferSize,
/* [size_is][unique][out][in] */ __RPC__inout_ecount_full_opt(cchMDBufferSize) WCHAR *pszBuffer,
/* [unique][out][in] */ __RPC__inout_opt DWORD *pcchMDRequiredBufferSize) = 0;
};
#else /* C style interface */
/* C-style binding for IMSAdminBase3W: explicit COM vtable.
 * MIDL-generated — slot order is the binary interface; it mirrors
 * IMSAdminBase2WVtbl exactly and appends GetChildPaths at the end.
 * Never reorder or insert entries.  [local] entries are not remoted
 * directly; [restricted] entries are placeholders. */
typedef struct IMSAdminBase3WVtbl
{
BEGIN_INTERFACE
/* ---- IUnknown slots ---- */
HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ __RPC__in REFIID riid,
/* [annotation][iid_is][out] */
_COM_Outptr_ void **ppvObject);
ULONG ( STDMETHODCALLTYPE *AddRef )(
__RPC__in IMSAdminBase3W * This);
ULONG ( STDMETHODCALLTYPE *Release )(
__RPC__in IMSAdminBase3W * This);
/* ---- IMSAdminBaseW slots (inherited) ---- */
HRESULT ( STDMETHODCALLTYPE *AddKey )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath);
HRESULT ( STDMETHODCALLTYPE *DeleteKey )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath);
HRESULT ( STDMETHODCALLTYPE *DeleteChildKeys )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath);
HRESULT ( STDMETHODCALLTYPE *EnumKeys )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [size_is][out] */ __RPC__out_ecount_full(256) LPWSTR pszMDName,
/* [in] */ DWORD dwMDEnumObjectIndex);
HRESULT ( STDMETHODCALLTYPE *CopyKey )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDSourceHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDSourcePath,
/* [in] */ METADATA_HANDLE hMDDestHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDDestPath,
/* [in] */ BOOL bMDOverwriteFlag,
/* [in] */ BOOL bMDCopyFlag);
HRESULT ( STDMETHODCALLTYPE *RenameKey )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDNewName);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *SetData )(
IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [in] */ PMETADATA_RECORD pmdrMDData);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *GetData )(
IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [out][in] */ PMETADATA_RECORD pmdrMDData,
/* [out] */ DWORD *pdwMDRequiredDataLen);
HRESULT ( STDMETHODCALLTYPE *DeleteData )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDIdentifier,
/* [in] */ DWORD dwMDDataType);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *EnumData )(
IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [out][in] */ PMETADATA_RECORD pmdrMDData,
/* [in] */ DWORD dwMDEnumDataIndex,
/* [out] */ DWORD *pdwMDRequiredDataLen);
/* [local] */ HRESULT ( STDMETHODCALLTYPE *GetAllData )(
IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [out] */ DWORD *pdwMDNumDataEntries,
/* [out] */ DWORD *pdwMDDataSetNumber,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][out] */ unsigned char *pbMDBuffer,
/* [out] */ DWORD *pdwMDRequiredBufferSize);
HRESULT ( STDMETHODCALLTYPE *DeleteAllData )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType);
HRESULT ( STDMETHODCALLTYPE *CopyData )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDSourceHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDSourcePath,
/* [in] */ METADATA_HANDLE hMDDestHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDDestPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [in] */ BOOL bMDCopyFlag);
HRESULT ( STDMETHODCALLTYPE *GetDataPaths )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDIdentifier,
/* [in] */ DWORD dwMDDataType,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][out] */ __RPC__out_ecount_full(dwMDBufferSize) WCHAR *pszBuffer,
/* [out] */ __RPC__out DWORD *pdwMDRequiredBufferSize);
HRESULT ( STDMETHODCALLTYPE *OpenKey )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAccessRequested,
/* [in] */ DWORD dwMDTimeOut,
/* [out] */ __RPC__out PMETADATA_HANDLE phMDNewHandle);
HRESULT ( STDMETHODCALLTYPE *CloseKey )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle);
HRESULT ( STDMETHODCALLTYPE *ChangePermissions )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [in] */ DWORD dwMDTimeOut,
/* [in] */ DWORD dwMDAccessRequested);
HRESULT ( STDMETHODCALLTYPE *SaveData )(
__RPC__in IMSAdminBase3W * This);
HRESULT ( STDMETHODCALLTYPE *GetHandleInfo )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [out] */ __RPC__out PMETADATA_HANDLE_INFO pmdhiInfo);
HRESULT ( STDMETHODCALLTYPE *GetSystemChangeNumber )(
__RPC__in IMSAdminBase3W * This,
/* [out] */ __RPC__out DWORD *pdwSystemChangeNumber);
HRESULT ( STDMETHODCALLTYPE *GetDataSetNumber )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out] */ __RPC__out DWORD *pdwMDDataSetNumber);
HRESULT ( STDMETHODCALLTYPE *SetLastChangeTime )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ __RPC__in PFILETIME pftMDLastChangeTime,
/* [in] */ BOOL bLocalTime);
HRESULT ( STDMETHODCALLTYPE *GetLastChangeTime )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out] */ __RPC__out PFILETIME pftMDLastChangeTime,
/* [in] */ BOOL bLocalTime);
/* [restricted][local] */ HRESULT ( STDMETHODCALLTYPE *KeyExchangePhase1 )(
IMSAdminBase3W * This);
/* [restricted][local] */ HRESULT ( STDMETHODCALLTYPE *KeyExchangePhase2 )(
IMSAdminBase3W * This);
HRESULT ( STDMETHODCALLTYPE *Backup )(
__RPC__in IMSAdminBase3W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *Restore )(
__RPC__in IMSAdminBase3W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *EnumBackups )(
__RPC__in IMSAdminBase3W * This,
/* [size_is][out][in] */ __RPC__inout_ecount_full(256) LPWSTR pszMDBackupLocation,
/* [out] */ __RPC__out DWORD *pdwMDVersion,
/* [out] */ __RPC__out PFILETIME pftMDBackupTime,
/* [in] */ DWORD dwMDEnumIndex);
HRESULT ( STDMETHODCALLTYPE *DeleteBackup )(
__RPC__in IMSAdminBase3W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion);
HRESULT ( STDMETHODCALLTYPE *UnmarshalInterface )(
__RPC__in IMSAdminBase3W * This,
/* [out] */ __RPC__deref_out_opt IMSAdminBaseW **piadmbwInterface);
/* [restricted][local] */ HRESULT ( STDMETHODCALLTYPE *GetServerGuid )(
IMSAdminBase3W * This);
/* ---- IMSAdminBase2W slots (inherited) ---- */
HRESULT ( STDMETHODCALLTYPE *BackupWithPasswd )(
__RPC__in IMSAdminBase3W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd);
HRESULT ( STDMETHODCALLTYPE *RestoreWithPasswd )(
__RPC__in IMSAdminBase3W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDBackupLocation,
/* [in] */ DWORD dwMDVersion,
/* [in] */ DWORD dwMDFlags,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd);
HRESULT ( STDMETHODCALLTYPE *Export )(
__RPC__in IMSAdminBase3W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszFileName,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszSourcePath,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *Import )(
__RPC__in IMSAdminBase3W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszPasswd,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszFileName,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszSourcePath,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszDestPath,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *RestoreHistory )(
__RPC__in IMSAdminBase3W * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDHistoryLocation,
/* [in] */ DWORD dwMDMajorVersion,
/* [in] */ DWORD dwMDMinorVersion,
/* [in] */ DWORD dwMDFlags);
HRESULT ( STDMETHODCALLTYPE *EnumHistory )(
__RPC__in IMSAdminBase3W * This,
/* [size_is][out][in] */ __RPC__inout_ecount_full(256) LPWSTR pszMDHistoryLocation,
/* [out] */ __RPC__out DWORD *pdwMDMajorVersion,
/* [out] */ __RPC__out DWORD *pdwMDMinorVersion,
/* [out] */ __RPC__out PFILETIME pftMDHistoryTime,
/* [in] */ DWORD dwMDEnumIndex);
/* ---- IMSAdminBase3W addition ---- */
HRESULT ( STDMETHODCALLTYPE *GetChildPaths )(
__RPC__in IMSAdminBase3W * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD cchMDBufferSize,
/* [size_is][unique][out][in] */ __RPC__inout_ecount_full_opt(cchMDBufferSize) WCHAR *pszBuffer,
/* [unique][out][in] */ __RPC__inout_opt DWORD *pcchMDRequiredBufferSize);
END_INTERFACE
} IMSAdminBase3WVtbl;
/* C-style interface object: just a pointer to the vtable above.
 * MIDL-generated; all calls go through lpVtbl (see the COBJMACROS
 * wrappers that follow). */
interface IMSAdminBase3W
{
CONST_VTBL struct IMSAdminBase3WVtbl *lpVtbl;
};
#ifdef COBJMACROS
/* COBJMACROS convenience wrappers for C callers: each macro forwards its
 * arguments through the object's vtable slot of the same name.
 * MIDL-generated; one macro per IMSAdminBase3W method. */
#define IMSAdminBase3W_QueryInterface(This,riid,ppvObject) \
( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define IMSAdminBase3W_AddRef(This) \
( (This)->lpVtbl -> AddRef(This) )
#define IMSAdminBase3W_Release(This) \
( (This)->lpVtbl -> Release(This) )
#define IMSAdminBase3W_AddKey(This,hMDHandle,pszMDPath) \
( (This)->lpVtbl -> AddKey(This,hMDHandle,pszMDPath) )
#define IMSAdminBase3W_DeleteKey(This,hMDHandle,pszMDPath) \
( (This)->lpVtbl -> DeleteKey(This,hMDHandle,pszMDPath) )
#define IMSAdminBase3W_DeleteChildKeys(This,hMDHandle,pszMDPath) \
( (This)->lpVtbl -> DeleteChildKeys(This,hMDHandle,pszMDPath) )
#define IMSAdminBase3W_EnumKeys(This,hMDHandle,pszMDPath,pszMDName,dwMDEnumObjectIndex) \
( (This)->lpVtbl -> EnumKeys(This,hMDHandle,pszMDPath,pszMDName,dwMDEnumObjectIndex) )
#define IMSAdminBase3W_CopyKey(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,bMDOverwriteFlag,bMDCopyFlag) \
( (This)->lpVtbl -> CopyKey(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,bMDOverwriteFlag,bMDCopyFlag) )
#define IMSAdminBase3W_RenameKey(This,hMDHandle,pszMDPath,pszMDNewName) \
( (This)->lpVtbl -> RenameKey(This,hMDHandle,pszMDPath,pszMDNewName) )
#define IMSAdminBase3W_SetData(This,hMDHandle,pszMDPath,pmdrMDData) \
( (This)->lpVtbl -> SetData(This,hMDHandle,pszMDPath,pmdrMDData) )
#define IMSAdminBase3W_GetData(This,hMDHandle,pszMDPath,pmdrMDData,pdwMDRequiredDataLen) \
( (This)->lpVtbl -> GetData(This,hMDHandle,pszMDPath,pmdrMDData,pdwMDRequiredDataLen) )
#define IMSAdminBase3W_DeleteData(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType) \
( (This)->lpVtbl -> DeleteData(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType) )
#define IMSAdminBase3W_EnumData(This,hMDHandle,pszMDPath,pmdrMDData,dwMDEnumDataIndex,pdwMDRequiredDataLen) \
( (This)->lpVtbl -> EnumData(This,hMDHandle,pszMDPath,pmdrMDData,dwMDEnumDataIndex,pdwMDRequiredDataLen) )
#define IMSAdminBase3W_GetAllData(This,hMDHandle,pszMDPath,dwMDAttributes,dwMDUserType,dwMDDataType,pdwMDNumDataEntries,pdwMDDataSetNumber,dwMDBufferSize,pbMDBuffer,pdwMDRequiredBufferSize) \
( (This)->lpVtbl -> GetAllData(This,hMDHandle,pszMDPath,dwMDAttributes,dwMDUserType,dwMDDataType,pdwMDNumDataEntries,pdwMDDataSetNumber,dwMDBufferSize,pbMDBuffer,pdwMDRequiredBufferSize) )
#define IMSAdminBase3W_DeleteAllData(This,hMDHandle,pszMDPath,dwMDUserType,dwMDDataType) \
( (This)->lpVtbl -> DeleteAllData(This,hMDHandle,pszMDPath,dwMDUserType,dwMDDataType) )
#define IMSAdminBase3W_CopyData(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,dwMDAttributes,dwMDUserType,dwMDDataType,bMDCopyFlag) \
( (This)->lpVtbl -> CopyData(This,hMDSourceHandle,pszMDSourcePath,hMDDestHandle,pszMDDestPath,dwMDAttributes,dwMDUserType,dwMDDataType,bMDCopyFlag) )
#define IMSAdminBase3W_GetDataPaths(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType,dwMDBufferSize,pszBuffer,pdwMDRequiredBufferSize) \
( (This)->lpVtbl -> GetDataPaths(This,hMDHandle,pszMDPath,dwMDIdentifier,dwMDDataType,dwMDBufferSize,pszBuffer,pdwMDRequiredBufferSize) )
#define IMSAdminBase3W_OpenKey(This,hMDHandle,pszMDPath,dwMDAccessRequested,dwMDTimeOut,phMDNewHandle) \
( (This)->lpVtbl -> OpenKey(This,hMDHandle,pszMDPath,dwMDAccessRequested,dwMDTimeOut,phMDNewHandle) )
#define IMSAdminBase3W_CloseKey(This,hMDHandle) \
( (This)->lpVtbl -> CloseKey(This,hMDHandle) )
#define IMSAdminBase3W_ChangePermissions(This,hMDHandle,dwMDTimeOut,dwMDAccessRequested) \
( (This)->lpVtbl -> ChangePermissions(This,hMDHandle,dwMDTimeOut,dwMDAccessRequested) )
#define IMSAdminBase3W_SaveData(This) \
( (This)->lpVtbl -> SaveData(This) )
#define IMSAdminBase3W_GetHandleInfo(This,hMDHandle,pmdhiInfo) \
( (This)->lpVtbl -> GetHandleInfo(This,hMDHandle,pmdhiInfo) )
#define IMSAdminBase3W_GetSystemChangeNumber(This,pdwSystemChangeNumber) \
( (This)->lpVtbl -> GetSystemChangeNumber(This,pdwSystemChangeNumber) )
#define IMSAdminBase3W_GetDataSetNumber(This,hMDHandle,pszMDPath,pdwMDDataSetNumber) \
( (This)->lpVtbl -> GetDataSetNumber(This,hMDHandle,pszMDPath,pdwMDDataSetNumber) )
#define IMSAdminBase3W_SetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) \
( (This)->lpVtbl -> SetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) )
#define IMSAdminBase3W_GetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) \
( (This)->lpVtbl -> GetLastChangeTime(This,hMDHandle,pszMDPath,pftMDLastChangeTime,bLocalTime) )
#define IMSAdminBase3W_KeyExchangePhase1(This) \
( (This)->lpVtbl -> KeyExchangePhase1(This) )
#define IMSAdminBase3W_KeyExchangePhase2(This) \
( (This)->lpVtbl -> KeyExchangePhase2(This) )
#define IMSAdminBase3W_Backup(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) \
( (This)->lpVtbl -> Backup(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) )
#define IMSAdminBase3W_Restore(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) \
( (This)->lpVtbl -> Restore(This,pszMDBackupLocation,dwMDVersion,dwMDFlags) )
#define IMSAdminBase3W_EnumBackups(This,pszMDBackupLocation,pdwMDVersion,pftMDBackupTime,dwMDEnumIndex) \
( (This)->lpVtbl -> EnumBackups(This,pszMDBackupLocation,pdwMDVersion,pftMDBackupTime,dwMDEnumIndex) )
#define IMSAdminBase3W_DeleteBackup(This,pszMDBackupLocation,dwMDVersion) \
( (This)->lpVtbl -> DeleteBackup(This,pszMDBackupLocation,dwMDVersion) )
#define IMSAdminBase3W_UnmarshalInterface(This,piadmbwInterface) \
( (This)->lpVtbl -> UnmarshalInterface(This,piadmbwInterface) )
#define IMSAdminBase3W_GetServerGuid(This) \
( (This)->lpVtbl -> GetServerGuid(This) )
#define IMSAdminBase3W_BackupWithPasswd(This,pszMDBackupLocation,dwMDVersion,dwMDFlags,pszPasswd) \
( (This)->lpVtbl -> BackupWithPasswd(This,pszMDBackupLocation,dwMDVersion,dwMDFlags,pszPasswd) )
#define IMSAdminBase3W_RestoreWithPasswd(This,pszMDBackupLocation,dwMDVersion,dwMDFlags,pszPasswd) \
( (This)->lpVtbl -> RestoreWithPasswd(This,pszMDBackupLocation,dwMDVersion,dwMDFlags,pszPasswd) )
#define IMSAdminBase3W_Export(This,pszPasswd,pszFileName,pszSourcePath,dwMDFlags) \
( (This)->lpVtbl -> Export(This,pszPasswd,pszFileName,pszSourcePath,dwMDFlags) )
#define IMSAdminBase3W_Import(This,pszPasswd,pszFileName,pszSourcePath,pszDestPath,dwMDFlags) \
( (This)->lpVtbl -> Import(This,pszPasswd,pszFileName,pszSourcePath,pszDestPath,dwMDFlags) )
#define IMSAdminBase3W_RestoreHistory(This,pszMDHistoryLocation,dwMDMajorVersion,dwMDMinorVersion,dwMDFlags) \
( (This)->lpVtbl -> RestoreHistory(This,pszMDHistoryLocation,dwMDMajorVersion,dwMDMinorVersion,dwMDFlags) )
#define IMSAdminBase3W_EnumHistory(This,pszMDHistoryLocation,pdwMDMajorVersion,pdwMDMinorVersion,pftMDHistoryTime,dwMDEnumIndex) \
( (This)->lpVtbl -> EnumHistory(This,pszMDHistoryLocation,pdwMDMajorVersion,pdwMDMinorVersion,pftMDHistoryTime,dwMDEnumIndex) )
#define IMSAdminBase3W_GetChildPaths(This,hMDHandle,pszMDPath,cchMDBufferSize,pszBuffer,pcchMDRequiredBufferSize) \
( (This)->lpVtbl -> GetChildPaths(This,hMDHandle,pszMDPath,cchMDBufferSize,pszBuffer,pcchMDRequiredBufferSize) )
#endif /* COBJMACROS */
#endif /* C style interface */
#endif /* __IMSAdminBase3W_INTERFACE_DEFINED__ */
#ifndef __IMSImpExpHelpW_INTERFACE_DEFINED__
#define __IMSImpExpHelpW_INTERFACE_DEFINED__
/* interface IMSImpExpHelpW */
/* [unique][uuid][object] */
EXTERN_C const IID IID_IMSImpExpHelpW;
#if defined(__cplusplus) && !defined(CINTERFACE)
MIDL_INTERFACE("29FF67FF-8050-480f-9F30-CC41635F2F9D")
IMSImpExpHelpW : public IUnknown
{
public:
virtual HRESULT STDMETHODCALLTYPE EnumeratePathsInFile(
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszFileName,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszKeyType,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][unique][out][in] */ __RPC__inout_ecount_full_opt(dwMDBufferSize) WCHAR *pszBuffer,
/* [out] */ __RPC__out DWORD *pdwMDRequiredBufferSize) = 0;
};
#else /* C style interface */
typedef struct IMSImpExpHelpWVtbl
{
BEGIN_INTERFACE
HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
__RPC__in IMSImpExpHelpW * This,
/* [in] */ __RPC__in REFIID riid,
/* [annotation][iid_is][out] */
_COM_Outptr_ void **ppvObject);
ULONG ( STDMETHODCALLTYPE *AddRef )(
__RPC__in IMSImpExpHelpW * This);
ULONG ( STDMETHODCALLTYPE *Release )(
__RPC__in IMSImpExpHelpW * This);
HRESULT ( STDMETHODCALLTYPE *EnumeratePathsInFile )(
__RPC__in IMSImpExpHelpW * This,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszFileName,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszKeyType,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][unique][out][in] */ __RPC__inout_ecount_full_opt(dwMDBufferSize) WCHAR *pszBuffer,
/* [out] */ __RPC__out DWORD *pdwMDRequiredBufferSize);
END_INTERFACE
} IMSImpExpHelpWVtbl;
interface IMSImpExpHelpW
{
CONST_VTBL struct IMSImpExpHelpWVtbl *lpVtbl;
};
#ifdef COBJMACROS
#define IMSImpExpHelpW_QueryInterface(This,riid,ppvObject) \
( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define IMSImpExpHelpW_AddRef(This) \
( (This)->lpVtbl -> AddRef(This) )
#define IMSImpExpHelpW_Release(This) \
( (This)->lpVtbl -> Release(This) )
#define IMSImpExpHelpW_EnumeratePathsInFile(This,pszFileName,pszKeyType,dwMDBufferSize,pszBuffer,pdwMDRequiredBufferSize) \
( (This)->lpVtbl -> EnumeratePathsInFile(This,pszFileName,pszKeyType,dwMDBufferSize,pszBuffer,pdwMDRequiredBufferSize) )
#endif /* COBJMACROS */
#endif /* C style interface */
#endif /* __IMSImpExpHelpW_INTERFACE_DEFINED__ */
#ifndef __IMSAdminBaseSinkW_INTERFACE_DEFINED__
#define __IMSAdminBaseSinkW_INTERFACE_DEFINED__
/* interface IMSAdminBaseSinkW */
/* [unique][async_uuid][uuid][object] */
EXTERN_C const IID IID_IMSAdminBaseSinkW;
#if defined(__cplusplus) && !defined(CINTERFACE)
MIDL_INTERFACE("A9E69612-B80D-11d0-B9B9-00A0C922E750")
IMSAdminBaseSinkW : public IUnknown
{
public:
virtual HRESULT STDMETHODCALLTYPE SinkNotify(
/* [in] */ DWORD dwMDNumElements,
/* [size_is][in] */ __RPC__in_ecount_full(dwMDNumElements) MD_CHANGE_OBJECT_W pcoChangeList[ ]) = 0;
virtual HRESULT STDMETHODCALLTYPE ShutdownNotify( void) = 0;
};
#else /* C style interface */
typedef struct IMSAdminBaseSinkWVtbl
{
BEGIN_INTERFACE
HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
__RPC__in IMSAdminBaseSinkW * This,
/* [in] */ __RPC__in REFIID riid,
/* [annotation][iid_is][out] */
_COM_Outptr_ void **ppvObject);
ULONG ( STDMETHODCALLTYPE *AddRef )(
__RPC__in IMSAdminBaseSinkW * This);
ULONG ( STDMETHODCALLTYPE *Release )(
__RPC__in IMSAdminBaseSinkW * This);
HRESULT ( STDMETHODCALLTYPE *SinkNotify )(
__RPC__in IMSAdminBaseSinkW * This,
/* [in] */ DWORD dwMDNumElements,
/* [size_is][in] */ __RPC__in_ecount_full(dwMDNumElements) MD_CHANGE_OBJECT_W pcoChangeList[ ]);
HRESULT ( STDMETHODCALLTYPE *ShutdownNotify )(
__RPC__in IMSAdminBaseSinkW * This);
END_INTERFACE
} IMSAdminBaseSinkWVtbl;
interface IMSAdminBaseSinkW
{
CONST_VTBL struct IMSAdminBaseSinkWVtbl *lpVtbl;
};
#ifdef COBJMACROS
#define IMSAdminBaseSinkW_QueryInterface(This,riid,ppvObject) \
( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define IMSAdminBaseSinkW_AddRef(This) \
( (This)->lpVtbl -> AddRef(This) )
#define IMSAdminBaseSinkW_Release(This) \
( (This)->lpVtbl -> Release(This) )
#define IMSAdminBaseSinkW_SinkNotify(This,dwMDNumElements,pcoChangeList) \
( (This)->lpVtbl -> SinkNotify(This,dwMDNumElements,pcoChangeList) )
#define IMSAdminBaseSinkW_ShutdownNotify(This) \
( (This)->lpVtbl -> ShutdownNotify(This) )
#endif /* COBJMACROS */
#endif /* C style interface */
#endif /* __IMSAdminBaseSinkW_INTERFACE_DEFINED__ */
#ifndef __AsyncIMSAdminBaseSinkW_INTERFACE_DEFINED__
#define __AsyncIMSAdminBaseSinkW_INTERFACE_DEFINED__
/* interface AsyncIMSAdminBaseSinkW */
/* [uuid][unique][object] */
EXTERN_C const IID IID_AsyncIMSAdminBaseSinkW;
#if defined(__cplusplus) && !defined(CINTERFACE)
MIDL_INTERFACE("A9E69613-B80D-11d0-B9B9-00A0C922E750")
AsyncIMSAdminBaseSinkW : public IUnknown
{
public:
virtual HRESULT STDMETHODCALLTYPE Begin_SinkNotify(
/* [in] */ DWORD dwMDNumElements,
/* [size_is][in] */ __RPC__in_xcount_full(dwMDNumElements) MD_CHANGE_OBJECT_W pcoChangeList[ ]) = 0;
virtual HRESULT STDMETHODCALLTYPE Finish_SinkNotify( void) = 0;
virtual HRESULT STDMETHODCALLTYPE Begin_ShutdownNotify( void) = 0;
virtual HRESULT STDMETHODCALLTYPE Finish_ShutdownNotify( void) = 0;
};
#else /* C style interface */
typedef struct AsyncIMSAdminBaseSinkWVtbl
{
BEGIN_INTERFACE
HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
__RPC__in AsyncIMSAdminBaseSinkW * This,
/* [in] */ __RPC__in REFIID riid,
/* [annotation][iid_is][out] */
_COM_Outptr_ void **ppvObject);
ULONG ( STDMETHODCALLTYPE *AddRef )(
__RPC__in AsyncIMSAdminBaseSinkW * This);
ULONG ( STDMETHODCALLTYPE *Release )(
__RPC__in AsyncIMSAdminBaseSinkW * This);
HRESULT ( STDMETHODCALLTYPE *Begin_SinkNotify )(
__RPC__in AsyncIMSAdminBaseSinkW * This,
/* [in] */ DWORD dwMDNumElements,
/* [size_is][in] */ __RPC__in_xcount_full(dwMDNumElements) MD_CHANGE_OBJECT_W pcoChangeList[ ]);
HRESULT ( STDMETHODCALLTYPE *Finish_SinkNotify )(
__RPC__in AsyncIMSAdminBaseSinkW * This);
HRESULT ( STDMETHODCALLTYPE *Begin_ShutdownNotify )(
__RPC__in AsyncIMSAdminBaseSinkW * This);
HRESULT ( STDMETHODCALLTYPE *Finish_ShutdownNotify )(
__RPC__in AsyncIMSAdminBaseSinkW * This);
END_INTERFACE
} AsyncIMSAdminBaseSinkWVtbl;
interface AsyncIMSAdminBaseSinkW
{
CONST_VTBL struct AsyncIMSAdminBaseSinkWVtbl *lpVtbl;
};
#ifdef COBJMACROS
#define AsyncIMSAdminBaseSinkW_QueryInterface(This,riid,ppvObject) \
( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
#define AsyncIMSAdminBaseSinkW_AddRef(This) \
( (This)->lpVtbl -> AddRef(This) )
#define AsyncIMSAdminBaseSinkW_Release(This) \
( (This)->lpVtbl -> Release(This) )
#define AsyncIMSAdminBaseSinkW_Begin_SinkNotify(This,dwMDNumElements,pcoChangeList) \
( (This)->lpVtbl -> Begin_SinkNotify(This,dwMDNumElements,pcoChangeList) )
#define AsyncIMSAdminBaseSinkW_Finish_SinkNotify(This) \
( (This)->lpVtbl -> Finish_SinkNotify(This) )
#define AsyncIMSAdminBaseSinkW_Begin_ShutdownNotify(This) \
( (This)->lpVtbl -> Begin_ShutdownNotify(This) )
#define AsyncIMSAdminBaseSinkW_Finish_ShutdownNotify(This) \
( (This)->lpVtbl -> Finish_ShutdownNotify(This) )
#endif /* COBJMACROS */
#endif /* C style interface */
#endif /* __AsyncIMSAdminBaseSinkW_INTERFACE_DEFINED__ */
/* interface __MIDL_itf_iadmw_0000_0005 */
/* [local] */
#endif /* WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) */
#pragma endregion
#endif
extern RPC_IF_HANDLE __MIDL_itf_iadmw_0000_0005_v0_0_c_ifspec;
extern RPC_IF_HANDLE __MIDL_itf_iadmw_0000_0005_v0_0_s_ifspec;
/* Additional Prototypes for ALL interfaces */
/* [local] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_SetData_Proxy(
IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [in] */ PMETADATA_RECORD pmdrMDData);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_SetData_Stub(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ __RPC__in PMETADATA_RECORD pmdrMDData);
/* [local] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_GetData_Proxy(
IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [out][in] */ PMETADATA_RECORD pmdrMDData,
/* [out] */ DWORD *pdwMDRequiredDataLen);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_GetData_Stub(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out][in] */ __RPC__inout PMETADATA_RECORD pmdrMDData,
/* [out] */ __RPC__out DWORD *pdwMDRequiredDataLen,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppDataBlob);
/* [local] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_EnumData_Proxy(
IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [out][in] */ PMETADATA_RECORD pmdrMDData,
/* [in] */ DWORD dwMDEnumDataIndex,
/* [out] */ DWORD *pdwMDRequiredDataLen);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_EnumData_Stub(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [out][in] */ __RPC__inout PMETADATA_RECORD pmdrMDData,
/* [in] */ DWORD dwMDEnumDataIndex,
/* [out] */ __RPC__out DWORD *pdwMDRequiredDataLen,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppDataBlob);
/* [local] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_GetAllData_Proxy(
IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [out] */ DWORD *pdwMDNumDataEntries,
/* [out] */ DWORD *pdwMDDataSetNumber,
/* [in] */ DWORD dwMDBufferSize,
/* [size_is][out] */ unsigned char *pbMDBuffer,
/* [out] */ DWORD *pdwMDRequiredBufferSize);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_GetAllData_Stub(
__RPC__in IMSAdminBaseW * This,
/* [in] */ METADATA_HANDLE hMDHandle,
/* [string][in][unique] */ __RPC__in_opt_string LPCWSTR pszMDPath,
/* [in] */ DWORD dwMDAttributes,
/* [in] */ DWORD dwMDUserType,
/* [in] */ DWORD dwMDDataType,
/* [out] */ __RPC__out DWORD *pdwMDNumDataEntries,
/* [out] */ __RPC__out DWORD *pdwMDDataSetNumber,
/* [in] */ DWORD dwMDBufferSize,
/* [out] */ __RPC__out DWORD *pdwMDRequiredBufferSize,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppDataBlob);
/* [restricted][local] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_KeyExchangePhase1_Proxy(
IMSAdminBaseW * This);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_KeyExchangePhase1_Stub(
__RPC__in IMSAdminBaseW * This,
/* [in][unique] */ __RPC__in_opt struct _IIS_CRYPTO_BLOB *pClientKeyExchangeKeyBlob,
/* [in][unique] */ __RPC__in_opt struct _IIS_CRYPTO_BLOB *pClientSignatureKeyBlob,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppServerKeyExchangeKeyBlob,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppServerSignatureKeyBlob,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppServerSessionKeyBlob);
/* [restricted][local] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_KeyExchangePhase2_Proxy(
IMSAdminBaseW * This);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_KeyExchangePhase2_Stub(
__RPC__in IMSAdminBaseW * This,
/* [in][unique] */ __RPC__in_opt struct _IIS_CRYPTO_BLOB *pClientSessionKeyBlob,
/* [in][unique] */ __RPC__in_opt struct _IIS_CRYPTO_BLOB *pClientHashBlob,
/* [out] */ __RPC__deref_out_opt struct _IIS_CRYPTO_BLOB **ppServerHashBlob);
/* [restricted][local] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_GetServerGuid_Proxy(
IMSAdminBaseW * This);
/* [call_as] */ HRESULT STDMETHODCALLTYPE IMSAdminBaseW_GetServerGuid_Stub(
__RPC__in IMSAdminBaseW * This,
/* [out] */ __RPC__out GUID *pServerGuid);
/* end of Additional Prototypes */
#ifdef __cplusplus
}
#endif
#endif
|
c8940c4f0998e8f6f447275fc985517abcbe262e
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/multimedia/mplex/files/patch-interact.c
|
07796d00b20ecd74bb5199f5c05e0a2d75637e4f
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
C
| false
| false
| 687
|
c
|
patch-interact.c
|
--- interact.c.orig 1995-05-31 11:18:33 UTC
+++ interact.c
@@ -67,7 +67,7 @@ void ask_continue ()
char input[20];
printf ("\nContinue processing (y/n) : ");
- do gets (input);
+ do fgets (input, sizeof(input), stdin);
while (input[0]!='N'&&input[0]!='n'&&input[0]!='y'&&input[0]!='Y');
if (input[0]=='N' || input[0]=='n')
@@ -92,7 +92,7 @@ unsigned char ask_verbose ()
char input[20];
printf ("\nVery verbose mode (y/n) : ");
- do gets (input);
+ do fgets (input, sizeof(input), stdin);
while (input[0]!='N'&&input[0]!='n'&&input[0]!='y'&&input[0]!='Y');
if (input[0]=='N' || input[0]=='n') return (FALSE); else return (TRUE);
|
30b4d396d34e84159a6cc303bade41be995e4f37
|
321d11eaee885ceb3a74db0a062f9bbdf282148c
|
/crypto/evp/legacy_sha.c
|
3859286eeb2046e6532919a19fdba3e5174715d5
|
[
"Apache-2.0",
"OpenSSL",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
openssl/openssl
|
75691ebaae957793f2ff0673f77545277dfb3988
|
5318c012885a5382eadbf95aa9c1d35664bca819
|
refs/heads/master
| 2023-09-03T15:22:52.727123
| 2023-09-01T07:10:49
| 2023-09-02T14:30:01
| 7,634,677
| 24,148
| 11,569
|
Apache-2.0
| 2023-09-14T19:48:11
| 2013-01-15T22:34:48
|
C
|
UTF-8
|
C
| false
| false
| 8,233
|
c
|
legacy_sha.c
|
/*
* Copyright 2019-2021 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
/*
* All SHA low level APIs are deprecated for public use, but still ok for
* internal use.
*/
#include "internal/deprecated.h"
#include <openssl/sha.h> /* diverse SHA macros */
#include "internal/sha3.h" /* KECCAK1600_WIDTH */
#include "crypto/evp.h"
/* Used by legacy methods */
#include "crypto/sha.h"
#include "legacy_meth.h"
#include "evp_local.h"
/*-
* LEGACY methods for SHA.
* These only remain to support engines that can get these methods.
* Hardware support for SHA3 has been removed from these legacy cases.
*/
#define IMPLEMENT_LEGACY_EVP_MD_METH_SHA3(nm, fn, tag) \
static int nm##_init(EVP_MD_CTX *ctx) \
{ \
return fn##_init(EVP_MD_CTX_get0_md_data(ctx), tag, ctx->digest->md_size * 8); \
} \
static int nm##_update(EVP_MD_CTX *ctx, const void *data, size_t count) \
{ \
return fn##_update(EVP_MD_CTX_get0_md_data(ctx), data, count); \
} \
static int nm##_final(EVP_MD_CTX *ctx, unsigned char *md) \
{ \
return fn##_final(md, EVP_MD_CTX_get0_md_data(ctx)); \
}
#define IMPLEMENT_LEGACY_EVP_MD_METH_SHAKE(nm, fn, tag) \
static int nm##_init(EVP_MD_CTX *ctx) \
{ \
return fn##_init(EVP_MD_CTX_get0_md_data(ctx), tag, ctx->digest->md_size * 8); \
} \
#define sha512_224_Init sha512_224_init
#define sha512_256_Init sha512_256_init
#define sha512_224_Update SHA512_Update
#define sha512_224_Final SHA512_Final
#define sha512_256_Update SHA512_Update
#define sha512_256_Final SHA512_Final
IMPLEMENT_LEGACY_EVP_MD_METH(sha1, SHA1)
IMPLEMENT_LEGACY_EVP_MD_METH(sha224, SHA224)
IMPLEMENT_LEGACY_EVP_MD_METH(sha256, SHA256)
IMPLEMENT_LEGACY_EVP_MD_METH(sha384, SHA384)
IMPLEMENT_LEGACY_EVP_MD_METH(sha512, SHA512)
IMPLEMENT_LEGACY_EVP_MD_METH(sha512_224_int, sha512_224)
IMPLEMENT_LEGACY_EVP_MD_METH(sha512_256_int, sha512_256)
IMPLEMENT_LEGACY_EVP_MD_METH_SHA3(sha3_int, ossl_sha3, '\x06')
IMPLEMENT_LEGACY_EVP_MD_METH_SHAKE(shake, ossl_sha3, '\x1f')
static int sha1_int_ctrl(EVP_MD_CTX *ctx, int cmd, int p1, void *p2)
{
return ossl_sha1_ctrl(ctx != NULL ? EVP_MD_CTX_get0_md_data(ctx) : NULL,
cmd, p1, p2);
}
static int shake_ctrl(EVP_MD_CTX *evp_ctx, int cmd, int p1, void *p2)
{
KECCAK1600_CTX *ctx = evp_ctx->md_data;
switch (cmd) {
case EVP_MD_CTRL_XOF_LEN:
ctx->md_size = p1;
return 1;
default:
return 0;
}
}
static const EVP_MD sha1_md = {
NID_sha1,
NID_sha1WithRSAEncryption,
SHA_DIGEST_LENGTH,
EVP_MD_FLAG_DIGALGID_ABSENT,
EVP_ORIG_GLOBAL,
LEGACY_EVP_MD_METH_TABLE(sha1_init, sha1_update, sha1_final, sha1_int_ctrl,
SHA_CBLOCK),
};
const EVP_MD *EVP_sha1(void)
{
return &sha1_md;
}
static const EVP_MD sha224_md = {
NID_sha224,
NID_sha224WithRSAEncryption,
SHA224_DIGEST_LENGTH,
EVP_MD_FLAG_DIGALGID_ABSENT,
EVP_ORIG_GLOBAL,
LEGACY_EVP_MD_METH_TABLE(sha224_init, sha224_update, sha224_final, NULL,
SHA256_CBLOCK),
};
const EVP_MD *EVP_sha224(void)
{
return &sha224_md;
}
static const EVP_MD sha256_md = {
NID_sha256,
NID_sha256WithRSAEncryption,
SHA256_DIGEST_LENGTH,
EVP_MD_FLAG_DIGALGID_ABSENT,
EVP_ORIG_GLOBAL,
LEGACY_EVP_MD_METH_TABLE(sha256_init, sha256_update, sha256_final, NULL,
SHA256_CBLOCK),
};
const EVP_MD *EVP_sha256(void)
{
return &sha256_md;
}
static const EVP_MD sha512_224_md = {
NID_sha512_224,
NID_sha512_224WithRSAEncryption,
SHA224_DIGEST_LENGTH,
EVP_MD_FLAG_DIGALGID_ABSENT,
EVP_ORIG_GLOBAL,
LEGACY_EVP_MD_METH_TABLE(sha512_224_int_init, sha512_224_int_update,
sha512_224_int_final, NULL, SHA512_CBLOCK),
};
const EVP_MD *EVP_sha512_224(void)
{
return &sha512_224_md;
}
static const EVP_MD sha512_256_md = {
NID_sha512_256,
NID_sha512_256WithRSAEncryption,
SHA256_DIGEST_LENGTH,
EVP_MD_FLAG_DIGALGID_ABSENT,
EVP_ORIG_GLOBAL,
LEGACY_EVP_MD_METH_TABLE(sha512_256_int_init, sha512_256_int_update,
sha512_256_int_final, NULL, SHA512_CBLOCK),
};
const EVP_MD *EVP_sha512_256(void)
{
return &sha512_256_md;
}
static const EVP_MD sha384_md = {
NID_sha384,
NID_sha384WithRSAEncryption,
SHA384_DIGEST_LENGTH,
EVP_MD_FLAG_DIGALGID_ABSENT,
EVP_ORIG_GLOBAL,
LEGACY_EVP_MD_METH_TABLE(sha384_init, sha384_update, sha384_final, NULL,
SHA512_CBLOCK),
};
const EVP_MD *EVP_sha384(void)
{
return &sha384_md;
}
static const EVP_MD sha512_md = {
NID_sha512,
NID_sha512WithRSAEncryption,
SHA512_DIGEST_LENGTH,
EVP_MD_FLAG_DIGALGID_ABSENT,
EVP_ORIG_GLOBAL,
LEGACY_EVP_MD_METH_TABLE(sha512_init, sha512_update, sha512_final, NULL,
SHA512_CBLOCK),
};
const EVP_MD *EVP_sha512(void)
{
return &sha512_md;
}
#define EVP_MD_SHA3(bitlen) \
const EVP_MD *EVP_sha3_##bitlen(void) \
{ \
static const EVP_MD sha3_##bitlen##_md = { \
NID_sha3_##bitlen, \
NID_RSA_SHA3_##bitlen, \
bitlen / 8, \
EVP_MD_FLAG_DIGALGID_ABSENT, \
EVP_ORIG_GLOBAL, \
LEGACY_EVP_MD_METH_TABLE(sha3_int_init, sha3_int_update, \
sha3_int_final, NULL, \
(KECCAK1600_WIDTH - bitlen * 2) / 8), \
}; \
return &sha3_##bitlen##_md; \
}
#define EVP_MD_SHAKE(bitlen) \
const EVP_MD *EVP_shake##bitlen(void) \
{ \
static const EVP_MD shake##bitlen##_md = { \
NID_shake##bitlen, \
0, \
bitlen / 8, \
EVP_MD_FLAG_XOF, \
EVP_ORIG_GLOBAL, \
LEGACY_EVP_MD_METH_TABLE(shake_init, sha3_int_update, sha3_int_final, \
shake_ctrl, (KECCAK1600_WIDTH - bitlen * 2) / 8), \
}; \
return &shake##bitlen##_md; \
}
EVP_MD_SHA3(224)
EVP_MD_SHA3(256)
EVP_MD_SHA3(384)
EVP_MD_SHA3(512)
EVP_MD_SHAKE(128)
EVP_MD_SHAKE(256)
|
5a4c69649f3b0a5af66a4142eea9063e0f0ff907
|
fcc9b5cb92607deaac4b097776ed0490789d8c3e
|
/src/runtime/safepoint.c
|
5a35658def441bb62720c194bf6a723284e9eacd
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-public-domain",
"UPL-1.0",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-mit-specification-disclaimer",
"MIT"
] |
permissive
|
sbcl/sbcl
|
ef248b5e8614ba7f0a1132c4f2cfcb000a074400
|
85003adf60ef659082c244972e816ea62240b9cb
|
refs/heads/master
| 2023-09-01T05:14:15.225083
| 2023-08-31T20:09:49
| 2023-08-31T20:09:49
| 1,890,957
| 1,737
| 408
|
NOASSERTION
| 2023-08-28T13:05:04
| 2011-06-13T20:33:25
|
Common Lisp
|
UTF-8
|
C
| false
| false
| 38,340
|
c
|
safepoint.c
|
/*
* This software is part of the SBCL system. See the README file for
* more information.
*
* This software is derived from the CMU CL system, which was
* written at Carnegie Mellon University and released into the
* public domain. The software is in the public domain and is
* provided with absolutely no warranty. See the COPYING and CREDITS
* files for more information.
*/
#include "sbcl.h"
#ifdef LISP_FEATURE_SB_SAFEPOINT /* entire file */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifndef LISP_FEATURE_WIN32
#include <sched.h>
#endif
#include <signal.h>
#include <stddef.h>
#include <errno.h>
#include <sys/types.h>
#ifndef LISP_FEATURE_WIN32
#include <sys/wait.h>
#endif
#include "runtime.h"
#include "validate.h"
#include "thread.h"
#include "arch.h"
#include "target-arch-os.h"
#include "os.h"
#include "globals.h"
#include "dynbind.h"
#include "genesis/cons.h"
#include "genesis/fdefn.h"
#include "interr.h"
#include "genesis/sap.h"
#include "gc.h"
#include "interrupt.h"
#include "lispregs.h"
#include "print.h"
const char* gc_phase_names[GC_NPHASES] = {
"GC_NONE",
"GC_FLIGHT",
"GC_MESSAGE",
"GC_INVOKED",
"GC_QUIET",
"GC_SETTLED",
"GC_COLLECT"
};
/* States and transitions:
*
* GC_NONE: Free running code.
*
* GC_NONE -> GC_FLIGHT: unmap_gc_page(), arming the GSP trap.
*
* GC_FLIGHT: GC triggered normally, waiting for post-allocation
* safepoint trap.
*
* GC_FLIGHT -> GC_MESSAGE: gc_notify_early(), arming the per-thread
* CSP traps.
*
* GC_MESSAGE: Waiting for lisp threads to stop (WITHOUT-GCING threads
* will resume at GC_INVOKED).
*
* GC_MESSAGE -> GC_INVOKED: map_gc_page(), disarming the GSP trap.
*
* GC_INVOKED: Waiting for WITHOUT-GCING threads to leave
* WITHOUT-GCING.
*
* GC_INVOKED -> GC_QUIET: nothing changes.
*
* GC_QUIET: GCing threads race to stop the world (and melt with you).
*
* GC_QUIET -> GC_SETTLED: unmap_gc_page(), gc_notify_final(), arming
* GSP and CSP traps again.
*
* GC_SETTLED: Waiting for remaining lisp threads to stop.
*
* GC_SETTLED -> GC_COLLECT: map_gc_page(), disarming the GSP trap.
*
* GC_COLLECT: World is stopped, save for one thread in SUB-GC / FLET
* PERFORM-GC, running the garbage collector.
*
* GC_COLLECT -> GC_NONE: gc_none(), clearing CSP traps and possibly
* GC_PENDING.
*
* GC_NONE: Free running code.
*
* Note that the system may not actually stop in every state for a GC.
* For example, a system with only one thread directly invoking
* SB-EXT:GC will advance quickly from GC_NONE to GC_COLLECT, simply
* because no other threads exist to prevent it. That same scenario
* with a thread inside WITHOUT-GCING sitting in alien code at the
* time will move to GC_INVOKED and then wait for the WITHOUT-GCING
* thread to finish up, then proceed to GC_COLLECT. */
#define CURRENT_THREAD_VAR(name) \
struct thread *name = get_sb_vm_thread()
#define THREAD_STOP_PENDING(th) \
read_TLS(STOP_FOR_GC_PENDING, th)
#define SET_THREAD_STOP_PENDING(th,state) \
write_TLS(STOP_FOR_GC_PENDING,state,th)
#define WITH_ALL_THREADS_LOCK \
ignore_value(mutex_acquire(&all_threads_lock)); \
RUN_BODY_ONCE(all_threads_lock, ignore_value(mutex_release(&all_threads_lock)))
#if !defined(LISP_FEATURE_WIN32)
/* win32-os.c covers these, but there is no unixlike-os.c, so the normal
* definition goes here. Fixme: (Why) don't these work for Windows?
*/
void
map_gc_page()
{
odxprint(misc, "map_gc_page");
os_protect((void *) GC_SAFEPOINT_PAGE_ADDR,
BACKEND_PAGE_BYTES,
OS_VM_PROT_READ);
}
void
unmap_gc_page()
{
odxprint(misc, "unmap_gc_page");
os_protect((void *) GC_SAFEPOINT_PAGE_ADDR, BACKEND_PAGE_BYTES, OS_VM_PROT_NONE);
}
#endif /* !LISP_FEATURE_WIN32 */
static struct gc_state {
#ifdef LISP_FEATURE_WIN32
/* Per-process lock for gc_state */
CRITICAL_SECTION lock;;
/* Conditions: one per phase */
CONDITION_VARIABLE phase_cond[GC_NPHASES];
#else
pthread_mutex_t lock;
pthread_cond_t phase_cond[GC_NPHASES];
#endif
/* For each [current or future] phase, a number of threads not yet ready to
* leave it */
int phase_wait[GC_NPHASES];
/* Master thread controlling the topmost stop/gc/start sequence */
struct thread* master;
struct thread* collector;
/* Current GC phase */
gc_phase_t phase;
} gc_state
#ifdef LISP_FEATURE_UNIX
= { PTHREAD_MUTEX_INITIALIZER,
{ PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER,
PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER,
PTHREAD_COND_INITIALIZER },
{ 0, 0, 0, 0, 0, 0, 0 }, NULL, NULL, GC_NONE }
#endif
;
void safepoint_init()
{
# ifdef LISP_FEATURE_WIN32
int i;
extern void alloc_gc_page(void);
alloc_gc_page();
for (i=GC_NONE; i<GC_NPHASES; ++i)
InitializeConditionVariable(&gc_state.phase_cond[i]);
InitializeCriticalSection(&gc_state.lock);
#else
os_alloc_gc_space(0, NOT_MOVABLE, GC_SAFEPOINT_PAGE_ADDR, BACKEND_PAGE_BYTES);
#endif
gc_state.phase = GC_NONE;
}
void
gc_state_lock()
{
odxprint(safepoints,"GC state to be locked");
int result = mutex_acquire(&gc_state.lock);
gc_assert(result);
if (gc_state.master) {
fprintf(stderr,"GC state lock glitch [%p] in thread %p phase %d (%s)\n",
gc_state.master,get_sb_vm_thread(),gc_state.phase,
gc_phase_names[gc_state.phase]);
odxprint(safepoints,"GC state lock glitch [%p]",gc_state.master);
}
gc_assert(!gc_state.master);
gc_state.master = get_sb_vm_thread();
odxprint(safepoints,"GC state locked in phase %d (%s)",
gc_state.phase, gc_phase_names[gc_state.phase]);
}
void
gc_state_unlock()
{
odxprint(safepoints,"GC state to be unlocked in phase %d (%s)",
gc_state.phase, gc_phase_names[gc_state.phase]);
gc_assert(get_sb_vm_thread()==gc_state.master);
gc_state.master = NULL;
int result = mutex_release(&gc_state.lock);
gc_assert(result);
odxprint(safepoints,"%s","GC state unlocked");
}
void
gc_state_wait(gc_phase_t phase)
{
struct thread* self = get_sb_vm_thread();
odxprint(safepoints,"Waiting for %d (%s) -> %d (%s) [%d holders]",
gc_state.phase, gc_phase_names[gc_state.phase],
phase, gc_phase_names[phase],
gc_state.phase_wait[gc_state.phase]);
gc_assert(gc_state.master == self);
gc_state.master = NULL;
while(gc_state.phase != phase && !(phase == GC_QUIET && (gc_state.phase > GC_QUIET))) {
CONDITION_VAR_WAIT(&gc_state.phase_cond[phase], &gc_state.lock);
}
gc_assert(gc_state.master == NULL);
gc_state.master = self;
}
int
gc_cycle_active(void)
{
return gc_state.phase != GC_NONE;
}
static void
set_csp_from_context(struct thread *self, os_context_t *ctx)
{
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
void **sp = (void **) *os_context_register_addr(ctx, reg_SP);
/* On POSIX platforms, it is sufficient to investigate only the part
* of the stack that was live before the interrupt, because in
* addition, we consider interrupt contexts explicitly. On Windows,
* however, we do not keep an explicit stack of exception contexts,
* and instead arrange for the conservative stack scan to also cover
* the context implicitly. The obvious way to do that is to start
* at the context itself: */
#ifdef LISP_FEATURE_WIN32
gc_assert((void **) ctx < sp);
sp = (void**) ctx;
#endif
gc_assert((void **)self->control_stack_start
<= sp && sp
< (void **)self->control_stack_end);
#else
/* Note that the exact value doesn't matter much here, since
* platforms with precise GC use get_csp() only as a boolean -- the
* precise GC already keeps track of the stack pointer itself.
* That said, we're either in a foreign function call or have
* called fake_foreign_function_call(), and having accurate values
* here makes the debugging experience easier and less
* disconcerting. */
void **sp = (void **) access_control_stack_pointer(self);
#endif
csp_around_foreign_call(self) = (lispobj) sp;
}
static inline gc_phase_t gc_phase_next(gc_phase_t old) {
return (old+1) % GC_NPHASES;
}
static inline bool
thread_blocks_gc(struct thread *thread)
{
return read_TLS(GC_INHIBIT,thread)==LISP_T;
}
/* set_thread_csp_access -- alter page permissions for not-in-Lisp
flag (Lisp Stack Top) of the thread `th'. The flag may be modified
if `writable' is true.
Return true if there is a non-null value in the flag.
When a thread enters C code or leaves it, a per-thread location is
modified. That machine word serves as a not-in-Lisp flag; for
convenience, when in C, it's filled with a topmost stack location
that may contain Lisp data. When thread is in Lisp, the word
contains NULL.
GENCGC uses each thread's flag value for conservative garbage collection.
There is a full VM page reserved for this word; page permissions
are switched to read-only for race-free examine + wait + use
scenarios. */
static inline bool
set_thread_csp_access(struct thread* th, bool writable)
{
os_protect((char*)th - (THREAD_HEADER_SLOTS*N_WORD_BYTES) - THREAD_CSP_PAGE_SIZE,
THREAD_CSP_PAGE_SIZE,
writable? (OS_VM_PROT_READ|OS_VM_PROT_WRITE)
: (OS_VM_PROT_READ));
return csp_around_foreign_call(th) != 0;
}
static inline void gc_notify_early()
{
struct thread *self = get_sb_vm_thread(), *p;
odxprint(safepoints,"%s","global notification");
gc_assert(gc_state.phase == GC_MESSAGE);
/* We're setting up the per-thread traps to make sure that all
* lisp-side threads get stopped (if they are WITHOUT-GCING then
* they can resume once the GSP trap is disarmed), and all
* alien-side threads that are inside WITHOUT-GCING get their
* chance to run until they exit WITHOUT-GCING. */
WITH_ALL_THREADS_LOCK {
for_each_thread(p) {
/* This thread is already on a waitcount somewhere. */
if (p==self)
continue;
/* If there's a collector thread then it is already on a
* waitcount somewhere. And it may-or-may-not be this
* thread. */
if (p==gc_state.collector)
continue;
odxprint(safepoints,"notifying thread %p csp %p",p,csp_around_foreign_call(p));
bool was_in_lisp = !set_thread_csp_access(p,0);
if (was_in_lisp) {
/* Threads "in-lisp" block leaving GC_MESSAGE, as we
* need them to hit their CSP or the GSP, and we unmap
* the GSP when transitioning to GC_INVOKED. */
gc_state.phase_wait[GC_MESSAGE]++;
SET_THREAD_STOP_PENDING(p, LISP_T);
} else if (thread_blocks_gc(p)) {
/* Threads "in-alien" don't block leaving GC_MESSAGE,
* as the CSP trap is sufficient to catch them, but
* any thread that is WITHOUT-GCING prevents exit from
* GC_INVOKED. */
gc_state.phase_wait[GC_INVOKED]++;
SET_THREAD_STOP_PENDING(p, LISP_T);
}
}
}
}
/* Second round of stop-the-world notification, run in phase GC_SETTLED
 * with the GC state locked.  Any thread (other than the collector) that
 * is still "in lisp" at this point gets its CSP trap re-armed and is
 * added to the GC_SETTLED waitcount; threads already stopped show up as
 * "in alien" and are left alone. */
static inline void gc_notify_final()
{
    struct thread *p;
    odxprint(safepoints,"%s","global notification");
    gc_assert(gc_state.phase == GC_SETTLED);
    /* Reset the count before re-accounting the still-running threads. */
    gc_state.phase_wait[GC_SETTLED]=0;
    /* All remaining lisp threads, except for the collector, now need
     * to be stopped, so that the collector can run the GC.  Any
     * thread already stopped shows up as being "in-alien", so we
     * don't bother with them here. */
    WITH_ALL_THREADS_LOCK {
        for_each_thread(p) {
            if (p == gc_state.collector)
                continue;
            odxprint(safepoints,"notifying thread %p csp %p",p,csp_around_foreign_call(p));
            bool was_in_lisp = !set_thread_csp_access(p,0);
            if (was_in_lisp) {
                gc_state.phase_wait[GC_SETTLED]++;
                SET_THREAD_STOP_PENDING(p, LISP_T);
            }
        }
    }
}
/* Tear-down after a GC cycle (phase GC_NONE): restore write access to
 * every thread's CSP page, disarming the traps.  If the current thread
 * is WITHOUT-GCING (GC_INHIBIT set), also clear each thread's
 * GC_PENDING flag — presumably so the deferred GC is not immediately
 * re-triggered; TODO confirm against the lisp-side protocol. */
static inline void gc_done()
{
    CURRENT_THREAD_VAR(self);
    struct thread *p;
    bool inhibit = (read_TLS(GC_INHIBIT,self)==LISP_T);

    odxprint(safepoints,"%s","global denotification");
    WITH_ALL_THREADS_LOCK {
        for_each_thread(p) {
            if (inhibit && (read_TLS(GC_PENDING,p)==LISP_T))
                write_TLS(GC_PENDING,NIL,p);
            set_thread_csp_access(p,1);
        }
    }
}
/* Perform the side effects of entering gc_state.phase: arming or
 * disarming the global safepoint (GSP) page and sending the per-thread
 * notifications.  Called from gc_advance() each time the state machine
 * moves forward; runs with the GC state locked. */
static inline void gc_handle_phase()
{
    odxprint(safepoints,"Entering phase %d (%s)",
             gc_state.phase, gc_phase_names[gc_state.phase]);
    switch (gc_state.phase) {
    case GC_FLIGHT:
        /* Arm the GSP trap so threads in lisp stop at their next
         * safepoint check. */
        unmap_gc_page();
        break;
    case GC_MESSAGE:
        gc_notify_early();
        break;
    case GC_INVOKED:
        /* Disarm the GSP trap; remaining threads are caught by their
         * per-thread CSP traps. */
        map_gc_page();
        break;
    case GC_SETTLED:
        gc_notify_final();
        unmap_gc_page();
        break;
    case GC_COLLECT:
        map_gc_page();
        break;
    case GC_NONE:
        gc_done();
        break;
    default:
        break;
    }
}
/* Become ready to leave the <old> phase, but unready to leave the <cur>
 * phase.  `old' may be GC_NONE, meaning this thread wasn't blocking any
 * phase.  `cur' may be GC_NONE, meaning this thread won't block GC_NONE
 * but still waits for it.  While no other thread holds the current
 * phase back, rolls the state machine forward (running each phase's
 * side effects and waking waiters), then blocks until `cur' is reached.
 * Must be called with the GC state locked. */
static inline void gc_advance(gc_phase_t cur, gc_phase_t old) {
    odxprint(safepoints,"GC advance request %d (%s) -> %d (%s) in phase %d (%s)",
             old, gc_phase_names[old], cur, gc_phase_names[cur],
             gc_state.phase, gc_phase_names[gc_state.phase]);
    if (cur == old)
        return;
    if (cur == gc_state.phase)
        return;
    /* A phase we were blocking that has already passed needs no
     * release. */
    if (old < gc_state.phase)
        old = GC_NONE;
    if (old != GC_NONE) {
        gc_state.phase_wait[old]--;
        odxprint(safepoints,"%d holders of phase %d (%s) without me",gc_state.phase_wait[old],old,gc_phase_names[old]);
    }
    if (cur != GC_NONE) {
        gc_state.phase_wait[cur]++;
        odxprint(safepoints,"%d holders of phase %d (%s) with me",gc_state.phase_wait[cur],cur,gc_phase_names[cur]);
    }
    /* roll forth as long as there's no waiters */
    while (gc_state.phase_wait[gc_state.phase]==0
           && gc_state.phase != cur) {
        gc_state.phase = gc_phase_next(gc_state.phase);
        odxprint(safepoints,"no blockers, direct advance to %d (%s)",gc_state.phase,gc_phase_names[gc_state.phase]);
        gc_handle_phase();
        CONDITION_VAR_WAKE_ALL(&gc_state.phase_cond[gc_state.phase]);
    }
    odxprint(safepoints,"going to wait for %d threads",gc_state.phase_wait[gc_state.phase]);
    gc_state_wait(cur);
}
/* Called by the allocator when it decides a GC is warranted.  If no GC
 * cycle is in progress and this thread is neither inside a safepoint
 * nor WITHOUT-GCING, kick the state machine into GC_FLIGHT, arming the
 * GSP trap. */
void
thread_register_gc_trigger()
{
    odxprint(misc, "/thread_register_gc_trigger");
    struct thread *self = get_sb_vm_thread();
    WITH_GC_STATE_LOCK {
        if (gc_state.phase == GC_NONE &&
            read_TLS(IN_SAFEPOINT,self)!=LISP_T &&
            !thread_blocks_gc(self)) {
            /* A thread (this thread), while doing allocation, has
             * determined that we need to run the garbage collector.
             * But it's in the middle of initializing an object, so we
             * advance to GC_FLIGHT, arming the GSP trap with the idea
             * that there is a GSP trap check once the allocated
             * object is initialized.  Any thread that has GC_PENDING
             * set and GC_INHIBIT clear can take over from here (see
             * thread_in_lisp_raised()), but some thread must. */
            gc_advance(GC_FLIGHT,GC_NONE);
        }
    }
}
#ifdef LISP_FEATURE_SB_SAFEPOINT
/* Predicate: may the current thread run a thruption (asynchronous
 * interruption) right now?  CTX is the interrupted context, used only
 * for its signal mask; it may be NULL to skip the sigmask test. */
static inline int
thread_may_thrupt(os_context_t *ctx)
{
    struct thread * self = get_sb_vm_thread();
    /* Thread may be interrupted if all of these are true:
     *   1) Deferrables are unblocked in the context of the signal that
     *      went into the safepoint.  -- Otherwise the surrounding code
     *      didn't want to be interrupted by a signal, so presumably it didn't
     *      want to be INTERRUPT-THREADed either.
     *      (See interrupt_handle_pending for an exception.)
     *   2) On POSIX: There is no pending signal.  This is important even
     *      after checking the sigmask, since we could be in the
     *      handle_pending trap following re-enabling of interrupts.
     *      Signals are unblocked in that case, but the signal is still
     *      pending; we want to run GC before handling the signal and
     *      therefore entered this safepoint.  But the thruption would call
     *      ALLOW-WITH-INTERRUPTS, and could re-enter the handle_pending
     *      trap, leading to recursion.
     *   3) INTERRUPTS_ENABLED is non-nil.
     *   4) No GC pending; it takes precedence.
     * Note that we are in a safepoint here, which is always outside of PA. */
    if (read_TLS(INTERRUPTS_ENABLED, self) == NIL)
        return 0;
    if (read_TLS(GC_PENDING, self) != NIL)
        return 0;
    if (THREAD_STOP_PENDING(self) != NIL)
        return 0;
#ifdef LISP_FEATURE_WIN32
    if (deferrables_blocked_p(&thread_extra_data(self)->blocked_signal_set))
        return 0;
#else
    /* ctx is NULL if the caller wants to ignore the sigmask. */
    if (ctx && deferrables_blocked_p(os_context_sigmask_addr(ctx)))
        return 0;
    if (read_TLS(INTERRUPT_PENDING, self) != NIL)
        return 0;
#endif
    return 1;
}
/* If a thruption is pending and permitted (see thread_may_thrupt()),
 * run it via the lisp-side RUN_INTERRUPTION with deferrable signals
 * blocked, faking a foreign-function call if we were interrupted in
 * lisp code.  Returns 0 if skipped, 1 otherwise. */
int
check_pending_thruptions(os_context_t *ctx)
{
    struct thread *p = get_sb_vm_thread();

#ifdef LISP_FEATURE_WIN32
    sigset_t oldset;
    /* On Windows, wake_thread/kill_safely does not set THRUPTION_PENDING
     * in the self-kill case; instead we do it here while also clearing the
     * "signal". */
    if (thread_extra_data(p)->pending_signal_set)
        if (__sync_fetch_and_and(&thread_extra_data(p)->pending_signal_set,0))
            write_TLS(THRUPTION_PENDING, LISP_T, p);
#endif

    if (!thread_may_thrupt(ctx))
        return 0;
    if (read_TLS(THRUPTION_PENDING, p) == NIL)
        return 0;
    write_TLS(THRUPTION_PENDING, NIL, p);

#ifdef LISP_FEATURE_WIN32
    oldset = thread_extra_data(p)->blocked_signal_set;
    thread_extra_data(p)->blocked_signal_set = deferrable_sigset;
#else
    sigset_t oldset;
    block_deferrable_signals(&oldset);
#endif

    /* ctx == NULL means we're called outside a signal handler; then
     * there is no lisp context to fake a call out of. */
    int was_in_lisp = ctx && !foreign_function_call_active_p(p);
    if (was_in_lisp) {
        fake_foreign_function_call(ctx);
    }

    DX_ALLOC_SAP(context_sap, ctx);
    WITH_GC_AT_SAFEPOINTS_ONLY() {
        funcall1(StaticSymbolFunction(RUN_INTERRUPTION), context_sap);
    }

    if (was_in_lisp)
        undo_fake_foreign_function_call(ctx);

#ifdef LISP_FEATURE_WIN32
    thread_extra_data(p)->blocked_signal_set = oldset;
    if (ctx) ctx->sigmask = oldset;
#else
    thread_sigmask(SIG_SETMASK, &oldset, 0);
#endif
    return 1;
}
#endif
/* Does ESP point into TH's control stack (start inclusive, end
 * exclusive)? */
int
on_stack_p(struct thread *th, void *esp)
{
    void *lo = (void *)th->control_stack_start;
    void *hi = (void *)th->control_stack_end;
    return lo <= esp && esp < hi;
}
#ifndef LISP_FEATURE_WIN32
/* (Technically, we still allocate an altstack even on Windows. Since
* Windows has a contiguous stack with an automatic guard page of
* user-configurable size instead of an alternative stack though, the
* SBCL-allocated altstack doesn't actually apply and won't be used.) */
/* Does ESP point into the SBCL-allocated alternate signal stack, which
 * lives right after the thread's dynamic-values area and spans
 * 32*SIGSTKSZ bytes? */
int
on_altstack_p(struct thread *th, void *esp)
{
    char *base = (char *)th + dynamic_values_bytes;
    char *limit = base + 32*SIGSTKSZ;
    return (void *)base <= esp && esp < (void *)limit;
}
#endif
/* Sanity check: ESP must lie on TH's control stack; otherwise lose()
 * with a diagnostic distinguishing the altstack case (POSIX only). */
void
assert_on_stack(struct thread *th, void *esp)
{
    if (!on_stack_p(th, esp)) {
#ifndef LISP_FEATURE_WIN32
        if (on_altstack_p(th, esp))
            lose("thread %p: esp on altstack: %p", th, esp);
#endif
        lose("thread %p: bogus esp: %p (range=%p..%p)", th, esp,
             th->control_stack_start, th->control_stack_end);
    }
}
/* Like the test in gc-common, but without the sigmask check: POST-GC
 * may only be funcalled when this thread's lisp-side THREAD instance
 * exists and is still attached to a primitive thread. */
static bool can_invoke_post_gc(struct thread* th)
{
    lispobj obj = th->lisp_thread;
    if (obj) {
        struct thread_instance* lispthread =
            (void*)(obj - INSTANCE_POINTER_LOWTAG);
        if (lispthread->uw_primitive_thread)
            return 1;
    }
    return 0;
}
/* If a GC is pending and this thread is allowed to run it (not
 * WITHOUT-GCING, not already inside a safepoint), call the lisp-side
 * SUB_GC with deferrable signals blocked, and, if a collection actually
 * happened and interrupts are (or may be) enabled, run POST_GC.
 * Returns 0 if skipped, 1 otherwise. */
int check_pending_gc(__attribute((unused)) os_context_t *ctx)
{
    odxprint(misc, "check_pending_gc");
    struct thread * self = get_sb_vm_thread();
    int done = 0;
    sigset_t sigset;

    /* Clear a stale IN_SAFEPOINT marker once both GC_INHIBIT and
     * GC_PENDING are clear. */
    if ((read_TLS(IN_SAFEPOINT,self) == LISP_T) &&
        ((read_TLS(GC_INHIBIT,self) == NIL) &&
         (read_TLS(GC_PENDING,self) == NIL))) {
        write_TLS(IN_SAFEPOINT,NIL,self);
    }
    if (!thread_blocks_gc(self) && (read_TLS(IN_SAFEPOINT, self) == NIL)) {
        if (read_TLS(GC_PENDING, self) == LISP_T) {
            lispobj gc_happened = NIL;

            bind_variable(IN_SAFEPOINT,LISP_T,self);
            block_deferrable_signals(&sigset);
            /* Re-test: GC_PENDING may have been cleared since the
             * outer check. */
            if(read_TLS(GC_PENDING,self)==LISP_T)
                gc_happened = funcall1(StaticSymbolFunction(SUB_GC), 0);
            unbind(self);
            thread_sigmask(SIG_SETMASK,&sigset,NULL);
            if (gc_happened == LISP_T) {
                /* POST_GC wants to enable interrupts */
                if ((read_TLS(INTERRUPTS_ENABLED,self) == LISP_T ||
                     read_TLS(ALLOW_WITH_INTERRUPTS,self) == LISP_T)
                    && can_invoke_post_gc(self))
                    funcall0(StaticSymbolFunction(POST_GC));
                done = 1;
            }
        }
    }
    return done;
}
/* Safepoint trap handler for a thread interrupted while in lisp code:
 * either the GSP trap fired or a PIT stop found a stop-for-GC or
 * thruption pending.  Depending on the global phase and this thread's
 * GC_PENDING/GC_INHIBIT state, the thread either volunteers to run the
 * GC (advancing to GC_QUIET), parks until the cycle finishes, or — if
 * WITHOUT-GCING — pushes the cycle to GC_INVOKED so it can keep
 * running.  Finally runs any pending GC and thruptions. */
void thread_in_lisp_raised(os_context_t *ctxptr)
{
    struct thread *self = get_sb_vm_thread();
    bool check_gc_and_thruptions = 0;
    odxprint(safepoints,"%s","thread_in_lisp_raised");

    /* Either we just hit the GSP trap, or we took a PIT stop and
     * there is a stop-for-GC or thruption pending. */
    WITH_GC_STATE_LOCK {
        if (gc_state.phase == GC_FLIGHT &&
            read_TLS(GC_PENDING,self)==LISP_T &&
            !thread_blocks_gc(self) && read_TLS(IN_SAFEPOINT,self)!=LISP_T) {
            /* Some thread (possibly even this one) that does not have
             * GC_INHIBIT set has noticed that a GC is warranted and
             * advanced the phase to GC_FLIGHT, arming the GSP trap,
             * which this thread has hit.  This thread doesn't have
             * GC_INHIBIT set, and has also noticed that a GC is
             * warranted.  It doesn't matter which thread pushes
             * things forwards at this point, just that it happens.
             * This thread is now a candidate for running the GC, so
             * we advance to GC_QUIET, where the only threads still
             * running are competing to run the GC. */
            set_csp_from_context(self, ctxptr);
            gc_advance(GC_QUIET,GC_FLIGHT);
            set_thread_csp_access(self,1);
            /* If a thread has already reached gc_stop_the_world(),
             * just wait until the world starts again. */
            if (gc_state.collector) {
                gc_advance(GC_NONE,GC_QUIET);
            } else {
                /* ??? Isn't this already T? */
                write_TLS(GC_PENDING,LISP_T,self);
            }
            csp_around_foreign_call(self) = 0;
            check_gc_and_thruptions = 1;
        } else {
            /* This thread isn't a candidate for running the GC
             * (yet?), so we can't advance past GC_FLIGHT, so wait for
             * the next phase, GC_MESSAGE, before we do anything. */
            if (gc_state.phase == GC_FLIGHT) {
                gc_state_wait(GC_MESSAGE);
            }
            if (!thread_blocks_gc(self)) {
                /* This thread doesn't have GC_INHIBIT set, so sit
                 * tight and wait for the GC to be over.  The current
                 * phase is GC_MESSAGE, GC_INVOKED, GC_QUIET, or
                 * GC_SETTLED. */
                SET_THREAD_STOP_PENDING(self,NIL);
                set_thread_csp_access(self,1);
                set_csp_from_context(self, ctxptr);
                if (gc_state.phase <= GC_SETTLED)
                    gc_advance(GC_NONE,gc_state.phase);
                else
                    gc_state_wait(GC_NONE);
                csp_around_foreign_call(self) = 0;
                check_gc_and_thruptions = 1;
            } else {
                /* This thread has GC_INHIBIT set, meaning that it's
                 * within a WITHOUT-GCING, so advance from wherever we
                 * are (GC_MESSAGE) to GC_INVOKED so that we can
                 * continue running.  When we leave the WITHOUT-GCING
                 * we'll take a PIT stop and wind up in the case
                 * above...  Or we'll call gc_stop_the_world(). */
                gc_advance(GC_INVOKED,gc_state.phase);
                SET_THREAD_STOP_PENDING(self,LISP_T);
                /* Why do we not want to run thruptions here? */
            }
        }
    }
    /* If we still need to GC, and it's not inhibited, call into
     * SUB-GC.  Phase is either GC_QUIET or GC_NONE. */
    if (check_gc_and_thruptions) {
        check_pending_gc(ctxptr);
#ifdef LISP_FEATURE_SB_SAFEPOINT
        while(check_pending_thruptions(ctxptr));
#endif
    }
}
/* CSP trap handler: called when a thread crosses the lisp/alien
 * boundary (in either direction) while a GC cycle is in progress.
 * Restores write access to our CSP page, then — depending on which
 * side we came from and whether we're WITHOUT-GCING — waits for the
 * appropriate phase or pushes the cycle forward, mirroring the logic
 * in thread_in_lisp_raised().  Runs pending thruptions afterwards for
 * alien->lisp transitions. */
void thread_in_safety_transition(os_context_t *ctxptr)
{
    struct thread *self = get_sb_vm_thread();
    bool was_in_alien;

    odxprint(safepoints,"%s","GC safety transition");
    WITH_GC_STATE_LOCK {
        was_in_alien = set_thread_csp_access(self,1);
        if (was_in_alien) {
            /* This is an alien->lisp or alien->alien transition. */
            if (thread_blocks_gc(self)) {
                /* gc_notify_early() accounted for this thread as not
                 * being able to leave GC_INVOKED when it armed our
                 * CSP trap, but some other threads may still be
                 * holding things back at GC_MESSAGE, so wait for
                 * GC_INVOKED before continuing.  Don't advance, the
                 * threads preventing exit from GC_MESSAGE have that
                 * privilege. */
                gc_state_wait(GC_INVOKED);
            } else {
                /* This thread isn't within a WITHOUT-GCING, so just
                 * wait until the GC is done before continuing. */
                gc_state_wait(GC_NONE);
            }
        } else {
            /* This is a lisp->alien or lisp->lisp transition. */
            if (!thread_blocks_gc(self)) {
                /* This thread doesn't have GC_INHIBIT set, so sit
                 * tight and wait for the GC to be over.  This is
                 * virtually the same logic as the similar case in
                 * thread_in_lisp_raised(). */
                SET_THREAD_STOP_PENDING(self,NIL);
                set_csp_from_context(self, ctxptr);
                if (gc_state.phase <= GC_SETTLED)
                    gc_advance(GC_NONE,gc_state.phase);
                else
                    gc_state_wait(GC_NONE);
                csp_around_foreign_call(self) = 0;
            } else {
                /* This thread has GC_INHIBIT set, meaning that it's
                 * within a WITHOUT-GCING, so advance from wherever we
                 * are (GC_MESSAGE) to GC_INVOKED so that we can
                 * continue running.  When we leave the WITHOUT-GCING
                 * we'll take a PIT stop and wind up in the case
                 * above...  Or we'll call gc_stop_the_world().  This
                 * logic is identical to the similar case in
                 * thread_in_lisp_raised(). */
                gc_advance(GC_INVOKED,gc_state.phase);
                SET_THREAD_STOP_PENDING(self,LISP_T);
            }
        }
    }
#ifdef LISP_FEATURE_SB_SAFEPOINT
    if (was_in_alien) {
        while(check_pending_thruptions(ctxptr));
    }
#endif
}
#ifdef LISP_FEATURE_WIN32
/* Windows-only pending-interrupt (PIT stop) trap handler.  If a GC
 * cycle is active, dispatch to the appropriate handler based on
 * whether we were in alien or lisp code; then run any pending GC and
 * thruptions.  (was_in_alien is only assigned and read when gc_active
 * is true.) */
void thread_interrupted(os_context_t *ctxptr)
{
    struct thread *self = get_sb_vm_thread();
    bool gc_active, was_in_alien;

    odxprint(safepoints,"%s","pending interrupt trap");
    WITH_GC_STATE_LOCK {
        gc_active = gc_cycle_active();
        if (gc_active) {
            was_in_alien = set_thread_csp_access(self,1);
        }
    }
    if (gc_active) {
        if (was_in_alien) {
            thread_in_safety_transition(ctxptr);
        } else {
            thread_in_lisp_raised(ctxptr);
        }
    }
    check_pending_gc(ctxptr);
#ifdef LISP_FEATURE_SB_SAFEPOINT
    while(check_pending_thruptions(ctxptr));
#endif
}
#endif
/* Stop every other thread so the calling thread can run the collector.
 * Registers the caller as gc_state.collector, then drives (or waits
 * for) the state machine forward to GC_COLLECT, where this is the only
 * thread running.  Paired with gc_start_the_world(). */
void
gc_stop_the_world()
{
    struct thread* self = get_sb_vm_thread();
    odxprint(safepoints, "stop the world");
    WITH_GC_STATE_LOCK {
        /* This thread is the collector, and needs special handling in
         * gc_notify_early() and gc_notify_final() because of it. */
        gc_state.collector = self;
        /* And we need to control advancement past GC_QUIET. */
        gc_state.phase_wait[GC_QUIET]++;

        /* So, we won the race to get to gc_stop_the_world().  Now we
         * need to get to GC_COLLECT, where we're the only thread
         * running, so that we can run the collector.  What we do
         * depends on what's already been done. */
        switch(gc_state.phase) {
        case GC_NONE:
            gc_advance(GC_QUIET,gc_state.phase);
            /* FALLTHRU */
        case GC_FLIGHT:
        case GC_MESSAGE:
        case GC_INVOKED:
            if ((gc_state.phase == GC_MESSAGE)
                || (gc_state.phase == GC_INVOKED)) {
                /* If the phase was GC_MESSAGE or GC_INVOKED, we were
                 * accounted as "in alien", and are on the GC_INVOKED
                 * waitcount, or we were "in lisp" but in WITHOUT-GCING,
                 * which led to us putting OURSELVES on the GC_INVOKED
                 * waitcount. */
                gc_advance(GC_QUIET, GC_INVOKED);
            } else {
                gc_state_wait(GC_QUIET);
            }
            /* FALLTHRU */
        case GC_QUIET:
            /* Some number of threads were trying to get to GC_QUIET.
             * But this thread is sufficient to be able to leave
             * GC_QUIET. */
            gc_state.phase_wait[GC_QUIET]=1;
            /* Advance through GC_SETTLED to GC_COLLECT, stopping the
             * other threads that were racing to stop the world. */
            gc_advance(GC_COLLECT,GC_QUIET);
            break;
        case GC_COLLECT:
            break;
        default:
            lose("Stopping the world in unexpected state %d",gc_state.phase);
            break;
        }
        set_thread_csp_access(self,1);
    }
    SET_THREAD_STOP_PENDING(self,NIL);
}
/* Release the world after a collection: unregister the collector and
 * roll the state machine from GC_COLLECT back to GC_NONE, waking all
 * parked threads.  Counterpart of gc_stop_the_world(). */
void gc_start_the_world()
{
    odxprint(safepoints,"%s","start the world");
    WITH_GC_STATE_LOCK {
        gc_state.collector = NULL;
        gc_advance(GC_NONE,GC_COLLECT);
    }
}
#ifdef LISP_FEATURE_SB_SAFEPOINT
/* wake_thread(thread) -- ensure a thruption delivery to
* `thread'. */
# ifdef LISP_FEATURE_WIN32
/* Windows: nudge THREAD out of any blocking I/O by signalling its
 * private event and interrupting in-flight I/O operations. */
void
wake_thread_io(struct thread * thread)
{
    SetEvent(thread_private_events(thread,1));
    win32_maybe_interrupt_io(thread);
}
/* Windows implementation of wake_thread: ensure a thruption delivery
 * to LISPTHREAD's primitive thread.  Marks THRUPTION_PENDING, and — if
 * no GC or stop is already pending that would deliver it — bounces the
 * GC state machine through GC_INVOKED to force every thread through a
 * safepoint.  Called with all_threads_lock held; the lock is dropped
 * around the state-machine bounce.  (wake_thread_io is invoked both
 * before and after the flag checks — presumably to cover a race with a
 * thread entering blocking I/O; TODO confirm.) */
void wake_thread_impl(struct thread_instance *lispthread)
{
    struct thread* thread = (void*)lispthread->uw_primitive_thread;
    wake_thread_io(thread);

    if (read_TLS(THRUPTION_PENDING,thread)==LISP_T)
        return;

    write_TLS(THRUPTION_PENDING,LISP_T,thread);

    if ((read_TLS(GC_PENDING,thread)==LISP_T)
        ||(THREAD_STOP_PENDING(thread)==LISP_T)
        )
        return;

    wake_thread_io(thread);
    mutex_release(&all_threads_lock);

    WITH_GC_STATE_LOCK {
        if (gc_state.phase == GC_NONE) {
            gc_advance(GC_INVOKED,GC_NONE);
            gc_advance(GC_NONE,GC_INVOKED);
        }
    }

    mutex_acquire(&all_threads_lock);
    return;
}
# else
/* POSIX implementation of wake_thread: ensure a thruption delivery to
 * LISPTHREAD's primitive thread.  Self-wakes run the thruption
 * directly; otherwise we mark THRUPTION_PENDING and, if no GC cycle is
 * active, bounce the state machine through GC_INVOKED, signalling the
 * target with SIGURG if it is currently in foreign code (nonzero CSP).
 * Deferrable signals are blocked for the duration. */
void wake_thread_impl(struct thread_instance *lispthread)
{
    struct thread *thread = (void*)lispthread->uw_primitive_thread;
    struct thread *self = get_sb_vm_thread();

    /* Must not and need not attempt to signal ourselves while we're the
     * STW initiator. */
    if (thread == self) {
        write_TLS(THRUPTION_PENDING,LISP_T,self);
        while (check_pending_thruptions(0 /* ignore the sigmask */))
            ;
        return;
    }

    /* We are not in a signal handler here, so need to block signals
     * manually. */
    sigset_t oldset;
    block_deferrable_signals(&oldset);

    WITH_GC_STATE_LOCK {
        if (gc_state.phase == GC_NONE) {
            odxprint(safepoints, "wake_thread_posix: invoking");
            gc_advance(GC_INVOKED,GC_NONE);
            {
                /* I do not know whether WITH_ALL_THREADS_LOCK was only to avoid
                 * hitting wild pointers in the loop over threads (gone now)
                 * or whether it _also_ had an effect on the safepoint state.
                 * Out of caution I'm leaving it in despite removing the loop */
                /* only if in foreign code, notify using signal */
                WITH_ALL_THREADS_LOCK {
                    do {
                        odxprint(safepoints, "wake_thread_posix: found");
                        write_TLS(THRUPTION_PENDING,LISP_T,thread);
                        if (read_TLS(GC_PENDING,thread) == LISP_T
                            || THREAD_STOP_PENDING(thread) == LISP_T)
                            break;

                        if (os_get_csp(thread)) {
                            odxprint(safepoints, "wake_thread_posix: kill");
                            /* ... and in foreign code.  Push it into a safety
                             * transition. */
                            int status = pthread_kill((pthread_t)lispthread->uw_os_thread, SIGURG);
                            if (status)
                                lose("wake_thread_posix: pthread_kill failed with %d",
                                     status);
                        }
                    } while(0);
                }
            }
            gc_advance(GC_NONE,GC_INVOKED);
        } else {
            odxprint(safepoints, "wake_thread_posix: passive");
            write_TLS(THRUPTION_PENDING, LISP_T, thread);
        }
    }
    thread_sigmask(SIG_SETMASK, &oldset, 0);
}
#endif /* !LISP_FEATURE_WIN32 */
#endif /* LISP_FEATURE_SB_SAFEPOINT */
/* Read TH's csp-around-foreign-call word as a pointer.  A nonzero
 * value is the topmost stack location of a thread in alien code; NULL
 * means the thread is in lisp. */
void* os_get_csp(struct thread* th)
{
    intptr_t word = (intptr_t)csp_around_foreign_call(th);
    return (void*)word;
}
#ifndef LISP_FEATURE_WIN32
# ifdef LISP_FEATURE_SB_SAFEPOINT
/* SIGURG handler (POSIX): deliver a pending thruption.  This is
 * basically what 'low_level_maybe_now_maybe_later' was (which doesn't
 * exist), but with a different name, and different way of deciding to
 * defer the signal.  If the thread is in lisp (CSP word is zero), the
 * thruption is deferred to the next safepoint; if in C, we temporarily
 * clear the CSP word and funnel through thread_in_lisp_raised(). */
void thruption_handler(__attribute__((unused)) int signal,
                       __attribute__((unused)) siginfo_t *info,
                       os_context_t *ctx)
{
    struct thread *self = get_sb_vm_thread();

    void *transition_sp = os_get_csp(self);
    if (!transition_sp)
        /* In Lisp code.  Do not run thruptions asynchronously.  The
         * next safepoint will take care of it. */
        return;

#ifndef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
    /* Sanity: a nonzero CSP implies an active foreign call on these
     * platforms. */
    if (!foreign_function_call_active_p(self))
        lose("csp && !ffca");
#endif

    /* In C code.  As a rule, we assume that running thruptions is OK. */
    csp_around_foreign_call(self) = 0;
    thread_in_lisp_raised(ctx);
    csp_around_foreign_call(self) = (intptr_t) transition_sp;
}
# endif
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
/* Trap trampolines are in target-assem.S so that they pick up the
* trap instruction selection features automatically. */
extern lispobj
handle_global_safepoint_violation(lispobj fun, lispobj *args, int nargs);
extern lispobj
handle_csp_safepoint_violation(lispobj fun, lispobj *args, int nargs);
#endif
/* Page-fault filter: decide whether FAULT_ADDRESS is one of our
 * safepoint traps.  Returns 1 (handled) for the global safepoint page
 * or this thread's CSP page, dispatching to the appropriate handler
 * (via a trampoline when the C stack is the control stack, since we're
 * on the altstack and must not run lisp there); returns 0 for any
 * other fault. */
int
handle_safepoint_violation(os_context_t *ctx, os_vm_address_t fault_address)
{
    struct thread *self = get_sb_vm_thread();

    if (fault_address == (os_vm_address_t) GC_SAFEPOINT_TRAP_ADDR) {
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
        /* We're on the altstack and don't want to run Lisp code. */
        arrange_return_to_c_function(ctx, handle_global_safepoint_violation, 0);
#else
        if (foreign_function_call_active_p(self)) lose("GSP trap in C?");
        fake_foreign_function_call(ctx);
        thread_in_lisp_raised(ctx);
        undo_fake_foreign_function_call(ctx);
#endif
        return 1;
    }

    /* Does the fault fall on this thread's CSP page (which ends one
     * header's worth of slots below the thread struct)? */
    if ((1+THREAD_HEADER_SLOTS)+(lispobj*)fault_address == (lispobj*)self) {
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
        arrange_return_to_c_function(ctx, handle_csp_safepoint_violation, 0);
#else
        if (!foreign_function_call_active_p(self)) lose("CSP trap in Lisp?");
        thread_in_safety_transition(ctx);
#endif
        return 1;
    }

    /* not a safepoint */
    return 0;
}
#endif /* LISP_FEATURE_WIN32 */
/* Debug-trace backend: format FMT/ARGS into a bounded buffer, prefixed
 * with the current thread's identifier, and write it to stderr.
 * Preserves errno (and, on Windows, GetLastError) across the call so
 * tracing never perturbs error reporting. */
void
vodxprint_fun(const char *fmt, va_list args)
{
#ifdef LISP_FEATURE_WIN32
    DWORD lastError = GetLastError();
#endif
    int saved_errno = errno;

    char buf[1024];
    snprintf(buf, sizeof(buf), "["THREAD_ID_LABEL"] ", THREAD_ID_VALUE);
    size_t prefix = strlen(buf);
    vsnprintf(buf + prefix, sizeof(buf) - prefix - 1, fmt, args);
    /* buf is now zero-terminated (even in case of overflow).
     * Our caller took care of the newline (if any) through `fmt'. */

    /* A sufficiently POSIXy implementation of stdio will provide
     * per-FILE locking, as defined in the spec for flockfile.  At least
     * glibc complies with this.  Hence we do not need to perform
     * locking ourselves here.  (Should it turn out, of course, that
     * other libraries opt for speed rather than safety, we need to
     * revisit this decision.) */
    fputs(buf, stderr);

#ifdef LISP_FEATURE_WIN32
    /* stdio's stderr is line-bufferred, i.e. \n ought to flush it.
     * Unfortunately, MinGW does not behave the way I would expect it
     * to.  Let's be safe: */
    fflush(stderr);
    SetLastError(lastError);
#endif
    errno = saved_errno;
}
/* Varargs front-end for vodxprint_fun(): collect the arguments and
 * delegate. */
void
odxprint_fun(const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    vodxprint_fun(fmt, args);
    va_end(args);
}
#endif /* LISP_FEATURE_SB_SAFEPOINT -- entire file */
|
d62ef46ec73cf08237d4cd9673115c802e02dc25
|
52c8ed39b32ccc7c0673278c1adea3638797c9ff
|
/src/lib/libm/arm32/sqrt.c
|
b4f5e7ca4e199a6d23690473c3d21098311c125f
|
[
"MIT"
] |
permissive
|
xboot/xboot
|
0cab7b440b612aa0a4c366025598a53a7ec3adf1
|
6d6b93947b7fcb8c3924fedb0715c23877eedd5e
|
refs/heads/master
| 2023-08-20T05:56:25.149388
| 2023-07-12T07:38:29
| 2023-07-12T07:38:29
| 471,539
| 765
| 296
|
MIT
| 2023-05-25T09:39:01
| 2010-01-14T08:25:12
|
C
|
UTF-8
|
C
| false
| false
| 132
|
c
|
sqrt.c
|
#if __ARM32_ARCH__ >= 7
/*
 * sqrt - double-precision square root via the hardware "vsqrt.f64"
 * VFP instruction (ARMv7+ only; the preprocessor guard keeps this
 * definition out of builds for older cores).  The operand is updated
 * in place through the inline-asm constraints.
 */
double sqrt(double x)
{
	__asm__ __volatile__("vsqrt.f64 %P0, %P1" : "=w"(x) : "w"(x));
	return x;
}
#endif
|
fbd9c7ef1f26b45d7a0892c261674c92bf874ed0
|
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
|
/games/hangman/files/patch-src-dict.c
|
3fa85ab973f2caf2fb4ff338f4ff793b916ce95e
|
[
"BSD-2-Clause"
] |
permissive
|
freebsd/freebsd-ports
|
86f2e89d43913412c4f6b2be3e255bc0945eac12
|
605a2983f245ac63f5420e023e7dce56898ad801
|
refs/heads/main
| 2023-08-30T21:46:28.720924
| 2023-08-30T19:33:44
| 2023-08-30T19:33:44
| 1,803,961
| 916
| 918
|
NOASSERTION
| 2023-09-08T04:06:26
| 2011-05-26T11:15:35
| null |
UTF-8
|
C
| false
| false
| 1,208
|
c
|
patch-src-dict.c
|
--- src/dict.c.orig Sun Apr 11 01:22:40 2004
+++ src/dict.c Thu Nov 10 05:18:25 2005
@@ -84,11 +84,15 @@
****************************************************/
static void resize()
{
+ struct node *d;
+ struct node *t;
+ struct node ** old_table = dictionary;
unsigned int a;
+ size_t i;
size_t old_size = sizeOfTable;
- sizeOfTable *= 2;
numOfEntries = 0;
- struct node ** old_table = dictionary;
+
+ sizeOfTable *= 2;
dictionary = (struct node **)malloc(sizeof(struct node *) * sizeOfTable);
if(!dictionary) {
@@ -99,8 +103,9 @@
/* now copy old table into new table
and delete the old one */
- for(size_t i = 0; i < old_size; i++) {
- struct node * d = NULL, * t = old_table[i];
+ for(i = 0; i < old_size; i++) {
+ d = NULL;
+ t = old_table[i];
while(t) {
addToDictionary(t->word);
d = t;
@@ -157,10 +162,11 @@
{
FILE * input = NULL;
unsigned int a;
+ size_t i;
/* Delete the old dictionary if one exists */
if(dictionary) {
- for(size_t i = 0; i < sizeOfTable; i++) {
+ for(i = 0; i < sizeOfTable; i++) {
struct node * t = dictionary[i];
while(t) {
free(t->word);
|
8d3018d9154b335e2243b6390d7a797a8337afb1
|
7fcb614a59a138019b2845a6e493f9d22c44852d
|
/MCUME_esp32/espcastaway/main/tab_EnvelopeShapeValues.h
|
2c14a1cb30eaebcfdfd9be22f32e1ddbefe6980c
|
[] |
no_license
|
Jean-MarcHarvengt/MCUME
|
9180feaf8195c6a0a38eba6c12733c987fa98062
|
fe1280985d9a86bfb2166842a56c8eec768aa666
|
refs/heads/master
| 2023-06-09T23:11:04.216374
| 2023-05-28T15:24:02
| 2023-05-28T15:24:02
| 203,546,040
| 318
| 43
| null | 2023-03-21T13:19:51
| 2019-08-21T08:58:23
|
C
|
UTF-8
|
C
| false
| false
| 71,770
|
h
|
tab_EnvelopeShapeValues.h
|
static const int EnvelopeShapeValues[16384] = {
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,
111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,
95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,
79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,
63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,
47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,
31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,
15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0,
-1,-2,-3,-4,-5,-6,-7,-8,-9,-10,-11,-12,-13,-14,-15,-16,
-17,-18,-19,-20,-21,-22,-23,-24,-25,-26,-27,-28,-29,-30,-31,-32,
-33,-34,-35,-36,-37,-38,-39,-40,-41,-42,-43,-44,-45,-46,-47,-48,
-49,-50,-51,-52,-53,-54,-55,-56,-57,-58,-59,-60,-61,-62,-63,-64,
-65,-66,-67,-68,-69,-70,-71,-72,-73,-74,-75,-76,-77,-78,-79,-80,
-81,-82,-83,-84,-85,-86,-87,-88,-89,-90,-91,-92,-93,-94,-95,-96,
-97,-98,-99,-100,-101,-102,-103,-104,-105,-106,-107,-108,-109,-110,-111,-112,
-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-125,-126,-127,-128,
-128,-127,-126,-125,-124,-123,-122,-121,-120,-119,-118,-117,-116,-115,-114,-113,
-112,-111,-110,-109,-108,-107,-106,-105,-104,-103,-102,-101,-100,-99,-98,-97,
-96,-95,-94,-93,-92,-91,-90,-89,-88,-87,-86,-85,-84,-83,-82,-81,
-80,-79,-78,-77,-76,-75,-74,-73,-72,-71,-70,-69,-68,-67,-66,-65,
-64,-63,-62,-61,-60,-59,-58,-57,-56,-55,-54,-53,-52,-51,-50,-49,
-48,-47,-46,-45,-44,-43,-42,-41,-40,-39,-38,-37,-36,-35,-34,-33,
-32,-31,-30,-29,-28,-27,-26,-25,-24,-23,-22,-21,-20,-19,-18,-17,
-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,
48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,
64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,
80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,
96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,
112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,
-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128,-128
};
|
da6bd8dfbc8cf1637b50130f0f844649228ccd5c
|
fa1e5fac8c88c7ee3b4b6540ee3e1a2668c02557
|
/Python/ext_src/abc_dir.c
|
443d8816234a902976159bced3f0bb6f64b3bcd6
|
[
"MIT"
] |
permissive
|
zhaokg/Rbeast
|
cb6e6a2b6f846c3193c3299c72784466d331aa2b
|
2dfe69eb9d8b44512231a67aceee7f46f38d20bd
|
refs/heads/master
| 2023-09-03T10:20:22.697438
| 2023-09-01T14:38:42
| 2023-09-01T14:38:42
| 199,787,044
| 124
| 29
| null | 2022-04-07T01:11:45
| 2019-07-31T05:42:54
|
C
|
UTF-8
|
C
| false
| false
| 3,675
|
c
|
abc_dir.c
|
#include "abc_000_macro.h"
#include "abc_000_warning.h"
#if defined(_MSC_VER)
#ifndef _CRT_SECURE_NO_DEPRECATE
#define _CRT_SECURE_NO_DEPRECATE (1)
#define _CRT_NONSTDC_NO_DEPRECATE
#endif
//#pragma warning(disable : 4996)
#endif
#if defined(WIN64_OS) || defined (WIN32_OS)
#include "abc_dir.h"
#include "abc_ide_util.h" //r_printf
// github.com/tronkko/dirent/tree/master/include
// codeforwin.org/2018/03/c-program-to-list-all-files-in-a-directory-recursively.html
void listFiles(const char *path, const char * ext)
{
DIR *dir = opendir(path);
// Unable to open directory stream
if (dir == NULL) return ;
struct dirent *dp;
while ((dp = readdir(dir)) != NULL)
{
char fullpath[1000];
strcpy(fullpath, path);
strcat(fullpath, "/");
strcat(fullpath, dp->d_name);
// stackoverflow.com/questions/22886290/c-get-all-files-with-certain-extension
if (dp->d_type == DT_REG /*DT_DIR*/) {
char * const pext = strrchr(dp->d_name, '.');
if (pext != NULL && pext != dp->d_name) {
if (strcmp(pext, ".tif") == 0){
r_printf("%s\n", dp->d_name);
}
}
}
/*
struct stat s;
if (stat(fullpath, &s) == 0)
{
if (s.st_mode & S_IFDIR) ; //it's a directory;
else if (s.st_mode & S_IFREG) ;//it's a file
}*/
} //while ( (dp = readdir(dir)) != NULL)
closedir(dir); // Close directory stream
}
FILELIST_PTR GetFlist(const char *path, const char * ext)
{
DIR *dir = opendir(path);
// Unable to open directory stream
if (dir == NULL)return NULL;
int fNum = 0;
int memSize = 0;
struct dirent *dp;
while ((dp = readdir(dir)) != NULL)
{
// stackoverflow.com/questions/22886290/c-get-all-files-with-certain-extension
if (dp->d_type == DT_REG /*DT_DIR*/) {
char * const pEXT = strrchr(dp->d_name, '.');
if (pEXT != NULL && pEXT != dp->d_name)
{
if (stricmp(pEXT+1, ext) == 0){
fNum++;
memSize += (int)strlen(dp->d_name)+ 1L;
}
}
}
/*
struct stat s;
if (stat(fullpath, &s) == 0)
{
if (s.st_mode & S_IFDIR) ; //it's a directory;
else if (s.st_mode & S_IFREG) ;//it's a file
}*/
} //while ( (dp = readdir(dir)) != NULL)
/*************************************************/
FILELIST_PTR flist = malloc(sizeof(FILELIST));
memset(flist, 0, sizeof(FILELIST));
flist->num = fNum;
flist->base = malloc(memSize);
memset(flist->base, 0, memSize);
flist->offset = malloc(fNum*sizeof(ptrdiff_t));
fNum = 0;
char * ptr = flist->base;
rewinddir(dir);
while ((dp = readdir(dir)) != NULL) {
// stackoverflow.com/questions/22886290/c-get-all-files-with-certain-extension
if (dp->d_type == DT_REG /*DT_DIR*/) {
char * const pEXT = strrchr(dp->d_name, '.');
if (pEXT != NULL && pEXT != dp->d_name)
{
if (stricmp(pEXT+1, ext) == 0){
int size = (int)strlen(dp->d_name);
flist->offset[fNum] = ptr; //-Wint-conversion
memcpy(ptr, dp->d_name, size);
ptr[size] = 0;
fNum++;
ptr += (size + 1);
}
}
}
/*
struct stat s;
if (stat(fullpath, &s) == 0)
{
if (s.st_mode & S_IFDIR) ; //it's a directory;
else if (s.st_mode & S_IFREG) ;//it's a file
}*/
} //whil
/*************************************************/
closedir(dir); // Close directory stream
return flist;
}
void FreeFlist(FILELIST_PTR flist) {
if (flist == NULL) return;
if (flist->base != NULL)
free(flist->base);
if (flist->offset != NULL)
free(flist->offset);
free(flist);
}
void PrintFlist(FILELIST_PTR flist) {
if (flist == NULL) return;
for (int i = 0; i < flist->num; i++){
r_printf("%s \n", flist->offset[i]);
}
}
#else
static char _dummy = 'c';
#endif
#include "abc_000_warning.h"
|
4daed02e80fdbcac8ffc548ac1d95d411d85bf29
|
0f76e9f2c2f30ef14e4b8c67fe605cc19a26920d
|
/Include.win32/yaz/cql.h
|
a6c181536e20415a0c6e48a453a3697ad6da3e87
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
textbrowser/biblioteq
|
f9ef6f756059b010488f7b8c9f35285c88361f90
|
77ff19bec088b6f40321feab44f91ce25778bbea
|
refs/heads/master
| 2023-08-31T00:08:02.575660
| 2023-08-28T15:54:35
| 2023-08-28T15:54:35
| 31,837,087
| 199
| 51
|
NOASSERTION
| 2023-09-14T11:21:21
| 2015-03-08T03:33:27
|
C++
|
UTF-8
|
C
| false
| false
| 15,140
|
h
|
cql.h
|
/* This file is part of the YAZ toolkit.
* Copyright (C) Index Data.
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Index Data nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/** \file cql.h
\brief Header with public definitions about CQL.
*/
#ifndef CQL_H_INCLUDED
#define CQL_H_INCLUDED
#include <stdio.h>
#include <yaz/nmem.h>
#include <yaz/wrbuf.h>
YAZ_BEGIN_CDECL
/** \brief CQL parser handle (opaque pointer) */
typedef struct cql_parser *CQL_parser;
/** \brief creates a CQL parser.
\returns CCL parser
Returns CQL parser or NULL if parser could not be created.
*/
YAZ_EXPORT
CQL_parser cql_parser_create(void);
/** \brief destroys a CQL parser.
\param cp CQL parser
This function does nothing if NULL if received.
*/
YAZ_EXPORT
void cql_parser_destroy(CQL_parser cp);
/** \brief parses a CQL query (string)
\param cp CQL parser
\param str CQL string
\retval 0 success
\retval !=0 failure
*/
YAZ_EXPORT
int cql_parser_string(CQL_parser cp, const char *str);
/** \brief parses CQL query (query stream)
\param cp CQL parser
\param getbyte function which reads one character from stream
\param ungetbyte function which unreads one character from stream
\param client_data data to be passed to stream functions
\retval 0 success
\retval !=0 failure
This function is similar to cql_parser_string but takes a
functions to read each query character from a stream.
The functions pointers getbytes, ungetbyte are similar to
that known from stdios getc, ungetc.
*/
YAZ_EXPORT
int cql_parser_stream(CQL_parser cp,
int (*getbyte)(void *client_data),
void (*ungetbyte)(int b, void *client_data),
void *client_data);
/** \brief parses CQL query (from FILE)
\param cp CQL parser
\param f file where query is read from
\retval 0 success
\retval !=0 failure
This function is similar to cql_parser_string but reads from
stdio FILE handle instead.
*/
YAZ_EXPORT
int cql_parser_stdio(CQL_parser cp, FILE *f);
/** \brief configures strict mode
\param cp CQL parser
\param mode 1=enable strict mode, 0=disable strict mode
This function is similar to cql_parser_string but reads from
stdio FILE handle instead.
*/
YAZ_EXPORT
void cql_parser_strict(CQL_parser cp, int mode);
/** \brief Node type: search term */
#define CQL_NODE_ST 1
/** \brief Node type: boolean */
#define CQL_NODE_BOOL 2
/** \brief Node type: sortby single spec */
#define CQL_NODE_SORT 3
/** \brief CQL parse tree (node)
*/
struct cql_node {
/** node type */
int which;
union {
/** which == CQL_NODE_ST */
struct {
/** CQL index */
char *index;
/** CQL index URI or NULL if no URI */
char *index_uri;
/** Search term */
char *term;
/** relation */
char *relation;
/** relation URL or NULL if no relation URI) */
char *relation_uri;
/** relation modifiers */
struct cql_node *modifiers;
/** term list */
struct cql_node *extra_terms;
} st;
/** which == CQL_NODE_BOOL */
struct {
/** operator name "and", "or", ... */
char *value;
/** left operand */
struct cql_node *left;
/** right operand */
struct cql_node *right;
/** modifiers (NULL for no list) */
struct cql_node *modifiers;
} boolean;
/** which == CQL_NODE_SORT */
struct {
char *index;
/** next spec */
struct cql_node *next;
/** modifiers (NULL for no list) */
struct cql_node *modifiers;
/** search node */
struct cql_node *search;
} sort;
} u;
};
/** \brief Private structure that describes the CQL properties (profile)
*/
struct cql_properties;
/** \brief Structure used by cql_buf_write_handler
*/
struct cql_buf_write_info {
int max;
int off;
char *buf;
};
/** \brief Handler for cql_buf_write_info
*/
YAZ_EXPORT
void cql_buf_write_handler(const char *b, void *client_data);
/** \brief Prints a CQL node and all sub nodes.
Hence this function prints the parse tree which is as returned by
cql_parser_result.
*/
YAZ_EXPORT
void cql_node_print(struct cql_node *cn);
/** \brief creates a search clause node (st). */
YAZ_EXPORT
struct cql_node *cql_node_mk_sc(NMEM nmem, const char *index,
const char *relation, const char *term);
/** \brief applies a prefix+uri to "unresolved" index and relation URIs.
"unresolved" URIs are those nodes where member index_uri / relation_uri
is NULL.
*/
YAZ_EXPORT
struct cql_node *cql_apply_prefix(NMEM nmem, struct cql_node *cn,
const char *prefix, const char *uri);
/** \brief creates a boolean node. */
YAZ_EXPORT
struct cql_node *cql_node_mk_boolean(NMEM nmem, const char *op);
/** \brief creates a sort single spec node. */
YAZ_EXPORT
struct cql_node *cql_node_mk_sort(NMEM nmem, const char *index,
struct cql_node *modifiers);
/** \brief destroys a node and its children. */
YAZ_EXPORT
void cql_node_destroy(struct cql_node *cn);
/** duplicates a node (returns a copy of supplied node) . */
YAZ_EXPORT
struct cql_node *cql_node_dup (NMEM nmem, struct cql_node *cp);
/** \brief returns the parse tree of the most recently parsed CQL query.
\param cp CQL parser
\returns CQL node or NULL for failure
*/
YAZ_EXPORT
struct cql_node *cql_parser_result(CQL_parser cp);
/** \brief returns the sortby tree of the most recently parsed CQL query.
\param cp CQL parser
\returns CQL node or NULL for failure
*/
YAZ_EXPORT
struct cql_node *cql_parser_sort_result(CQL_parser cp);
/** \brief converts CQL tree to XCQL and writes to user-defined stream
\param cn CQL node (tree)
\param pr print function
\param client_data data to be passed to pr function
*/
YAZ_EXPORT
void cql_to_xml(struct cql_node *cn,
void (*pr)(const char *buf, void *client_data),
void *client_data);
/** \brief converts CQL tree to XCQL and writes to file
\param cn CQL node (tree)
\param f file handle
*/
YAZ_EXPORT
void cql_to_xml_stdio(struct cql_node *cn, FILE *f);
/** \brief converts CQL tree to XCQL and writes result to buffer
\param cn CQL node (tree)
\param out buffer
\param max size of buffer (max chars to write)
\returns length of resulting buffer
*/
YAZ_EXPORT
int cql_to_xml_buf(struct cql_node *cn, char *out, int max);
/** \brief converts CQL tree to CCL and writes to user-defined stream
\param cn CQL node (tree)
\param pr print function
\param client_data data to be passed to pr function
*/
YAZ_EXPORT
int cql_to_ccl(struct cql_node *cn,
void (*pr)(const char *buf, void *client_data),
void *client_data);
/** \brief converts CQL tree to CCL and writes to file
\param cn CQL node (tree)
\param f file handle
*/
YAZ_EXPORT
void cql_to_ccl_stdio(struct cql_node *cn, FILE *f);
/** \brief converts CQL tree to CCL and writes result to buffer
\param cn CQL node (tree)
\param out buffer
\param max size of buffer (max chars to write)
\retval 0 OK
\retval -1 conversion error
\retval -2 buffer too small (truncated)
*/
YAZ_EXPORT
int cql_to_ccl_buf(struct cql_node *cn, char *out, int max);
/** \brief stream handle for file (used by cql_to_xml_stdio) */
YAZ_EXPORT
void cql_fputs(const char *buf, void *client_data);
/** \brief CQL transform handle.
The transform describes how to convert from CQL to PQF (Type-1 AKA RPN).
*/
typedef struct cql_transform_t_ *cql_transform_t;
/** \brief creates a CQL transform handle
\returns transform handle or NULL for failure
*/
YAZ_EXPORT
cql_transform_t cql_transform_create(void);
/** \brief creates a CQL transform handle from am opened file handle
\param f file where transformation spec is read
\returns transform handle or NULL for failure
The transformation spec is read from a FILE handle which is assumed
opened for reading.
*/
YAZ_EXPORT
cql_transform_t cql_transform_open_FILE (FILE *f);
/** \brief creates a CQL transform handle from a file
\param fname name of where transformation spec is read
\returns transform handle or NULL for failure
*/
YAZ_EXPORT
cql_transform_t cql_transform_open_fname(const char *fname);
/** \brief defines CQL transform pattern
\param ct CQL transform handle
\param pattern pattern string
\param value pattern value
\returns 0 for succes; -1 for failure
*/
YAZ_EXPORT
int cql_transform_define_pattern(cql_transform_t ct, const char *pattern,
const char *value);
/** \brief destroys a CQL transform handle
\param ct CQL transform handle
*/
YAZ_EXPORT
void cql_transform_close(cql_transform_t ct);
/** \brief tranforms PQF given a CQL tree (NOT re-entrant)
\param ct CQL transform handle
\param cn CQL node tree
\param pr print function
\param client_data data to be passed to pr
\retval 0 success
\retval != 0 error
The result is written to a user-defined stream.
*/
YAZ_EXPORT
int cql_transform(cql_transform_t ct,
struct cql_node *cn,
void (*pr)(const char *buf, void *client_data),
void *client_data);
/** \brief tranforms PQF given a CQL tree (re-entrant)
\param ct CQL transform handle
\param cn CQL node tree
\param addinfo additional information (if error)
\param pr print function
\param client_data data to be passed to pr
\retval 0 success
\retval != 0 error code
The result is written to a user-defined stream.
*/
YAZ_EXPORT
int cql_transform_r(cql_transform_t ct, struct cql_node *cn,
WRBUF addinfo,
void (*pr)(const char *buf, void *client_data),
void *client_data);
/** \brief transforms PQF given a CQL tree from FILE (not re-entrant)
\param ct CQL transform handle
\param cn CQL tree
\param f FILE where output is written
\retval 0 success
\retval !=0 failure (error code)
The result is written to a file specified by FILE handle (which must
be opened for writing.
*/
YAZ_EXPORT
int cql_transform_FILE(cql_transform_t ct,
struct cql_node *cn, FILE *f);
/** \brief transforms PQF given a CQL tree from buffer (not re-entrant)
\param ct CQL transform handle
\param cn CQL tree
\param out buffer for output
\param max maximum bytes for output (size of buffer)
\retval 0 success
\retval !=0 failure (error code)
*/
YAZ_EXPORT
int cql_transform_buf(cql_transform_t ct,
struct cql_node *cn, char *out, int max);
/** \brief returns additional information for last transform
\param ct CQL transform handle
\param addinfo additional info (result)
\returns error code
*/
YAZ_EXPORT
int cql_transform_error(cql_transform_t ct, const char **addinfo);
/** \brief sets error and addinfo for transform
\param ct CQL transform handle
\param error error code
\param addinfo additional info
*/
YAZ_EXPORT
void cql_transform_set_error(cql_transform_t ct, int error, const char *addinfo);
/** \brief returns the CQL message corresponding to a given error code.
\param code error code
\returns text message
*/
YAZ_EXPORT
const char *cql_strerror(int code);
/** \brief returns the standard CQL context set URI.
\returns CQL URI string
*/
YAZ_EXPORT
const char *cql_uri(void);
/** \brief compares two CQL strings (ala strcmp)
\param s1 string 1
\param s2 string 2
\returns comparison value
Compares two CQL strings (for relations, operators, etc)
(unfortunately defined as case-insensitive unlike XML etc)
*/
YAZ_EXPORT
int cql_strcmp(const char *s1, const char *s2);
/** \brief compares two CQL strings (ala strncmp)
\param s1 string 1
\param s2 string 2
\param n size
\returns comparison value
Compares two CQL strings at most n bytes
(unfortunately defined as case-insensitive unlike XML etc)
*/
YAZ_EXPORT
int cql_strncmp(const char *s1, const char *s2, size_t n);
/** \brief converts CQL sortby to sortkeys (ala versions 1.1)
\param cn CQL tree
\param pr print function
\param client_data data to be passed to pr function
This will take CQL_NODE_SORT entries and conver them to
path,schema,ascending,caseSensitive,missingValue
items..
One for each sort keys. Where
path is string index for sorting
schema is schema for sort index
ascending is a boolean (0=false, 1=true). Default is true.
caseSensitive is a boolean. Default is false.
missingValue is a string and one of 'abort', 'highValue', 'lowValue',
or 'omit'. Default is 'highValue'.
See also
http://www.loc.gov/standards/sru/sru-1-1.html#sort
*/
YAZ_EXPORT
int cql_sortby_to_sortkeys(struct cql_node *cn,
void (*pr)(const char *buf, void *client_data),
void *client_data);
/** \brief converts CQL sortby to sortkeys ..
\param cn CQL tree
\param out result buffer
\param max size of buffer (allocated)
\retval 0 OK
\retval -1 ERROR
*/
YAZ_EXPORT
int cql_sortby_to_sortkeys_buf(struct cql_node *cn, char *out, int max);
YAZ_END_CDECL
#endif
/* CQL_H_INCLUDED */
/*
* Local variables:
* c-basic-offset: 4
* c-file-style: "Stroustrup"
* indent-tabs-mode: nil
* End:
* vim: shiftwidth=4 tabstop=8 expandtab
*/
|
cbb10590b4c93cedd03c9f83d4db84eaf50c354d
|
505e17d8e6e6d52461105b538d8e4d64b8d8d0f2
|
/installer/fetcher/version.h
|
dd6f54d1e15c33b386b2bf1ba51e8367425a278a
|
[
"MIT"
] |
permissive
|
WireGuard/wireguard-windows
|
80e18635cb888c16924bc3485bb6d1658e294516
|
dcc0eb72a04ba2c0c83d29bd621a7f66acce0a23
|
refs/heads/master
| 2023-08-18T19:12:38.648497
| 2023-01-20T09:02:27
| 2023-01-23T13:22:34
| 169,113,242
| 1,387
| 290
|
MIT
| 2023-09-07T22:49:40
| 2019-02-04T16:53:42
|
Go
|
UTF-8
|
C
| false
| false
| 215
|
h
|
version.h
|
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2020-2022 Jason A. Donenfeld. All Rights Reserved.
*/
#ifndef _VERSION_H
#define _VERSION_H
#define VERSION_STR "1.0"
#define VERSION_ARRAY 1,0,0,0
#endif
|
8a4b9a73ae6a18ee80b3f25d16fb0b68efe710f4
|
f367e4b66a1ee42e85830b31df88f63723c36a47
|
/lib/wasm-micro-runtime-WAMR-1.2.2/core/app-framework/app-native-shared/bi-inc/shared_utils.h
|
8155ea1f7fc9b10942289ea25e5e8cd432358779
|
[
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
fluent/fluent-bit
|
06873e441162b92941024e9a7e9e8fc934150bf7
|
1a41f49dc2f3ae31a780caa9ffd6137b1d703065
|
refs/heads/master
| 2023-09-05T13:44:55.347372
| 2023-09-05T10:14:33
| 2023-09-05T10:14:33
| 29,933,948
| 4,907
| 1,565
|
Apache-2.0
| 2023-09-14T10:17:02
| 2015-01-27T20:41:52
|
C
|
UTF-8
|
C
| false
| false
| 3,382
|
h
|
shared_utils.h
|
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SHARED_UTILS_H_
#define _SHARED_UTILS_H_
#include "bh_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
#define FMT_ATTR_CONTAINER 99
#define FMT_APP_RAW_BINARY 98
/* the request structure */
typedef struct request {
// message id
uint32 mid;
// url of the request
char *url;
// action of the request, can be PUT/GET/POST/DELETE
int action;
// payload format, currently only support attr_container_t type
int fmt;
// payload of the request, currently only support attr_container_t type
void *payload;
// length in bytes of the payload
int payload_len;
// sender of the request
unsigned long sender;
} request_t;
/* the response structure */
typedef struct response {
// message id
uint32 mid;
// status of the response
int status;
// payload format
int fmt;
// payload of the response,
void *payload;
// length in bytes of the payload
int payload_len;
// receiver of the response
unsigned long reciever;
} response_t;
int
check_url_start(const char *url, int url_len, const char *leading_str);
bool
match_url(char *pattern, char *matched);
char *
find_key_value(char *buffer, int buffer_len, char *key, char *value,
int value_len, char delimiter);
request_t *
clone_request(request_t *request);
void
request_cleaner(request_t *request);
response_t *
clone_response(response_t *response);
void
response_cleaner(response_t *response);
/**
* @brief Set fields of response.
*
* @param response pointer of the response to be set
* @param status status of response
* @param fmt format of the response payload
* @param payload payload of the response
* @param payload_len length in bytes of the response payload
*
* @return pointer to the response
*
* @warning the response pointer MUST NOT be NULL
*/
response_t *
set_response(response_t *response, int status, int fmt, const char *payload,
int payload_len);
/**
* @brief Make a response for a request.
*
* @param request pointer of the request
* @param response pointer of the response to be made
*
* @return pointer to the response
*
* @warning the request and response pointers MUST NOT be NULL
*/
response_t *
make_response_for_request(request_t *request, response_t *response);
/**
* @brief Initialize a request.
*
* @param request pointer of the request to be initialized
* @param url url of the request
* @param action action of the request
* @param fmt format of the request payload
* @param payload payload of the request
* @param payload_len length in bytes of the request payload
*
* @return pointer to the request
*
* @warning the request pointer MUST NOT be NULL
*/
request_t *
init_request(request_t *request, char *url, int action, int fmt, void *payload,
int payload_len);
char *
pack_request(request_t *request, int *size);
request_t *
unpack_request(char *packet, int size, request_t *request);
char *
pack_response(response_t *response, int *size);
response_t *
unpack_response(char *packet, int size, response_t *response);
void
free_req_resp_packet(char *packet);
char *
wa_strdup(const char *str);
#ifdef __cplusplus
}
#endif
#endif /* end of _SHARED_UTILS_H_ */
|
a0b76c9dd259efac422afe56a572ce14fc421739
|
315f6b255a9c47af66675b0c5cc218295ca57d89
|
/source/map/map.c
|
d9c99f1c3ebd4e7d6dd008182e72e6524820cbf5
|
[
"MIT"
] |
permissive
|
cppchriscpp/nes-starter-kit
|
b666ea0089b99e66761ef5c676fd6a63f3d02900
|
6c0515ed91e157bd82d3652b09b31b36da4533a6
|
refs/heads/master
| 2023-08-22T05:37:16.765304
| 2023-02-19T22:18:02
| 2023-02-19T22:18:02
| 130,593,134
| 153
| 19
|
MIT
| 2022-07-26T23:53:39
| 2018-04-22T17:55:57
|
C
|
UTF-8
|
C
| false
| false
| 29,119
|
c
|
map.c
|
#include "source/map/map.h"
#include "source/map/load_map.h"
#include "source/neslib_asm/neslib.h"
#include "source/library/bank_helpers.h"
#include "source/configuration/game_states.h"
#include "source/globals.h"
#include "source/configuration/system_constants.h"
#include "source/graphics/palettes.h"
#include "source/graphics/hud.h"
#include "source/graphics/fade_animation.h"
#include "source/sprites/player.h"
#include "source/sprites/sprite_definitions.h"
#include "source/sprites/map_sprites.h"
#include "source/menus/error.h"
CODE_BANK(PRG_BANK_MAP_LOGIC);
ZEROPAGE_DEF(unsigned char, playerOverworldPosition);
ZEROPAGE_DEF(int, xScrollPosition);
ZEROPAGE_DEF(int, yScrollPosition);
unsigned char currentMap[256];
unsigned char assetTable[0x38];
unsigned char currentMapSpriteData[(16 * MAP_MAX_SPRITES)];
unsigned char currentMapSpritePersistance[64];
unsigned char mapScreenBuffer[0x55];
// Prepares PPU state for in-game map display: selects the gameplay CHR banks,
// loads the in-game palettes, and sets scroll/mirroring so the HUD sits at the
// top of the screen with the map below it.
void init_map(void) {
    // Make sure we're looking at the right sprite and chr data, not the ones for the menu.
    set_chr_bank_0(CHR_BANK_TILES);
    set_chr_bank_1(CHR_BANK_SPRITES);
    // Also set the palettes to the in-game palettes.
    pal_bg(mainBgPalette);
    pal_spr(mainSpritePalette);
    // Do some trickery to make the HUD show up at the top of the screen, with the map slightly below.
    // Scrolling to (0, 240-HUD_PIXEL_HEIGHT) shows the bottom rows of the previous
    // nametable (the HUD area) first; the raster split later swaps to the map.
    scroll(0, 240-HUD_PIXEL_HEIGHT);
    set_mirroring(MIRROR_MODE_VERTICAL);
}
// Reusing a few temporary vars for the sprite function below.
#define currentValue tempInt1
#define spritePosition tempChar4
#define spriteDefinitionIndex tempChar5
#define mapSpriteDataIndex tempChar6
#define tempArrayIndex tempInt3
// Load the sprites from the current map
// Load the sprites from the current map.
// Unpacks the 2-byte-per-sprite records at the end of currentMap (position byte +
// sprite-definition id) into the mutable currentMapSpriteData table. Sprites with
// position 255, or already collected/killed on this screen (tracked per-screen in
// currentMapSpritePersistance), are marked SPRITE_TYPE_OFFSCREEN instead.
// Uses the shared global loop index `i` and the temp-variable aliases defined above.
void load_sprites(void) {
    for (i = 0; i != MAP_MAX_SPRITES; ++i) {
        // Each sprite has just 2 bytes stored. The first is the location, and the 2nd is the sprite id in spriteDefinitions.
        spriteDefinitionIndex = currentMap[(MAP_DATA_TILE_LENGTH + 1) + (i<<1)]<<SPRITE_DEF_SHIFT;
        mapSpriteDataIndex = i << MAP_SPRITE_DATA_SHIFT;
        spritePosition = currentMap[(MAP_DATA_TILE_LENGTH) + (i<<1)];
        if (spritePosition != 255 && !(currentMapSpritePersistance[playerOverworldPosition] & bitToByte[i])) {
            // Get X converted to our extended 16-bit int size.
            // The low nibble of the position byte is the tile column.
            currentValue = (spritePosition & 0x0f) << 8;
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_X] = (currentValue & 0xff);
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_X+1] = (currentValue >> 8);
            // Now do the same with Y (Which is already shifted 4 bits with the way we store this)
            // Note that due to weirdness with the NES and scrolling/the HUD, sprites will appear 1 px above where you'd expect
            // from this math. The one being subtracted from HUD_PIXEL_HEIGHT adjusts for that pixel.
            currentValue = ((spritePosition & 0xf0) << 4) + ((HUD_PIXEL_HEIGHT-1) << SPRITE_POSITION_SHIFT);
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_Y] = (currentValue & 0xff);
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_Y+1] = (currentValue >> 8);
            // Copy the simple bytes from the sprite definition to someplace more easily accessible (and modify-able!)
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_TILE_ID] = spriteDefinitions[spriteDefinitionIndex + SPRITE_DEF_POSITION_TILE_ID];
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_TYPE] = spriteDefinitions[spriteDefinitionIndex + SPRITE_DEF_POSITION_TYPE];
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_SIZE_PALETTE] = spriteDefinitions[spriteDefinitionIndex + SPRITE_DEF_POSITION_SIZE_PALETTE];
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_HEALTH] = spriteDefinitions[spriteDefinitionIndex + SPRITE_DEF_POSITION_HEALTH];
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_ANIMATION_TYPE] = spriteDefinitions[spriteDefinitionIndex + SPRITE_DEF_POSITION_ANIMATION_TYPE];
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_MOVEMENT_TYPE] = spriteDefinitions[spriteDefinitionIndex + SPRITE_DEF_POSITION_MOVEMENT_TYPE];
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_MOVE_SPEED] = spriteDefinitions[spriteDefinitionIndex + SPRITE_DEF_POSITION_MOVE_SPEED];
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_DAMAGE] = spriteDefinitions[spriteDefinitionIndex + SPRITE_DEF_POSITION_DAMAGE];
        } else {
            // Go away - mark this slot as unused so the sprite engine skips it.
            currentMapSpriteData[mapSpriteDataIndex + MAP_SPRITE_DATA_POS_TYPE] = SPRITE_TYPE_OFFSCREEN;
        }
    }
}
// Clears the asset table. Set containsHud to 1 to set the HUD bytes to use palette 4 (will break the coloring logic if you use the
// last few rows for the map.)
// Clears the asset (attribute) table. Set containsHud to 1 to set the HUD bytes to use
// palette 4 (will break the coloring logic if you use the last few rows for the map.)
// Fix: the parameter previously relied on implicit int (`clear_asset_table(containsHud)`),
// which is invalid in C99 and later; it is now declared explicitly with the same type,
// so behavior and the calling convention are unchanged.
void clear_asset_table(int containsHud) {
    // Loop over assetTable to clear it out (everything except the final 8-byte row).
    for (i = 0; i != sizeof(assetTable) - 8; ++i) {
        assetTable[i] = 0x00;
    }
    // The last row of the asset table uses the 4th palette to show the HUD correctly.
    for (; i != sizeof(assetTable); ++i) {
        assetTable[i] = containsHud == 0 ? 0x00 : 0xff;
    }
}
// Clears the asset table like we do above, but leaves the first row (top *half* of the asset table) blank.
// Used for proper scrolling animation, since we end up flip-flopping on which row we're on during the scrolling up animation.
// Clears the asset table like clear_asset_table(), originally intended to leave the
// first row (top *half* of the asset table) untouched for the scrolling-up animation.
// The previous implementation unconditionally did `clear_asset_table(0); return;`,
// leaving the partial-clear loops below the return unreachable; that dead code has
// been removed. Behavior is identical: a full clear with no HUD row.
void clear_asset_table_skip_top(void) {
    clear_asset_table(0);
}
// Loads the assets from assetTable (for the row *ending* with j) into mapScreenBuffer
// at tempArrayIndex.
// Loads the assets from assetTable (for the row *ending* with the global `j`) into
// mapScreenBuffer at the global `tempArrayIndex`, as a neslib vram-update record:
// [address MSB|mode, address LSB, length=8, 8 attribute bytes, EOF terminator].
// Both `j` and `tempArrayIndex` must be set by the caller before this runs.
void load_palette_to_map_screen_buffer(int attributeTableAdr) {
    // Header: write 8 horizontal bytes starting at the row's first attribute address.
    mapScreenBuffer[tempArrayIndex++] = MSB(attributeTableAdr + j - 7) | NT_UPD_HORZ;
    mapScreenBuffer[tempArrayIndex++] = LSB(attributeTableAdr + j - 7);
    mapScreenBuffer[tempArrayIndex++] = 8;
    // Using an unrolled loop to save a bit of RAM - not like we need it really.
    mapScreenBuffer[tempArrayIndex++] = assetTable[j-7];
    mapScreenBuffer[tempArrayIndex++] = assetTable[j-6];
    mapScreenBuffer[tempArrayIndex++] = assetTable[j-5];
    mapScreenBuffer[tempArrayIndex++] = assetTable[j-4];
    mapScreenBuffer[tempArrayIndex++] = assetTable[j-3];
    mapScreenBuffer[tempArrayIndex++] = assetTable[j-2];
    mapScreenBuffer[tempArrayIndex++] = assetTable[j-1];
    mapScreenBuffer[tempArrayIndex++] = assetTable[j];
    mapScreenBuffer[tempArrayIndex++] = NT_UPD_EOF;
}
// Now based on where we are in the map, shift them appropriately.
// This builds up the palette bytes - which comprise of 2 bits per 16x16 tile. It's a bit confusing...
// Folds the 2 palette bits held in currentValue (top bits, 0xc0) into the attribute
// byte at assetTable[j]. Each attribute byte packs four 16x16 tiles at 2 bits each;
// which bit pair tile `i` lands in depends on left/right column (i & 1) and which
// half of the attribute cell its row falls in ((i >> 4) & 1). reverseAttributes
// flips the top/bottom mapping so drawing can start on an odd-numbered tile row.
// Shift table (matches the original's four branches): even+top -> 6, even+bottom -> 2,
// odd+top -> 4, odd+bottom -> 0.
void update_asset_table_based_on_current_value(unsigned char reverseAttributes) {
    unsigned char paletteShift = 0;
    // Even columns (left tiles) occupy the lower bit pair of each nibble: +2.
    if ((i & 0x01) == 0) {
        paletteShift += 2;
    }
    // Rows matching reverseAttributes are the "top" half of the cell: +4.
    if (((i >> 4) & 0x01) == reverseAttributes) {
        paletteShift += 4;
    }
    currentValue >>= paletteShift;
    assetTable[j] += currentValue;
}
// We need to reuse some variables here to save on memory usage. So, use #define to give them readable names.
// Note that this is ONLY a rename; if something relies on the original variable, that impacts this one too.
#define currentMemoryLocation tempInt2
// NOTE: tempChar1-tempChar3 are in use by update_player_sprite, which we call here. (Confusing, I know...)
#define bufferIndex tempChar8
#define otherLoopIndex tempChar9
// reverseAttributes: If set to 1, this will flip which bits are used for the top and the bottom palette in the attribute table.
// This allows us to correctly draw starting on an odd-numbered row (such as at the start of our HUD.)
// Draws the full 192-tile currentMap to the given nametable, 16 map tiles
// (two 8x8 tile rows) per frame, building the attribute table in assetTable as it
// goes and flushing it at the end. While xScrollPosition != -1 it keeps the raster
// split alive each frame so the HUD stays put during the redraw.
// reverseAttributes: If set to 1, this will flip which bits are used for the top and the bottom palette in the attribute table.
// This allows us to correctly draw starting on an odd-numbered row (such as at the start of our HUD.)
void draw_current_map_to_nametable(int nametableAdr, int attributeTableAdr, unsigned char reverseAttributes) {
    // Prepare to draw on the first nametable
    set_vram_update(NULL);
    bufferIndex = 0;
    // j tracks the current assetTable byte; it is pre-decremented/incremented in the
    // loop, so it starts one step "behind" the first row it will touch.
    if (!reverseAttributes) {
        j = -1;
    } else {
        j = 7;
    }
    tempArrayIndex = NAMETABLE_UPDATE_PREFIX_LENGTH;
    for (i = 0; i != 192; ++i) {
        // The top 2 bytes of map data are palette data. Skip that for now.
        currentValue = currentMap[i] & 0x3f;
        // This bumps the tile id up from the id for a 16x16 tile to an 8x8 tile on the real map
        currentValue = (((currentValue & 0xf8)) << 2) + ((currentValue & 0x07) << 1);
        if (bufferIndex == 0) {
            // Start of a new 16-tile run: remember where in vram this run begins.
            currentMemoryLocation = nametableAdr + ((i & 0xf0) << 2) + ((i % 16) << 1);
        }
        // Each 16x16 map tile expands to a 2x2 group of 8x8 tiles (+1 right, +16/+17 below).
        mapScreenBuffer[tempArrayIndex] = currentValue;
        mapScreenBuffer[tempArrayIndex + 1] = currentValue + 1;
        mapScreenBuffer[tempArrayIndex + 32] = currentValue + 16;
        mapScreenBuffer[tempArrayIndex + 33] = currentValue + 17;
        // okay, now we have to update the byte for palettes. This is going to look a bit messy...
        // Start with the top 2 bytes
        currentValue = currentMap[i] & 0xc0;
        // Update where we are going to update with the palette data, which we store in the buffer.
        if ((i & 0x1f) == (reverseAttributes ? 0 : 16))
            j -= 8;
        if ((i & 0x01) == 0)
            j++;
        // Now based on where we are in the map, shift them appropriately.
        // This builds up the palette bytes - which comprise of 2 bits per 16x16 tile. It's a bit confusing...
        update_asset_table_based_on_current_value(reverseAttributes);
        // Every 16 frames, write the buffered data to the screen and start anew.
        ++bufferIndex;
        tempArrayIndex += 2;
        if (bufferIndex == 16) {
            bufferIndex = 0;
            tempArrayIndex = NAMETABLE_UPDATE_PREFIX_LENGTH;
            // Bunch of messy-looking stuff that tells neslib where to write this to the nametable, and how.
            mapScreenBuffer[0] = MSB(currentMemoryLocation) | NT_UPD_HORZ;
            mapScreenBuffer[1] = LSB(currentMemoryLocation);
            mapScreenBuffer[2] = 65;
            mapScreenBuffer[64 + NAMETABLE_UPDATE_PREFIX_LENGTH + 1] = NT_UPD_EOF;
            set_vram_update(mapScreenBuffer);
            ppu_wait_nmi();
            // Keep the HUD split in place while we're mid-redraw.
            if (xScrollPosition != -1) {
                split_y(xScrollPosition, yScrollPosition);
            }
            set_vram_update(NULL);
        }
    }
    // Draw the palette that we built up above.
    // Start by copying it into mapScreenBuffer, so we can tell neslib where this lives.
    for (i = 0; i != 0x38; ++i) {
        mapScreenBuffer[NAMETABLE_UPDATE_PREFIX_LENGTH + i] = assetTable[i];
    }
    mapScreenBuffer[0] = MSB(attributeTableAdr) | NT_UPD_HORZ;
    mapScreenBuffer[1] = LSB(attributeTableAdr);
    mapScreenBuffer[2] = 0x38;
    mapScreenBuffer[0x3b] = NT_UPD_EOF;
    set_vram_update(mapScreenBuffer);
    ppu_wait_nmi();
    if (xScrollPosition != -1) {
        split_y(xScrollPosition, yScrollPosition);
    }
    set_vram_update(NULL);
}
// Draw a row (technically two rows) of tiles onto the map. Breaks things up so we can hide
// the change behind the HUD while continuing to use vertical mirroring.
// This basically is the draw_current_map_to_nametable logic, but it stops after 32.
// NOTE: i and j MUST be maintained between calls to this method.
// Draw a row (technically two rows) of tiles onto the map. Breaks things up so we can hide
// the change behind the HUD while continuing to use vertical mirroring.
// This basically is the draw_current_map_to_nametable logic, but it stops after 32.
// Used by the scroll-down transition; keeps the raster split (at
// 240 + 48 + otherLoopIndex) alive every frame while drawing.
// NOTE: the globals i (map index), j (assetTable index), bufferIndex and
// otherLoopIndex MUST be maintained between calls to this method.
void draw_individual_row(int nametableAdr, int attributeTableAdr, char oliChange) {
    while(1) {
        // The top 2 bytes of map data are palette data. Skip that for now.
        currentValue = currentMap[i] & 0x3f;
        // This bumps the tile id up from the id for a 16x16 tile to an 8x8 tile on the real map
        currentValue = (((currentValue >> 3)) << 5) + ((currentValue % 8) << 1);
        if (bufferIndex == 0) {
            currentMemoryLocation = nametableAdr + ((i / 16) << 6) + ((i % 16) << 1);
        }
        // Figure out where to update the map, then store it so we don't keep calculating it.
        tempArrayIndex = NAMETABLE_UPDATE_PREFIX_LENGTH + (bufferIndex<<1);
        // Draw it to the map - each map tile becomes a 2x2 group of 8x8 tiles.
        mapScreenBuffer[tempArrayIndex] = currentValue;
        mapScreenBuffer[tempArrayIndex + 1] = currentValue + 1;
        mapScreenBuffer[tempArrayIndex + 32] = currentValue + 16;
        mapScreenBuffer[tempArrayIndex + 33] = currentValue + 17;
        // okay, now we have to update the byte for palettes. This is going to look a bit messy...
        // Start with the top 2 bits
        currentValue = currentMap[i] & 0xc0;
        // Update where we are going to update with the palette data, which we store in the buffer.
        if (i % 32 == 16)
            j -= 8;
        if ((i & 0x01) == 0)
            j++;
        // Now based on where we are in the map, shift them appropriately.
        // This builds up the palette bytes - which comprise of 2 bits per 16x16 tile. It's a bit confusing...
        update_asset_table_based_on_current_value(0);
        // Every 16 frames, write the buffered data to the screen and start anew.
        ++bufferIndex;
        if (bufferIndex == 8) {
            // Halfway point: advance the scroll animation a notch and keep the split alive.
            ppu_wait_nmi();
            if (xScrollPosition != -1) {
                otherLoopIndex += oliChange;
                scroll(0, 240 - HUD_PIXEL_HEIGHT);
                split_y(256, 240 + 48 + otherLoopIndex);
            }
        }
        if (bufferIndex == 16) {
            bufferIndex = 0;
            // Bunch of messy-looking stuff that tells neslib where to write this to the nametable, and how.
            mapScreenBuffer[0] = MSB(currentMemoryLocation) | NT_UPD_HORZ;
            mapScreenBuffer[1] = LSB(currentMemoryLocation);
            mapScreenBuffer[2] = 64;
            // We wrote the 64 tiles in the loop above; they're ready to go.
            // Add in another update for the palette
            tempArrayIndex = 64 + NAMETABLE_UPDATE_PREFIX_LENGTH;
            load_palette_to_map_screen_buffer(attributeTableAdr);
            set_vram_update(mapScreenBuffer);
            ppu_wait_nmi();
            if (xScrollPosition != -1) {
                scroll(0, 240 - HUD_PIXEL_HEIGHT);
                split_y(256, 240 + 48 + otherLoopIndex);
            }
            set_vram_update(NULL);
        }
        ++i;
        // Stop once we finish this pair of tile rows (32 map tiles).
        if (i % 32 == 0) {
            break;
        }
    }
}
// The same method as above, but offset slightly on y to allow for smooth scrolling up.
// The same method as draw_individual_row, but offset slightly on y to allow for smooth
// scrolling up: the split position is 240 - otherLoopIndex, the assetTable-row bump
// happens at i % 32 == 0 instead of 16, and the palette record is flushed once at the
// end of the row rather than with each 16-tile batch.
// NOTE: the globals i, j, bufferIndex and otherLoopIndex MUST be maintained between calls.
void draw_individual_row_offset_y(int nametableAdr, int attributeTableAdr, char oliChange) {
    while(1) {
        // The top 2 bytes of map data are palette data. Skip that for now.
        currentValue = currentMap[i] & 0x3f;
        // This bumps the tile id up from the id for a 16x16 tile to an 8x8 tile on the real map
        currentValue = (((currentValue >> 3)) << 5) + ((currentValue % 8) << 1);
        if (bufferIndex == 0) {
            currentMemoryLocation = nametableAdr + ((i / 16) << 6) + ((i % 16) << 1);
        }
        // Figure out where to update the map, then store it so we don't keep calculating it.
        tempArrayIndex = NAMETABLE_UPDATE_PREFIX_LENGTH + (bufferIndex<<1);
        // Draw it to the map
        mapScreenBuffer[tempArrayIndex] = currentValue;
        mapScreenBuffer[tempArrayIndex + 1] = currentValue + 1;
        mapScreenBuffer[tempArrayIndex + 32] = currentValue + 16;
        mapScreenBuffer[tempArrayIndex + 33] = currentValue + 17;
        // okay, now we have to update the byte for palettes. This is going to look a bit messy...
        // Start with the top 2 bits
        currentValue = currentMap[i] & 0xc0;
        // Update where we are going to update with the palette data, which we store in the buffer.
        if (i % 32 == 0)
            j -= 8;
        if ((i & 0x01) == 0)
            j++;
        // Now based on where we are in the map, shift them appropriately.
        // This builds up the palette bytes - which comprise of 2 bits per 16x16 tile. It's a bit confusing...
        update_asset_table_based_on_current_value(1);
        // Every 16 frames, write the buffered data to the screen and start anew.
        ++bufferIndex;
        if (bufferIndex == 8) {
            // Halfway point: advance the scroll animation and keep the split alive.
            ppu_wait_nmi();
            if (xScrollPosition != -1) {
                otherLoopIndex += oliChange;
                scroll(0, 240 - HUD_PIXEL_HEIGHT);
                split_y(256, 240 - otherLoopIndex);
            }
        }
        if (bufferIndex == 16) {
            bufferIndex = 0;
            // Bunch of messy-looking stuff that tells neslib where to write this to the nametable, and how.
            mapScreenBuffer[0] = MSB(currentMemoryLocation) | NT_UPD_HORZ;
            mapScreenBuffer[1] = LSB(currentMemoryLocation);
            mapScreenBuffer[2] = 64;
            // We wrote the 64 tiles in the loop above; they're ready to go.
            mapScreenBuffer[63 + NAMETABLE_UPDATE_PREFIX_LENGTH + 1] = NT_UPD_EOF;
            set_vram_update(mapScreenBuffer);
            ppu_wait_nmi();
            if (xScrollPosition != -1) {
                scroll(0, 240 - HUD_PIXEL_HEIGHT);
                split_y(256, 240 - otherLoopIndex);
            }
            set_vram_update(NULL);
        }
        ++i;
        if (i % 32 == 0) {
            // End of the row: flush the palette record for it, then stop.
            // Add in another update for the palette
            tempArrayIndex = 0;
            load_palette_to_map_screen_buffer(attributeTableAdr);
            set_vram_update(mapScreenBuffer);
            ppu_wait_nmi();
            if (xScrollPosition != -1) {
                scroll(0, 240 - HUD_PIXEL_HEIGHT);
                split_y(256, 240 - otherLoopIndex);
            }
            set_vram_update(NULL);
            break;
        }
    }
}
// Builds and flushes ONLY the attribute (palette) record for the current tile row,
// without writing any tile data. Used by the scroll-up transition for row 0, whose
// palette bytes can only be completed after the rest of the assetTable is loaded.
// NOTE: the globals i, j, bufferIndex and otherLoopIndex MUST be maintained between calls.
void draw_current_row_palette_only(int attributeTableAdr) {
    while(1) {
        // Get just the palette bits from this map tile
        currentValue = currentMap[i] & 0xc0;
        // Update where we are going to update with the palette data, which we store in the buffer.
        if (i % 32 == 0)
            j -= 8;
        if ((i & 0x01) == 0)
            j++;
        // Now based on where we are in the map, shift them appropriately.
        // This builds up the palette bytes - which comprise of 2 bits per 16x16 tile. It's a bit confusing...
        update_asset_table_based_on_current_value(1);
        ++bufferIndex;
        ++i;
        if (i % 32 == 0) {
            // End of the row: emit the palette record and keep the raster split alive.
            // Add in another update for the palette
            tempArrayIndex = 0;
            load_palette_to_map_screen_buffer(attributeTableAdr);
            set_vram_update(mapScreenBuffer);
            ppu_wait_nmi();
            if (xScrollPosition != -1) {
                scroll(0, 240 - HUD_PIXEL_HEIGHT);
                split_y(256, 240 - otherLoopIndex);
            }
            set_vram_update(NULL);
            break;
        }
    }
}
// Draws currentMap to nametable A with no scroll split active; the HUD row of the
// attribute table is set to palette 4 (clear_asset_table(1)).
void draw_current_map_to_a(void) {
    clear_asset_table(1);
    xScrollPosition = -1;
    yScrollPosition = 0;
    draw_current_map_to_nametable(NAMETABLE_A, NAMETABLE_A_ATTRS, 0);
}
// Draws currentMap to nametable B with no scroll split active and no HUD attribute row.
void draw_current_map_to_b(void) {
    clear_asset_table(0);
    xScrollPosition = -1;
    yScrollPosition = 0;
    draw_current_map_to_nametable(NAMETABLE_B, NAMETABLE_B_ATTRS, 0);
}
// Draws currentMap to nametable C with no scroll split active and no HUD attribute row.
void draw_current_map_to_c(void) {
    clear_asset_table(0);
    xScrollPosition = -1;
    yScrollPosition = 0;
    draw_current_map_to_nametable(NAMETABLE_C, NAMETABLE_C_ATTRS, 0);
}
// Draws currentMap to nametable D with no scroll split active and no HUD attribute row.
void draw_current_map_to_d(void) {
    clear_asset_table(0);
    xScrollPosition = -1;
    yScrollPosition = 0;
    draw_current_map_to_nametable(NAMETABLE_D, NAMETABLE_D_ATTRS, 0);
}
// A quick, low-tech glamour-free way to transition between screens.
// A quick, low-tech glamour-free way to transition between screens: fade to black,
// load the new map and sprites, reposition the player on the opposite screen edge,
// redraw nametable A, then fade back in and resume GAME_STATE_RUNNING.
void do_fade_screen_transition(void) {
    load_map();
    load_sprites();
    clear_asset_table(1);
    fade_out_fast();
    // Now that the screen is clear, migrate the player's sprite a bit..
    // Entering from one edge means appearing at the opposite edge of the new screen.
    if (playerDirection == SPRITE_DIRECTION_LEFT) {
        playerXPosition = (SCREEN_EDGE_RIGHT << PLAYER_POSITION_SHIFT);
    } else if (playerDirection == SPRITE_DIRECTION_RIGHT) {
        playerXPosition = (SCREEN_EDGE_LEFT << PLAYER_POSITION_SHIFT);
    } else if (playerDirection == SPRITE_DIRECTION_UP) {
        playerYPosition = (SCREEN_EDGE_BOTTOM << PLAYER_POSITION_SHIFT);
    } else if (playerDirection == SPRITE_DIRECTION_DOWN) {
        playerYPosition = (SCREEN_EDGE_TOP << PLAYER_POSITION_SHIFT);
    }
    // Actually move the sprite too, since otherwise this won't happen until after we un-blank the screen.
    banked_call(PRG_BANK_PLAYER_SPRITE, update_player_sprite);
    // Draw the updated map to the screen...
    draw_current_map_to_nametable(NAMETABLE_A, NAMETABLE_A_ATTRS, 0);
    // Update sprites once to make sure we don't show a flash of the old sprite positions.
    banked_call(PRG_BANK_MAP_SPRITES, update_map_sprites);
    fade_in_fast();
    // Aand we're back!
    gameState = GAME_STATE_RUNNING;
}
// Use a scrolling animation to move the player to the next screen.
// Use a scrolling animation to move the player to the next screen.
// Left/right: draw the new screen to nametable B and pan horizontally with split().
// Down/up: redraw rows progressively behind the HUD (vertical mirroring) while
// panning with split_y(), using the draw_individual_row* helpers. In every case the
// new map ends up on nametable A and the player is walked across the seam as the
// screen pans. Relies on sprite 0 for the raster split.
void do_scroll_screen_transition(void) {
    // First, draw the next tile onto b
    xScrollPosition = -1;
    yScrollPosition = 0;
    scroll(0, 240 - HUD_PIXEL_HEIGHT);
    // Draw a sprite into 0 to give us something to split on
    oam_spr(249, HUD_PIXEL_HEIGHT-NES_SPRITE_HEIGHT-0, HUD_SPRITE_ZERO_TILE_ID, 0x00, 0);
    ppu_wait_nmi();
    if (playerDirection == SPRITE_DIRECTION_RIGHT) {
        // Scroll right: new screen goes to B, then pan from 0 to 254.
        load_map();
        clear_asset_table(1);
        draw_current_map_to_nametable(NAMETABLE_B, NAMETABLE_B_ATTRS, 0);
        for (i = 0; i != 254; i+= SCREEN_SCROLL_LOOP_INCREMENT_LR) {
            playerXPosition -= SCREEN_SCROLL_MOVEMENT_INCREMENT_LR;
            banked_call(PRG_BANK_PLAYER_SPRITE, update_player_sprite);
            if (i % SCREEN_SCROLL_SPEED == 0) {
                ppu_wait_nmi();
                split(i, 0);
            }
        }
        xScrollPosition = 256;
        // Now, draw back to our original nametable...
        clear_asset_table(1);
        load_sprites();
        draw_current_map_to_nametable(NAMETABLE_A, NAMETABLE_A_ATTRS, 0);
    } else if (playerDirection == SPRITE_DIRECTION_LEFT) {
        // Scroll left: same as right, but pan from 512 down instead.
        load_map();
        clear_asset_table(1);
        draw_current_map_to_nametable(NAMETABLE_B, NAMETABLE_B_ATTRS, 0);
        for (i = 0; i != 254; i+= SCREEN_SCROLL_LOOP_INCREMENT_LR) {
            playerXPosition += SCREEN_SCROLL_MOVEMENT_INCREMENT_LR;
            banked_call(PRG_BANK_PLAYER_SPRITE, update_player_sprite);
            if (i % SCREEN_SCROLL_SPEED == 0) {
                ppu_wait_nmi();
                split(512-i, 0);
            }
        }
        xScrollPosition = 256;
        // Now, draw back to our original nametable...
        clear_asset_table(1);
        load_sprites();
        draw_current_map_to_nametable(NAMETABLE_A, NAMETABLE_A_ATTRS, 0);
    } else if (playerDirection == SPRITE_DIRECTION_DOWN) {
        // First draw original map to the other nametable
        clear_asset_table(0);
        draw_current_map_to_nametable(NAMETABLE_B + (SCREEN_WIDTH_TILES*6), NAMETABLE_B_ATTRS + 8, 1);
        load_map();
        // Loop over the screen, drawing the map in the space taken up by the hud every time we go 32 lines (2 tiles)
        // NOTE: We use both i and j in the loop inside one of the functions we're calling, so we needed another variable.
        clear_asset_table(0);
        i = 0;
        j = -1;
        xScrollPosition = 256;
        yScrollPosition = 0;
        for (otherLoopIndex = 0; otherLoopIndex < 240 - HUD_PIXEL_HEIGHT; otherLoopIndex += SCREEN_SCROLL_LOOP_INCREMENT_UD) {
            playerYPosition -= SCREEN_SCROLL_MOVEMENT_INCREMENT_UD;
            banked_call(PRG_BANK_PLAYER_SPRITE, update_player_sprite);
            if (otherLoopIndex % 32 == 0 && otherLoopIndex < 224) {
                // Time to reveal the next pair of tile rows behind the HUD.
                ppu_wait_nmi();
                split_y(256, 240 + HUD_PIXEL_HEIGHT + otherLoopIndex);
                draw_individual_row(NAMETABLE_B, NAMETABLE_B_ATTRS, SCREEN_SCROLL_LOOP_INCREMENT_UD);
            } else {
                if ((i % (SCREEN_SCROLL_SPEED*4)) == 0) {
                    ppu_wait_nmi();
                    split_y(256, 240 + HUD_PIXEL_HEIGHT + otherLoopIndex);
                }
            }
        }
        xScrollPosition = 256;
        // Bump otherLoopIndex back to where it was last animation frame; we don't want to kee updating.
        otherLoopIndex -= 2;
        // Now, draw back to our original nametable...
        clear_asset_table(1);
        load_sprites();
        ppu_wait_nmi();
        split_y(256, 240 + HUD_PIXEL_HEIGHT + otherLoopIndex);
        draw_current_map_to_nametable(NAMETABLE_A, NAMETABLE_A_ATTRS, 0);
    } else if (playerDirection == SPRITE_DIRECTION_UP) {
        // First draw original map to the other nametable
        clear_asset_table_skip_top();
        set_vram_update(NULL);
        bufferIndex = 0;
        load_map();
        xScrollPosition = 0;
        yScrollPosition = 0;
        // Draw the first line outside the general loop while this line is offscreen.
        i = 240 - (48 + 32);
        j = (i >> 2) + 7;
        otherLoopIndex = 0;
        draw_individual_row_offset_y(NAMETABLE_B + (SCREEN_WIDTH_TILES*6), NAMETABLE_B_ATTRS + 8, 0);
        // Loop over the screen, drawing the map in the space taken up by the hud every time we go 32 lines (2 tiles)
        // NOTE: We use both i and j in the loop inside one of the functions we're calling, so we needed another variable.
        i = 0;
        j = -1;
        // Preserve the low nibbles of the rows we pre-drew above so they aren't added twice.
        for (i = sizeof(assetTable) - 16; i < sizeof(assetTable) - 8 ; ++i) {
            assetTable[i] = assetTable[i] & 0xf0;
        }
        for (otherLoopIndex = 0; otherLoopIndex != 240 - HUD_PIXEL_HEIGHT; otherLoopIndex += SCREEN_SCROLL_LOOP_INCREMENT_UD) {
            playerYPosition += SCREEN_SCROLL_MOVEMENT_INCREMENT_UD;
            banked_call(PRG_BANK_PLAYER_SPRITE, update_player_sprite);
            if (otherLoopIndex % 32 == 0) {
                ppu_wait_nmi();
                split_y(256, 240 - (otherLoopIndex));
                // The 64 here is to hide this behind the hud, since we are drawing while still doing vertical mirroring.
                i = 240 - (HUD_PIXEL_HEIGHT + 64 + otherLoopIndex);
                // Special case for the asset table - we wrote to this part of it above already; this prevents glitching by adding it twice.
                if (i == 0) {
                    for (j = 0; j < 8; ++j) {
                        assetTable[j] = assetTable[j] & 0xf0;
                    }
                }
                j = (i >> 2) + 7;
                draw_individual_row_offset_y(NAMETABLE_B + (SCREEN_WIDTH_TILES*6), NAMETABLE_B_ATTRS + 8, SCREEN_SCROLL_LOOP_INCREMENT_UD);
                // Draw the palette for row 0 separately - have to do it here after we've loaded all of the assetTable stuff before.
                if (i == 0) {
                    j = (i >> 2) - 1;
                    draw_current_row_palette_only(NAMETABLE_B_ATTRS + 8);
                }
            } else {
                if (i % (SCREEN_SCROLL_SPEED<<1) == 0) {
                    ppu_wait_nmi();
                    split_y(256, 240 - (otherLoopIndex));
                }
            }
        }
        xScrollPosition = 256;
        load_sprites();
        // Now, draw back to our original nametable...
        ppu_wait_nmi();
        split_y(256, 240 - (otherLoopIndex));
        clear_asset_table(1);
        yScrollPosition = 240 - otherLoopIndex;
        draw_current_map_to_nametable(NAMETABLE_A, NAMETABLE_A_ATTRS, 0);
        // and bump the player back to the first screen now that we're done.
        scroll(0, 240 - HUD_PIXEL_HEIGHT);
        xScrollPosition = 0;
        yScrollPosition = 0;
        // Redraw to B to work around a bug that manifests itself if we scroll
        // up a second time, since we expect this to have been drawn to B in its normal location.
        clear_asset_table(1);
        draw_current_map_to_nametable(NAMETABLE_B, NAMETABLE_B_ATTRS, 0);
    }
    // and bump the player back to the first screen now that we're done.
    scroll(0, 240 - HUD_PIXEL_HEIGHT);
    // Hide sprite 0 - it has now served its purpose.
    oam_spr(SPRITE_OFFSCREEN, SPRITE_OFFSCREEN, HUD_SPRITE_ZERO_TILE_ID, 0x00, 0);
    xScrollPosition = -1;
    gameState = GAME_STATE_RUNNING;
}
|
348b7d0c6027ffdd7ad04df1c0577a8f68eec3e6
|
41eb0837713f297134529591b66f3d4d82bcf98e
|
/src/Raine/source/sdl/dialogs/translator.h
|
23a8eb424287fce82eae38d5897903e585615e49
|
[] |
no_license
|
AlexxandreFS/Batocera.PLUS
|
27b196b3cbb781b6fc99e62cad855396d1d5f8f2
|
997ee763ae7135fdf0c34a081e789918bd2eb169
|
refs/heads/master
| 2023-08-17T21:52:39.083687
| 2023-08-17T15:03:44
| 2023-08-17T15:03:44
| 215,869,486
| 135
| 57
| null | 2023-08-14T14:46:14
| 2019-10-17T19:23:42
|
C
|
UTF-8
|
C
| false
| false
| 114
|
h
|
translator.h
|
#ifdef __cplusplus
extern "C" {
#endif
/* Dialog entry points; `sel` is the menu selection index and the return value is
 * the dialog's result code. NOTE(review): exact semantics are defined in the
 * corresponding .c file - confirm there. */
int do_msg(int sel);
int do_screen(int sel);
#ifdef __cplusplus
}
#endif
|
8c60f85b65041decf02723a91a7c6bab31a8b0b6
|
35c04ea32351dc95bc18d46e5c70dda9c1e08668
|
/Examples/KDS/tinyK20/tinyK20_Waveshare_1.54/Sources/GUI_Paint.h
|
c1f9f07f64fe7166a333cc891104c190d1f8fce3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ErichStyger/mcuoneclipse
|
0f8e7a2056a26ed79d9d4a0afd64777ff0b2b2fe
|
04ad311b11860ae5f8285316010961a87fa06d0c
|
refs/heads/master
| 2023-08-28T22:54:08.501719
| 2023-08-25T15:11:44
| 2023-08-25T15:11:44
| 7,446,094
| 620
| 1,191
|
NOASSERTION
| 2020-10-16T03:13:28
| 2013-01-04T19:38:12
|
Batchfile
|
UTF-8
|
C
| false
| false
| 6,426
|
h
|
GUI_Paint.h
|
/*****************************************************************************
* | File : GUI_Paint.h
* | Author : Waveshare team
* | Function : Achieve drawing: draw points, lines, boxes, circles and
* their size, solid dotted line, solid rectangle hollow
* rectangle, solid circle hollow circle.
* | Info :
* Achieve display characters: Display a single character, string, number
* Achieve time display: adaptive size display time minutes and seconds
*----------------
* | This version: V2.0
* | Date : 2018-11-15
* | Info :
* 1.add: Paint_NewImage()
* Create an image's properties
* 2.add: Paint_SelectImage()
* Select the picture to be drawn
* 3.add: Paint_SetRotate()
* Set the direction of the cache
* 4.add: Paint_RotateImage()
* Can flip the picture, Support 0-360 degrees,
* but only 90.180.270 rotation is better
* 4.add: Paint_SetMirroring()
* Can Mirroring the picture, horizontal, vertical, origin
* 5.add: Paint_DrawString_CN()
* Can display Chinese(GB1312)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documnetation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS OR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
******************************************************************************/
/*
* GUI_Paint.h
*
* Created on: 25.03.2019
* Author: Patrick Loosli
*/
#ifndef SOURCES_GUI_PAINT_H_
#define SOURCES_GUI_PAINT_H_
#include "fonts.h"
/**
 * Image attributes - describes the drawing surface shared by all Paint_* calls.
 * NOTE(review): the Width*/Height* vs. *Memory/*Byte distinction presumably
 * separates the rotated logical view from the allocated buffer geometry -
 * confirm against GUI_Paint.c.
 **/
typedef struct {
    uint8_t *Image;           /* caller-supplied pixel buffer */
    unsigned int Width;
    unsigned int Height;
    unsigned int WidthMemory;
    unsigned int HeightMemory;
    unsigned int Color;
    unsigned int Rotate;      /* one of ROTATE_0/90/180/270 */
    unsigned int Mirror;      /* one of the MIRROR_IMAGE values */
    unsigned int WidthByte;
    unsigned int HeightByte;
} PAINT;
extern PAINT Paint;
/**
 * Display rotate - degrees, passed to Paint_NewImage()/Paint_SetRotate().
 **/
#define ROTATE_0 0
#define ROTATE_90 90
#define ROTATE_180 180
#define ROTATE_270 270
/**
 * Display Flip
 **/
typedef enum {
    MIRROR_NONE = 0x00,
    MIRROR_HORIZONTAL = 0x01,
    MIRROR_VERTICAL = 0x02,
    MIRROR_ORIGIN = 0x03,
} MIRROR_IMAGE;
#define MIRROR_IMAGE_DFT MIRROR_NONE
/**
 * image color - 1 byte per pixel at the API level; RED is aliased to BLACK here.
 **/
#define WHITE 0xFF
#define BLACK 0x00
#define RED BLACK
#define IMAGE_BACKGROUND WHITE
#define FONT_FOREGROUND BLACK
#define FONT_BACKGROUND WHITE
/**
 * The size of the point (line/dot thickness in pixels)
 **/
typedef enum {
    DOT_PIXEL_1X1 = 1, // 1 x 1
    DOT_PIXEL_2X2 , // 2 X 2
    DOT_PIXEL_3X3 , // 3 X 3
    DOT_PIXEL_4X4 , // 4 X 4
    DOT_PIXEL_5X5 , // 5 X 5
    DOT_PIXEL_6X6 , // 6 X 6
    DOT_PIXEL_7X7 , // 7 X 7
    DOT_PIXEL_8X8 , // 8 X 8
} DOT_PIXEL;
#define DOT_PIXEL_DFT DOT_PIXEL_1X1 //Default dot pilex
/**
 * Point size fill style
 **/
typedef enum {
    DOT_FILL_AROUND = 1, // dot pixel 1 x 1
    DOT_FILL_RIGHTUP , // dot pixel 2 X 2
} DOT_STYLE;
#define DOT_STYLE_DFT DOT_FILL_AROUND //Default dot pilex
/**
 * Line style, solid or dashed
 **/
typedef enum {
    LINE_STYLE_SOLID = 0,
    LINE_STYLE_DOTTED,
} LINE_STYLE;
/**
 * Whether the graphic is filled
 **/
typedef enum {
    DRAW_FILL_EMPTY = 0,
    DRAW_FILL_FULL,
} DRAW_FILL;
/**
 * Custom structure of a time attribute
 **/
typedef struct {
    unsigned int Year; //0000
    unsigned int Month; //1 - 12
    unsigned int Day; //1 - 30
    unsigned int Hour; //0 - 23
    unsigned int Min; //0 - 59
    unsigned int Sec; //0 - 59
} PAINT_TIME;
extern PAINT_TIME sPaint_time;
//init and Clear - set up the Paint state for a caller-owned image buffer.
void Paint_NewImage(uint8_t *image, unsigned int Width, unsigned int Height, unsigned int Rotate, unsigned int Color);
void Paint_SelectImage(uint8_t *image);
void Paint_SetRotate(unsigned int Rotate);
void Paint_SetMirroring(unsigned int mirror);
void Paint_SetPixel(unsigned int Xpoint, unsigned int Ypoint, unsigned int Color);
void Paint_Clear(unsigned int Color);
void Paint_ClearWindows(unsigned int Xstart, unsigned int Ystart, unsigned int Xend, unsigned int Yend, unsigned int Color);
//Drawing primitives - coordinates are in pixels on the selected image.
void Paint_DrawPoint(unsigned int Xpoint, unsigned int Ypoint, unsigned int Color, DOT_PIXEL Dot_Pixel, DOT_STYLE Dot_FillWay);
void Paint_DrawLine(unsigned int Xstart, unsigned int Ystart, unsigned int Xend, unsigned int Yend, unsigned int Color, LINE_STYLE Line_Style, DOT_PIXEL Dot_Pixel);
void Paint_DrawRectangle(unsigned int Xstart, unsigned int Ystart, unsigned int Xend, unsigned int Yend, unsigned int Color, DRAW_FILL Filled , DOT_PIXEL Dot_Pixel);
void Paint_DrawCircle(unsigned int X_Center, unsigned int Y_Center, unsigned int Radius, unsigned int Color, DRAW_FILL Draw_Fill , DOT_PIXEL Dot_Pixel);
//Display string / numbers / time using the sFONT / cFONT tables from fonts.h.
void Paint_DrawChar(unsigned int Xstart, unsigned int Ystart, const char Acsii_Char, sFONT* Font, unsigned int Color_Background, unsigned int Color_Foreground);
void Paint_DrawString_EN(unsigned int Xstart, unsigned int Ystart, const char * pString, sFONT* Font, unsigned int Color_Background, unsigned int Color_Foreground);
void Paint_DrawString_CN(unsigned int Xstart, unsigned int Ystart, const char * pString, cFONT* font, unsigned int Color_Background, unsigned int Color_Foreground);
void Paint_DrawNum(unsigned int Xpoint, unsigned int Ypoint, int32_t Nummber, sFONT* Font, unsigned int Color_Background, unsigned int Color_Foreground);
void Paint_DrawTime(unsigned int Xstart, unsigned int Ystart, PAINT_TIME *pTime, sFONT* Font, unsigned int Color_Background, unsigned int Color_Foreground);
//pic - blit a full-screen bitmap from a constant buffer.
void Paint_DrawBitMap(const unsigned char* image_buffer);
#endif /* SOURCES_GUI_PAINT_H_ */
|
557e2a027229fe46dd89162cde1304c99b60fdb7
|
7f6c235b0598353549959c18f69eefd20b766907
|
/libsrc/target/s1mp3/rs232/rs232_putchar.c
|
2bfe77c168a7a803bdf4616886b4a54425445a5e
|
[
"ClArtistic"
] |
permissive
|
z88dk/z88dk
|
46dfd4905f36d99333173cadd0a660839befc9f0
|
8b07f37cc43c5d9ffe69b563c80763491d8faff7
|
refs/heads/master
| 2023-09-04T19:29:49.254958
| 2023-09-03T20:51:24
| 2023-09-03T20:51:24
| 54,035,569
| 820
| 263
|
NOASSERTION
| 2023-09-05T11:09:04
| 2016-03-16T13:48:16
|
Assembly
|
UTF-8
|
C
| false
| false
| 369
|
c
|
rs232_putchar.c
|
#include <drivers/rs232.h>
extern port_info_s port_info;
/* Queue one byte on the RS232 output ring buffer.
 * The insert index is advanced modulo the buffer size (RS232_BUFFERMASK);
 * one slot is always left empty to distinguish full from empty. When the
 * buffer is full the byte is silently dropped, exactly as before. */
void RS232_Putchar( unsigned char c )
{
    int next_insert = (port_info.output_insert + 1) & RS232_BUFFERMASK;

    if (next_insert != port_info.output_remove) {
        /* Room available: store the byte, then publish the new insert index. */
        port_info.output_buffer[port_info.output_insert] = c;
        port_info.output_insert = next_insert;
    }
    /* else: buffer full - character discarded. */
}
|
3735cf48860de8ea10b6425d1f5a2a4fddffce6e
|
2247493654c160426c1655281aa7f1dca2bc98dd
|
/src/BoundaryConditions.C
|
d6419edb8a7f873b15439a577b38fd30642ccae8
|
[
"BSD-2-Clause"
] |
permissive
|
NaluCFD/Nalu
|
12999b0e3b76dbeab8fc184f38b65a13b1180bce
|
3286651e494894ac5948c41bf985f987d20c2370
|
refs/heads/master
| 2023-08-10T02:48:04.179859
| 2023-08-02T19:02:46
| 2023-08-02T19:02:46
| 69,712,764
| 138
| 185
|
NOASSERTION
| 2023-09-14T16:42:19
| 2016-10-01T01:25:20
|
C
|
UTF-8
|
C
| false
| false
| 4,768
|
c
|
BoundaryConditions.C
|
/*------------------------------------------------------------------------*/
/* Copyright 2014 Sandia Corporation. */
/* This software is released under the license detailed */
/* in the file, LICENSE, which is located in the top-level Nalu */
/* directory structure */
/*------------------------------------------------------------------------*/
#include <Realm.h>
#include <BoundaryConditions.h>
#include <NaluEnv.h>
// yaml for parsing..
#include <yaml-cpp/yaml.h>
#include <NaluParsing.h>
namespace sierra{
namespace nalu{
//==========================================================================
// Class Definition
//==========================================================================
// BoundaryCondition - do some stuff
//==========================================================================
//--------------------------------------------------------------------------
//-------- constructor -----------------------------------------------------
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
//-------- destructor ------------------------------------------------------
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
//-------- load -----------------------------------------------
//--------------------------------------------------------------------------
/// this is an example of a load() method with polymorphism - the type of
/// the node is determined from some information, then a particular type
/// of object is created and returned to the parent.
BoundaryCondition * BoundaryCondition::load(const YAML::Node & node)
{
if ( node["wall_boundary_condition"] ){
WallBoundaryConditionData& wallBC = *new WallBoundaryConditionData(*parent());
node >> wallBC;
NaluEnv::self().naluOutputP0() << "Wall BC name: " << wallBC.bcName_
<< " on " << wallBC.targetName_ << std::endl;
return &wallBC;
}
else if (node["inflow_boundary_condition"]) {
InflowBoundaryConditionData& inflowBC = *new InflowBoundaryConditionData(*parent());
node >> inflowBC;
NaluEnv::self().naluOutputP0() << "Inflow BC name: " << inflowBC.bcName_
<< " on " << inflowBC.targetName_ << std::endl;
return &inflowBC;
}
else if (node["open_boundary_condition"]) {
OpenBoundaryConditionData& openBC = *new OpenBoundaryConditionData(*parent());
node >> openBC;
NaluEnv::self().naluOutputP0() << "Open BC name: " << openBC.bcName_
<< " on " << openBC.targetName_ << std::endl;
return &openBC;
}
else if (node["symmetry_boundary_condition"]) {
SymmetryBoundaryConditionData& symmetryBC = *new SymmetryBoundaryConditionData(*parent());
node >> symmetryBC;
NaluEnv::self().naluOutputP0() << "Symmetry BC name: " << symmetryBC.bcName_
<< " on " << symmetryBC.targetName_ << std::endl;
return &symmetryBC;
}
else if (node["periodic_boundary_condition"]) {
PeriodicBoundaryConditionData& periodicBC = *new PeriodicBoundaryConditionData(*parent());
node >> periodicBC;
NaluEnv::self().naluOutputP0() << "Periodic BC name: " << periodicBC.bcName_
<< " between " << periodicBC.monarchSubject_.monarch_
<< " and "<< periodicBC.monarchSubject_.subject_ << std::endl;
return &periodicBC;
}
else if (node["non_conformal_boundary_condition"]) {
NonConformalBoundaryConditionData& nonConformalBC = *new NonConformalBoundaryConditionData(*parent());
node >> nonConformalBC;
NaluEnv::self().naluOutputP0() << "NonConformal BC name: " << nonConformalBC.bcName_
<< " using " << nonConformalBC.targetName_ << std::endl;
return &nonConformalBC;
}
else if (node["overset_boundary_condition"]) {
OversetBoundaryConditionData& oversetBC = *new OversetBoundaryConditionData(*parent());
node >> oversetBC;
NaluEnv::self().naluOutputP0() << "Overset BC name: " << oversetBC.bcName_ << std::endl;
return &oversetBC;
}
else {
throw std::runtime_error("parser error BoundaryConditions::load: no such bc type");
}
return 0;
}
Simulation* BoundaryCondition::root() { return parent()->root(); }
BoundaryConditions *BoundaryCondition::parent() { return &boundaryConditions_; }
Simulation* BoundaryConditions::root() { return parent()->root(); }
Realm *BoundaryConditions::parent() { return &realm_; }
} // namespace nalu
} // namespace Sierra
|
8ddc6f6d1dab171699f12a6836f10300d54f181f
|
fb47ab6337a71029dee71933e449cf7f6805fc0f
|
/platform/lpc43xx/include/platform/lpc43xx-sgpio.h
|
b23dd3e9a27a5686d305e8ce873bea398d9a71c4
|
[
"MIT"
] |
permissive
|
littlekernel/lk
|
7e7ba50b87b1f2e0b6e2f052c59249825c91975b
|
30dc320054f70910e1c1ee40a6948ee99672acec
|
refs/heads/master
| 2023-09-02T00:47:52.203963
| 2023-06-21T22:42:35
| 2023-06-21T22:42:35
| 3,058,456
| 3,077
| 618
|
MIT
| 2023-08-30T09:41:31
| 2011-12-27T19:19:36
|
C
|
UTF-8
|
C
| false
| false
| 3,499
|
h
|
lpc43xx-sgpio.h
|
/*
* Copyright (c) 2015 Brian Swetland
*
* Use of this source code is governed by a MIT-style
* license that can be found in the LICENSE file or at
* https://opensource.org/licenses/MIT
*/
#pragma once
#define SGPIO_OUT_CFG(n) (0x40101000 + ((n) * 4))
#define CFG_OUT_M1 0x00
#define CFG_OUT_M2A 0x01
#define CFG_OUT_M2B 0x02
#define CFG_OUT_M2C 0x03
#define CFG_OUT_GPIO 0x04
#define CFG_OUT_M4A 0x05
#define CFG_OUT_M4B 0x06
#define CFG_OUT_M4C 0x07
#define CFG_OUT_CLK 0x08
#define CFG_OUT_M8A 0x09
#define CFG_OUT_M8B 0x0A
#define CFG_OUT_M8C 0x0B
#define CFG_OE_GPIO 0x00
#define CFG_OE_M1 0x40
#define CFG_OE_M2 0x50
#define CFG_OE_M4 0x60
#define CFG_OE_M8 0x70
#define SLICE_CFG1(n) (0x40101040 + ((n) * 4))
#define CLK_USE_SLICE (0 << 0)
#define CLK_USE_PIN (1 << 0)
#define CLK_PIN_SGPIO8 (0 << 1)
#define CLK_PIN_SGPIO9 (1 << 1)
#define CLK_PIN_SGPIO10 (2 << 1)
#define CLK_PIN_SGPIO11 (3 << 1)
#define CLK_SLICE_D (0 << 3)
#define CLK_SLICE_H (1 << 3)
#define CLK_SLICE_O (2 << 3)
#define CLK_SLICE_P (3 << 3)
#define QUAL_ENABLE (0 << 5)
#define QUAL_DISABLE (1 << 5)
#define QUAL_USE_SLICE (2 << 5)
#define QUAL_USE_PIN (3 << 5)
#define QUAL_PIN_SGPIO8 (0 << 7)
#define QUAL_PIN_SGPIO9 (1 << 7)
#define QUAL_PIN_SGPIO10 (2 << 7)
#define QUAL_PIN_SGPIO11 (3 << 7)
#define QUAL_SLICE_A (0 << 9) // D for SLICE A
#define QUAL_SLICE_H (1 << 9) // O for SLICE H
#define QUAL_SLICE_I (2 << 9) // D for SLICE I
#define QUAL_SLICE_P (3 << 9) // O for SLICE P
#define CONCAT_PIN (0 << 11)
#define CONCAT_SLICE (1 << 11)
#define CONCAT_LOOP (0 << 12)
#define CONCAT_2_SLICE (1 << 12)
#define CONCAT_4_SLICE (2 << 12)
#define CONCAT_8_SLICE (3 << 12)
#define SLICE_CFG2(n) (0x40101080 + ((n) * 4))
#define MATCH_MODE (1 << 0)
#define CLK_GEN_INTERNAL (0 << 2) // from COUNTER
#define CLK_GEN_EXTERNAL (1 << 2) // from PIN or SLICE
#define INV_CLK_OUT (1 << 3)
#define SHIFT_1BPC (0 << 6)
#define SHIFT_2BPC (1 << 6)
#define SHIFT_4BPC (2 << 6)
#define SHIFT_8BPC (3 << 6)
#define INVERT_QUALIFIER (1 << 8)
#define SLICE_REG(n) (0x401010C0 + ((n) * 4)) // main shift reg
#define SLICE_SHADOW(n) (0x40101100 + ((n) * 4)) // swapped @ POS underflow
#define SLICE_PRESET(n) (0x40101140 + ((n) * 4)) // 12bit -> COUNT @ 0
#define SLICE_COUNT(n) (0x40101180 + ((n) * 4)) // 12 bit downcount
#define SLICE_POS(n) (0x401011C0 + ((n) * 4))
#define POS_POS(n) ((n) << 0) // value at start
#define POS_RESET(n) ((n) << 8) // load at underflow
#define SGPIO_IN (0x40101210)
#define SGPIO_OUT (0x40101214)
#define SGPIO_OEN (0x40101218)
#define SLICE_CTRL_ENABLE (0x4010121C)
#define SLICE_CTRL_DISABLE (0x40101220)
#define SLICE_XHG_STS (0x40101F2C)
#define SLICE_XHG_STS_CLR (0x40101F30)
#define SLICE_XHG_STS_SET (0x40101F34)
#define SLC_A 0
#define SLC_B 1
#define SLC_C 2
#define SLC_D 3
#define SLC_E 4
#define SLC_F 5
#define SLC_G 6
#define SLC_H 7
#define SLC_I 8
#define SLC_J 9
#define SLC_K 10
#define SLC_L 11
#define SLC_M 12
#define SLC_N 13
#define SLC_O 14
#define SLC_P 15
|
954dfee32f206d21abe0fd6592a22e764b8ecb02
|
79d343002bb63a44f8ab0dbac0c9f4ec54078c3a
|
/lib/libc/mingw/stdio/vasprintf.c
|
b441fa18c5767d96e3c0e472f7b310e5b2ecb4b7
|
[
"MIT",
"LGPL-2.0-or-later",
"ZPL-2.1",
"LicenseRef-scancode-public-domain"
] |
permissive
|
ziglang/zig
|
4aa75d8d3bcc9e39bf61d265fd84b7f005623fc5
|
f4c9e19bc3213c2bc7e03d7b06d7129882f39f6c
|
refs/heads/master
| 2023-08-31T13:16:45.980913
| 2023-08-31T05:50:29
| 2023-08-31T05:50:29
| 40,276,274
| 25,560
| 2,399
|
MIT
| 2023-09-14T21:09:50
| 2015-08-06T00:51:28
|
Zig
|
UTF-8
|
C
| false
| false
| 555
|
c
|
vasprintf.c
|
#define _GNU_SOURCE
#define __CRT__NO_INLINE
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
int vasprintf(char ** __restrict__ ret,
const char * __restrict__ format,
va_list ap) {
int len;
/* Get Length */
len = _vscprintf(format,ap);
if (len < 0) return -1;
/* +1 for \0 terminator. */
*ret = malloc(len + 1);
/* Check malloc fail*/
if (!*ret) return -1;
/* Write String */
_vsnprintf(*ret,len+1,format,ap);
/* Terminate explicitly */
(*ret)[len] = '\0';
return len;
}
|
2f363db52787b6137a8c033616005afc897207f7
|
5aa8006a9a48e999fa5d5092fe893b16b84b7999
|
/src/main/host/descriptor/tcp_cong_reno.h
|
aa19855c75f983eb1e1ad9ef62a71228ac815b6e
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
shadow/shadow
|
5a18451ac5a288f82f199ea59bc5adb783de9d7d
|
021dbe7672dbfede4c8093ccd260c61cd3986731
|
refs/heads/main
| 2023-09-02T19:47:30.119593
| 2023-09-01T00:18:23
| 2023-09-01T00:18:23
| 1,759,835
| 1,309
| 252
|
NOASSERTION
| 2023-09-14T19:17:48
| 2011-05-17T08:18:57
|
Rust
|
UTF-8
|
C
| false
| false
| 310
|
h
|
tcp_cong_reno.h
|
#ifndef SHD_TCP_CONG_RENO_H_
#define SHD_TCP_CONG_RENO_H_
#include "main/host/descriptor/tcp.h"
#include "main/host/descriptor/tcp_cong.h"
// the name linux gives for this congestion control algorithm
extern const char* TCP_CONG_RENO_NAME;
void tcp_cong_reno_init(TCP *tcp);
#endif // SHD_TCP_CONG_RENO_H_
|
f41177f630f9c74de256c7c49c8839cacaa54732
|
376e1818d427b5e4d32fa6dd6c7b71e9fd88afdb
|
/www/pound/patches/patch-svc.c
|
4d213bb69c2fdbdbb92cb3a44879698594fa0561
|
[] |
no_license
|
NetBSD/pkgsrc
|
a0732c023519650ef821ab89c23ab6ab59e25bdb
|
d042034ec4896cc5b47ed6f2e5b8802d9bc5c556
|
refs/heads/trunk
| 2023-09-01T07:40:12.138283
| 2023-09-01T05:25:19
| 2023-09-01T05:25:19
| 88,439,572
| 321
| 138
| null | 2023-07-12T22:34:14
| 2017-04-16T20:04:15
| null |
UTF-8
|
C
| false
| false
| 7,628
|
c
|
patch-svc.c
|
$NetBSD: patch-svc.c,v 1.1 2019/09/09 09:08:08 nia Exp $
[PATCH] Support for Openssl 1.1
https://github.com/graygnuorg/pound/commit/a2c9dde4d055ea8942afb150b7fc3a807d4e5d60.patch
--- svc.c.orig 2018-05-11 10:16:05.000000000 +0000
+++ svc.c
@@ -27,10 +27,17 @@
#include "pound.h"
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+# define TABNODE_GET_DOWN_LOAD(t) lh_TABNODE_get_down_load(t)
+# define TABNODE_SET_DOWN_LOAD(t,n) lh_TABNODE_set_down_load(t,n)
+#else
#ifndef LHASH_OF
#define LHASH_OF(x) LHASH
#define CHECKED_LHASH_OF(type, h) h
#endif
+# define TABNODE_GET_DOWN_LOAD(t) (CHECKED_LHASH_OF(TABNODE, t)->down_load)
+# define TABNODE_SET_DOWN_LOAD(t,n) (CHECKED_LHASH_OF(TABNODE, t)->down_load = n)
+#endif
/*
* Add a new key/content pair to a hash table
@@ -58,7 +65,9 @@ t_add(LHASH_OF(TABNODE) *const tab, cons
}
memcpy(t->content, content, cont_len);
t->last_acc = time(NULL);
-#if OPENSSL_VERSION_NUMBER >= 0x10000000L
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ if((old = lh_TABNODE_insert(tab, t)) != NULL) {
+#elif OPENSSL_VERSION_NUMBER >= 0x10000000L
if((old = LHM_lh_insert(TABNODE, tab, t)) != NULL) {
#else
if((old = (TABNODE *)lh_insert(tab, t)) != NULL) {
@@ -82,7 +91,9 @@ t_find(LHASH_OF(TABNODE) *const tab, cha
TABNODE t, *res;
t.key = key;
-#if OPENSSL_VERSION_NUMBER >= 0x10000000L
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ if((res = lh_TABNODE_retrieve(tab, &t)) != NULL) {
+#elif OPENSSL_VERSION_NUMBER >= 0x10000000L
if((res = (TABNODE *)LHM_lh_retrieve(TABNODE, tab, &t)) != NULL) {
#else
if((res = (TABNODE *)lh_retrieve(tab, &t)) != NULL) {
@@ -102,7 +113,9 @@ t_remove(LHASH_OF(TABNODE) *const tab, c
TABNODE t, *res;
t.key = key;
-#if OPENSSL_VERSION_NUMBER >= 0x10000000L
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ if((res = lh_TABNODE_delete(tab, &t)) != NULL) {
+#elif OPENSSL_VERSION_NUMBER >= 0x10000000L
if((res = LHM_lh_delete(TABNODE, tab, &t)) != NULL) {
#else
if((res = (TABNODE *)lh_delete(tab, &t)) != NULL) {
@@ -127,7 +140,9 @@ t_old_doall_arg(TABNODE *t, ALL_ARG *a)
TABNODE *res;
if(t->last_acc < a->lim)
-#if OPENSSL_VERSION_NUMBER >= 0x10000000L
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ if((res = lh_TABNODE_delete(a->tab, t)) != NULL) {
+#elif OPENSSL_VERSION_NUMBER >= 0x10000000L
if((res = LHM_lh_delete(TABNODE, a->tab, t)) != NULL) {
#else
if((res = lh_delete(a->tab, t)) != NULL) {
@@ -145,6 +160,10 @@ IMPLEMENT_LHASH_DOALL_ARG_FN(t_old, TABN
IMPLEMENT_LHASH_DOALL_ARG_FN(t_old, TABNODE *, ALL_ARG *)
#endif
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+IMPLEMENT_LHASH_DOALL_ARG(TABNODE,ALL_ARG);
+#endif
+
/*
* Expire all old nodes
*/
@@ -156,14 +175,16 @@ t_expire(LHASH_OF(TABNODE) *const tab, c
a.tab = tab;
a.lim = lim;
- down_load = CHECKED_LHASH_OF(TABNODE, tab)->down_load;
- CHECKED_LHASH_OF(TABNODE, tab)->down_load = 0;
-#if OPENSSL_VERSION_NUMBER >= 0x10000000L
+ down_load = TABNODE_GET_DOWN_LOAD(tab);
+ TABNODE_SET_DOWN_LOAD(tab, 0);
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ lh_TABNODE_doall_ALL_ARG(tab, t_old_doall_arg, &a);
+#elif OPENSSL_VERSION_NUMBER >= 0x10000000L
LHM_lh_doall_arg(TABNODE, tab, LHASH_DOALL_ARG_FN(t_old), ALL_ARG, &a);
#else
lh_doall_arg(tab, LHASH_DOALL_ARG_FN(t_old), &a);
#endif
- CHECKED_LHASH_OF(TABNODE, tab)->down_load = down_load;
+ TABNODE_SET_DOWN_LOAD(tab, down_load);
return;
}
@@ -173,7 +194,9 @@ t_cont_doall_arg(TABNODE *t, ALL_ARG *ar
TABNODE *res;
if(memcmp(t->content, arg->content, arg->cont_len) == 0)
-#if OPENSSL_VERSION_NUMBER >= 0x10000000L
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ if((res = lh_TABNODE_delete(arg->tab, t)) != NULL) {
+#elif OPENSSL_VERSION_NUMBER >= 0x10000000L
if((res = LHM_lh_delete(TABNODE, arg->tab, t)) != NULL) {
#else
if((res = lh_delete(arg->tab, t)) != NULL) {
@@ -203,15 +226,16 @@ t_clean(LHASH_OF(TABNODE) *const tab, vo
a.tab = tab;
a.content = content;
a.cont_len = cont_len;
- down_load = CHECKED_LHASH_OF(TABNODE, tab)->down_load;
- CHECKED_LHASH_OF(TABNODE, tab)->down_load = 0;
-#if OPENSSL_VERSION_NUMBER >= 0x10000000L
+ down_load = TABNODE_GET_DOWN_LOAD(tab);
+ TABNODE_SET_DOWN_LOAD(tab, 0);
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ lh_TABNODE_doall_ALL_ARG(tab, t_cont_doall_arg, &a);
+#elif OPENSSL_VERSION_NUMBER >= 0x10000000L
LHM_lh_doall_arg(TABNODE, tab, LHASH_DOALL_ARG_FN(t_cont), ALL_ARG, &a);
#else
lh_doall_arg(tab, LHASH_DOALL_ARG_FN(t_cont), &a);
#endif
- CHECKED_LHASH_OF(TABNODE, tab)->down_load = down_load;
- return;
+ TABNODE_SET_DOWN_LOAD(tab, down_load);
}
/*
@@ -1259,6 +1283,31 @@ RSA_tmp_callback(/* not used */SSL *ssl,
return res;
}
+static int
+generate_key(RSA **ret_rsa, unsigned long bits)
+{
+#if OPENSSL_VERSION_NUMBER > 0x00908000L
+ int rc = 0;
+ RSA *rsa;
+
+ rsa = RSA_new();
+ if (rsa) {
+ BIGNUM *bne = BN_new();
+ if (BN_set_word(bne, RSA_F4))
+ rc = RSA_generate_key_ex(rsa, bits, bne, NULL);
+ BN_free(bne);
+ if (rc)
+ *ret_rsa = rsa;
+ else
+ RSA_free(rsa);
+ }
+ return rc;
+#else
+ *ret_rsa = RSA_generate_key(bits, RSA_F4, NULL, NULL);
+ return *ret_rsa != NULL;
+#endif
+}
+
/*
* Periodically regenerate ephemeral RSA keys
* runs every T_RSA_KEYS seconds
@@ -1271,8 +1320,9 @@ do_RSAgen(void)
RSA *t_RSA1024_keys[N_RSA_KEYS];
for(n = 0; n < N_RSA_KEYS; n++) {
- t_RSA512_keys[n] = RSA_generate_key(512, RSA_F4, NULL, NULL);
- t_RSA1024_keys[n] = RSA_generate_key(1024, RSA_F4, NULL, NULL);
+ /* FIXME: Error handling */
+ generate_key(&t_RSA512_keys[n], 512);
+ generate_key(&t_RSA1024_keys[n], 1024);
}
if(ret_val = pthread_mutex_lock(&RSA_mut))
logmsg(LOG_WARNING, "thr_RSAgen() lock: %s", strerror(ret_val));
@@ -1326,11 +1376,11 @@ init_timer(void)
* Pre-generate ephemeral RSA keys
*/
for(n = 0; n < N_RSA_KEYS; n++) {
- if((RSA512_keys[n] = RSA_generate_key(512, RSA_F4, NULL, NULL)) == NULL) {
+ if(!generate_key(&RSA512_keys[n], 512)) {
logmsg(LOG_WARNING,"RSA_generate(%d, 512) failed", n);
return;
}
- if((RSA1024_keys[n] = RSA_generate_key(1024, RSA_F4, NULL, NULL)) == NULL) {
+ if(!generate_key(&RSA1024_keys[n], 1024)) {
logmsg(LOG_WARNING,"RSA_generate(%d, 1024) failed", n);
return;
}
@@ -1417,6 +1467,10 @@ IMPLEMENT_LHASH_DOALL_ARG_FN(t_dump, TAB
IMPLEMENT_LHASH_DOALL_ARG_FN(t_dump, TABNODE *, DUMP_ARG *)
#endif
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+IMPLEMENT_LHASH_DOALL_ARG(TABNODE,DUMP_ARG);
+#endif
+
/*
* write sessions to the control socket
*/
@@ -1427,7 +1481,9 @@ dump_sess(const int control_sock, LHASH_
a.control_sock = control_sock;
a.backends = backends;
-#if OPENSSL_VERSION_NUMBER >= 0x10000000L
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ lh_TABNODE_doall_DUMP_ARG(sess, t_dump_doall_arg, &a);
+#elif OPENSSL_VERSION_NUMBER >= 0x10000000L
LHM_lh_doall_arg(TABNODE, sess, LHASH_DOALL_ARG_FN(t_dump), DUMP_ARG, &a);
#else
lh_doall_arg(sess, LHASH_DOALL_ARG_FN(t_dump), &a);
@@ -1661,6 +1717,13 @@ thr_control(void *arg)
}
}
+#ifndef SSL3_ST_SR_CLNT_HELLO_A
+# define SSL3_ST_SR_CLNT_HELLO_A (0x110|SSL_ST_ACCEPT)
+#endif
+#ifndef SSL23_ST_SR_CLNT_HELLO_A
+# define SSL23_ST_SR_CLNT_HELLO_A (0x210|SSL_ST_ACCEPT)
+#endif
+
void
SSLINFO_callback(const SSL *ssl, int where, int rc)
{
|
d42045fcf70875c84dcaf7f782120ec7e21ac2ba
|
d9e96244515264268d6078650fa707f34d94ee7a
|
/Working/Game/xleres/Surface.h
|
93e8393e4e390a12e579ebdf5b24a847b2009124
|
[
"MIT"
] |
permissive
|
xlgames-inc/XLE
|
45c89537c10561e216367a2e3bcd7d1c92b1b039
|
69cc4f2aa4faf12ed15bb4291c6992c83597899c
|
refs/heads/master
| 2022-06-29T17:16:11.491925
| 2022-05-04T00:29:28
| 2022-05-04T00:29:28
| 29,281,799
| 396
| 102
| null | 2016-01-04T13:35:59
| 2015-01-15T05:07:55
|
C++
|
UTF-8
|
C
| false
| false
| 10,543
|
h
|
Surface.h
|
// Copyright 2015 XLGAMES Inc.
//
// Distributed under the MIT License (See
// accompanying file "LICENSE" or the website
// http://www.opensource.org/licenses/mit-license.php)
#if !defined(SURFACE_H)
#define SURFACE_H
#include "CommonResources.h"
#include "MainGeometry.h"
#include "SurfaceAlgorithm.h"
#include "Transform.h"
#include "Animation\SkinTransform.h"
// Accessors for properties in MainGeometry structs
///////////////////////////////////////////////////////////////////////////////////////////////////
// VSInput //
///////////////////////////////////////////////////////////////////////////////////////////////////
float3 VSIn_GetLocalPosition(VSInput input)
{
#if GEO_HAS_SKIN_WEIGHTS
return TransformPositionThroughSkinning(input, input.position.xyz);
#else
return input.position.xyz;
#endif
}
float4 VSIn_GetLocalTangent(VSInput input)
{
#if (GEO_HAS_TANGENT_FRAME==1)
return float4(TransformDirectionVectorThroughSkinning(input, input.tangent.xyz), input.tangent.w);
#else
return 0.0.xxxx;
#endif
}
#if GEO_HAS_NORMAL==1
float3 VSIn_GetLocalNormal(VSInput input)
{
#if GEO_V_NORMAL_UNSIGNED==1
return TransformDirectionVectorThroughSkinning(input, input.normal * 2.0.xxx - 1.0.xxx);
#else
return TransformDirectionVectorThroughSkinning(input, input.normal);
#endif
}
#endif
float3 VSIn_GetLocalBitangent(VSInput input)
{
#if (GEO_HAS_BITANGENT==1)
return TransformDirectionVectorThroughSkinning(input, input.bitangent.xyz);
#elif (GEO_HAS_TANGENT_FRAME==1) && (GEO_HAS_NORMAL==1)
float4 tangent = VSIn_GetLocalTangent(input);
float3 normal = VSIn_GetLocalNormal(input);
return cross(tangent.xyz, normal) * GetWorldTangentFrameHandiness(tangent);
#else
return 0.0.xxx;
#endif
}
#if GEO_HAS_NORMAL!=1
float3 VSIn_GetLocalNormal(VSInput input)
{
#if GEO_HAS_TANGENT_FRAME==1
// if the tangent and bitangent are unit-length and perpendicular, then we
// shouldn't have to normalize here. Since the inputs are coming from the
// vertex buffer, let's assume it's ok
float4 localTangent = VSIn_GetLocalTangent(input);
float3 localBitangent = VSIn_GetLocalBitangent(input);
return NormalFromTangents(localTangent.xyz, localBitangent.xyz, GetWorldTangentFrameHandiness(localTangent));
#else
return float3(0,0,1);
#endif
}
#endif
#if (GEO_HAS_TANGENT_FRAME==1)
TangentFrameStruct VSIn_GetWorldTangentFrame(VSInput input)
{
// If we can guarantee no scale on local-to-world, we can skip normalize of worldtangent/worldbitangent
float4 localTangent = VSIn_GetLocalTangent(input);
float3 worldTangent = LocalToWorldUnitVector(localTangent.xyz);
float3 worldBitangent = LocalToWorldUnitVector(VSIn_GetLocalBitangent(input));
float handiness = GetWorldTangentFrameHandiness(localTangent);
// There's some issues here. If local-to-world has a flip on it, it might flip
// the direction we get from the cross product here... That's probably not
// what's expected.
// (worldNormal shouldn't need to be normalized, so long as worldTangent
// and worldNormal are perpendicular to each other)
#if GEO_HAS_NORMAL==1
float3 worldNormal = LocalToWorldUnitVector(VSIn_GetLocalNormal(input));
#else
float3 worldNormal = NormalFromTangents(worldTangent, worldBitangent, handiness);
#endif
return BuildTangentFrame(worldTangent, worldBitangent, worldNormal, handiness);
}
#endif
///////////////////////////////////////////////////////////////////////////////////////////////////
// VSOutput //
///////////////////////////////////////////////////////////////////////////////////////////////////
float2 GetTexCoord(VSOutput geo)
{
#if OUTPUT_TEXCOORD==1 /////////////////////////////////////////////
return geo.texCoord;
#else
return 0.0.xx;
#endif //////////////////////////////////////////////////////////////
}
float4 GetColor(VSOutput geo)
{
#if OUTPUT_COLOUR>=2 ////////////////////////////////////////////////
return float4(geo.colour.rgb, 1.f);
#elif OUTPUT_COLOUR>=1
return geo.colour;
#else
return 1.0.xxxx;
#endif //////////////////////////////////////////////////////////////
}
float3 GetWorldViewVector(VSOutput geo)
{
#if OUTPUT_WORLD_VIEW_VECTOR==1
return geo.worldViewVector;
#elif OUTPUT_WORLD_POSITION==1
return WorldSpaceView.xyz - geo.worldPosition; // if we have either the world-view-world or world-position it's a bit redundant to have the other
#else
return 0.0.xxx;
#endif
}
float3 GetLocalViewVector(VSOutput geo)
{
#if OUTPUT_LOCAL_VIEW_VECTOR==1
return geo.localViewVector;
#else
return 0.0.xxx;
#endif
}
float3 GetWorldPosition(VSOutput geo)
{
#if OUTPUT_WORLD_POSITION==1
return geo.worldPosition;
#elif OUTPUT_WORLD_VIEW_VECTOR==1
return WorldSpaceView.xyz - geo.worldViewVector; // if we have either the world-view-world or world-position it's a bit redundant to have the other
#else
return 0.0.xxx;
#endif
}
#if (OUTPUT_TANGENT_FRAME==1)
TangentFrameStruct GetWorldTangentFrame(VSOutput geo)
{
TangentFrameStruct result;
result.tangent = geo.tangent.xyz;
result.bitangent = geo.bitangent;
result.normal = geo.normal;
// note -- The denormalization caused by per vertex interpolation
// is fairly subtle. We could perhaps skip this on all but
// the highest quality modes..?
// Also, there are other options:
// - higher order interpolation across the triangle using geometry shaders
// - using cotangent stuff particularly with derivative maps
const bool doRenormalize = true;
if (doRenormalize) {
result.tangent = normalize(result.tangent);
result.bitangent = normalize(result.bitangent);
result.normal = normalize(result.normal);
}
result.handiness = 1.f; // (handiness value is lost in this case)
return result;
}
#endif
#if (OUTPUT_LOCAL_TANGENT_FRAME==1)
TangentFrameStruct GetLocalTangentFrame(VSOutput geo)
{
TangentFrameStruct result;
result.tangent = normalize(geo.localTangent.xyz);
result.bitangent = normalize(geo.localBitangent);
result.handiness = GetWorldTangentFrameHandiness(geo.localTangent);
#if (OUTPUT_LOCAL_NORMAL)
result.normal = normalize(geo.localNormal);
#else
// note -- it's possible that the tangent and bitangent could
// fall out of alignment during edge interpolation. That
// could potentially result in a non-unit length normal
// (but it would also result in other subtle artefacts in
// the normal map. Let's try to cheat and avoid the normalize,
// (and just assume it's close to unit length)
result.normal = NormalFromTangents(result.tangent, result.bitangent, result.handiness);
#endif
return result;
}
#endif
float3 GetVertexNormal(VSOutput geo)
{
#if OUTPUT_TANGENT_FRAME==1
return normalize(geo.normal);
#elif OUTPUT_LOCAL_TANGENT_FRAME==1
return GetLocalTangentFrame(geo).normal;
#elif (OUTPUT_NORMAL==1)
return normalize(geo.normal);
#else
return 0.0.xxx;
#endif
}
float3 SampleDefaultNormalMap(VSOutput geo)
{
#if defined(RES_HAS_NormalsTexture_DXT)
bool dxtNormalMap = RES_HAS_NormalsTexture_DXT==1;
#else
bool dxtNormalMap = false;
#endif
return SampleNormalMap(NormalsTexture, DefaultSampler, dxtNormalMap, GetTexCoord(geo));
}
float3 TransformNormalMapToWorld(float3 normalTextureSample, VSOutput geo)
{
#if OUTPUT_TANGENT_FRAME==1
#if (RES_HAS_NormalsTexture==1) && (OUTPUT_TEXCOORD==1)
TangentFrameStruct tangentFrame = GetWorldTangentFrame(geo);
float3x3 normalsTextureToWorld = float3x3(tangentFrame.tangent.xyz, tangentFrame.bitangent, tangentFrame.normal);
return mul(normalTextureSample, normalsTextureToWorld);
#else
return normalize(geo.normal);
#endif
#elif OUTPUT_LOCAL_TANGENT_FRAME==1
#if (RES_HAS_NormalsTexture==1) && (OUTPUT_TEXCOORD==1)
TangentFrameStruct localTangentFrame = GetLocalTangentFrame(geo);
float3x3 normalsTextureToLocal = float3x3(localTangentFrame.tangent.xyz, localTangentFrame.bitangent, localTangentFrame.normal);
float3 localNormal = mul(normalTextureSample, normalsTextureToLocal);
// note -- Problems when there is a scale on LocalToWorld here.
// There are many objects with uniform scale values, and they require a normalize here.
// Ideally we'd have a LocalToWorld matrix with the scale removed,
// or at least a "uniform scale" scalar to remove the scaling
return normalize(mul(GetLocalToWorldUniformScale(), localNormal));
#else
return normalize(mul(GetLocalToWorldUniformScale(), GetLocalTangentFrame(geo).normal));
#endif
#elif (OUTPUT_NORMAL==1) && (RES_HAS_NormalsTexture==1) && (OUTPUT_TEXCOORD==1) && ((OUTPUT_WORLD_VIEW_VECTOR==1) || (OUTPUT_WORLD_VIEW_VECTOR==1))
float3x3 normalsTextureToWorld = AutoCotangentFrame(normalize(geo.normal), GetWorldViewVector(geo), geo.texCoord);
// Note -- matrix multiply opposite from normal (so we can initialise normalsTextureToWorld easily)
return mul(normalTextureSample, normalsTextureToWorld);
#elif (OUTPUT_LOCAL_NORMAL==1) && (RES_HAS_NormalsTexture==1) && (OUTPUT_TEXCOORD==1) && (OUTPUT_LOCAL_VIEW_VECTOR==1)
float3x3 normalsTextureToWorld = AutoCotangentFrame(normalize(geo.localNormal), GetLocalViewVector(geo), geo.texCoord);
// Note -- matrix multiply opposite from normal (so we can initialise normalsTextureToWorld easily)
return mul(normalTextureSample, normalsTextureToWorld);
#elif (OUTPUT_NORMAL==1)
return normalize(geo.normal);
#else
return 0.0.xxx;
#endif
}
float3 GetNormal(VSOutput geo)
{
return TransformNormalMapToWorld(SampleDefaultNormalMap(geo), geo);
}
void DoAlphaTest(VSOutput geo, float alphaThreshold)
{
#if (OUTPUT_TEXCOORD==1) && ((MAT_ALPHA_TEST==1)||(MAT_ALPHA_TEST_PREDEPTH==1))
#if (USE_CLAMPING_SAMPLER_FOR_DIFFUSE==1)
AlphaTestAlgorithm(DiffuseTexture, ClampingSampler, geo.texCoord, alphaThreshold);
#else
AlphaTestAlgorithm(DiffuseTexture, MaybeAnisotropicSampler, geo.texCoord, alphaThreshold);
#endif
#endif
}
#endif
|
8a42c2251c8b8284532d4589d83e5346ea0f345a
|
70ffcb065027032a399f44b9ec91e7b61d73583d
|
/channeld/full_channel.h
|
c0fbe6b7d4c81d11b668938593b2302d1e566590
|
[
"MIT"
] |
permissive
|
ElementsProject/lightning
|
4e260841b2ebad8c772a5ff91ef1ebbc3fe1ad71
|
990096f904e26386527a4eddd8d3262464bacabd
|
refs/heads/master
| 2023-09-01T07:11:34.794039
| 2023-08-31T09:55:14
| 2023-08-31T22:02:14
| 37,350,472
| 2,812
| 961
|
NOASSERTION
| 2023-09-14T19:33:22
| 2015-06-13T00:04:22
|
C
|
UTF-8
|
C
| false
| false
| 11,506
|
h
|
full_channel.h
|
/* This is the full channel routines, with HTLC support. */
#ifndef LIGHTNING_CHANNELD_FULL_CHANNEL_H
#define LIGHTNING_CHANNELD_FULL_CHANNEL_H
#include "config.h"
#include <channeld/channeld_htlc.h>
#include <channeld/full_channel_error.h>
#include <common/initial_channel.h>
#include <common/sphinx.h>
struct channel_id;
struct existing_htlc;
/**
* new_full_channel: Given initial fees and funding, what is initial state?
* @ctx: tal context to allocate return value from.
* @cid: The channel id.
* @funding: The commitment transaction id/output number.
* @minimum_depth: The minimum confirmations needed for funding transaction.
* @blockheight_states: The blockheight update states.
* @lease_expiry: The block the lease on this channel expires at; 0 if no lease.
* @funding_sats: The commitment transaction amount.
* @local_msat: The amount for the local side (remainder goes to remote)
* @fee_states: The fee update states.
* @local: local channel configuration
* @remote: remote channel configuration
* @local_basepoints: local basepoints.
* @remote_basepoints: remote basepoints.
* @local_fundingkey: local funding key
* @remote_fundingkey: remote funding key
* @type: type for this channel
* @option_wumbo: large channel negotiated.
* @opener: which side initiated it.
*
* Returns state, or NULL if malformed.
*/
struct channel *new_full_channel(const tal_t *ctx,
const struct channel_id *cid,
const struct bitcoin_outpoint *funding,
u32 minimum_depth,
const struct height_states *blockheight_states,
u32 lease_expiry,
struct amount_sat funding_sats,
struct amount_msat local_msat,
const struct fee_states *fee_states TAKES,
const struct channel_config *local,
const struct channel_config *remote,
const struct basepoints *local_basepoints,
const struct basepoints *remote_basepoints,
const struct pubkey *local_funding_pubkey,
const struct pubkey *remote_funding_pubkey,
const struct channel_type *type TAKES,
bool option_wumbo,
enum side opener);
/**
* channel_txs: Get the current commitment and htlc txs for the channel.
* @ctx: tal context to allocate return value from.
* @channel: The channel to evaluate
* @htlc_map: Pointer to htlcs for each tx output (allocated off @ctx).
* @direct_outputs: If non-NULL, fill with pointers to the direct (non-HTLC) outputs (or NULL if none).
* @funding_wscript: Pointer to wscript for the funding tx output
* @per_commitment_point: Per-commitment point to determine keys
* @commitment_number: The index of this commitment.
* @side: which side to get the commitment transaction for
*
* Returns the unsigned commitment transaction for the committed state
* for @side, followed by the htlc transactions in output order and
* fills in @htlc_map, or NULL on key derivation failure.
*/
struct bitcoin_tx **channel_txs(const tal_t *ctx,
const struct htlc ***htlcmap,
struct wally_tx_output *direct_outputs[NUM_SIDES],
const u8 **funding_wscript,
const struct channel *channel,
const struct pubkey *per_commitment_point,
u64 commitment_number,
enum side side);
/* Version of `channel_txs` that lets you specify a custom funding outpoint
* and funding_sats.
*/
struct bitcoin_tx **channel_splice_txs(const tal_t *ctx,
const struct bitcoin_outpoint *funding,
struct amount_sat funding_sats,
const struct htlc ***htlcmap,
struct wally_tx_output *direct_outputs[NUM_SIDES],
const u8 **funding_wscript,
const struct channel *channel,
const struct pubkey *per_commitment_point,
u64 commitment_number,
enum side side,
s64 splice_amnt,
s64 remote_splice_amnt);
/**
* actual_feerate: what is the actual feerate for the local side.
* @channel: The channel state
* @theirsig: The other side's signature
*
* The fee calculated on a commitment transaction is a worst-case
* approximation. It's also possible that the desired feerate is not
* met, because the initiator sets it while the other side is adding many
* htlcs.
*
* This is the fee rate we actually care about, if we're going to check
* whether it's actually too low.
*/
u32 actual_feerate(const struct channel *channel,
const struct signature *theirsig);
/**
* channel_add_htlc: append an HTLC to channel if it can afford it
* @channel: The channel
 * @sender: the side offering the HTLC (to the other side).
* @id: unique HTLC id.
* @amount: amount in millisatoshi.
* @cltv_expiry: block number when HTLC can no longer be redeemed.
* @payment_hash: hash whose preimage can redeem HTLC.
* @routing: routing information (copied)
* @blinding: optional blinding information for this HTLC.
* @htlcp: optional pointer for resulting htlc: filled in if and only if CHANNEL_ERR_NONE.
* @err_immediate_failures: in some cases (dusty htlcs) we want to immediately
* fail the htlc; for peer incoming don't want to
* error, but rather mark it as failed and fail after
* it's been committed to (so set this to false)
*
 * If this returns CHANNEL_ERR_NONE, the HTLC was added and
* the output amounts adjusted accordingly. Otherwise nothing
* is changed.
*/
enum channel_add_err channel_add_htlc(struct channel *channel,
enum side sender,
u64 id,
struct amount_msat msatoshi,
u32 cltv_expiry,
const struct sha256 *payment_hash,
const u8 routing[TOTAL_PACKET_SIZE(ROUTING_INFO_SIZE)],
const struct pubkey *blinding TAKES,
struct htlc **htlcp,
struct amount_sat *htlc_fee,
bool err_immediate_failures);
/**
* channel_get_htlc: find an HTLC
* @channel: The channel
 * @sender: the side offering the HTLC.
* @id: unique HTLC id.
*/
struct htlc *channel_get_htlc(struct channel *channel, enum side sender, u64 id);
/**
* channel_fail_htlc: remove an HTLC, funds to the side which offered it.
* @channel: The channel state
* @owner: the side who offered the HTLC (opposite to that failing it)
* @id: unique HTLC id.
* @htlcp: optional pointer for failed htlc: filled in if and only if CHANNEL_ERR_REMOVE_OK.
*
* This will remove the htlc and credit the value of the HTLC (back)
* to its offerer.
*/
enum channel_remove_err channel_fail_htlc(struct channel *channel,
enum side owner, u64 id,
struct htlc **htlcp);
/**
* channel_fulfill_htlc: remove an HTLC, funds to side which accepted it.
* @channel: The channel state
* @owner: the side who offered the HTLC (opposite to that fulfilling it)
* @id: unique HTLC id.
* @htlcp: optional pointer for resulting htlc: filled in if and only if CHANNEL_ERR_FULFILL_OK.
*
* If the htlc exists, is not already fulfilled, the preimage is correct and
* HTLC committed at the recipient, this will add a pending change to
* remove the htlc and give the value of the HTLC to its recipient,
* and return CHANNEL_ERR_FULFILL_OK. Otherwise, it will return another error.
*/
enum channel_remove_err channel_fulfill_htlc(struct channel *channel,
enum side owner,
u64 id,
const struct preimage *preimage,
struct htlc **htlcp);
/**
* approx_max_feerate: what's the max opener could raise fee rate to?
* @channel: The channel state
*
* This is not exact! To check if their offer is valid, try
* channel_update_feerate.
*/
u32 approx_max_feerate(const struct channel *channel);
/**
* can_opener_afford_feerate: could the opener pay the fee?
* @channel: The channel state
* @feerate: The feerate in satoshi per 1000 bytes.
*/
bool can_opener_afford_feerate(const struct channel *channel, u32 feerate);
/**
* htlc_dust_ok: will this feerate keep our dusted htlc's beneath
* the updated feerate?
*
* @channel: The channel state
* @feerate_per_kw: new feerate to test ok'ness for
* @side: which side's htlcs to verify
*/
bool htlc_dust_ok(const struct channel *channel,
u32 feerate_per_kw,
enum side side);
/**
* channel_update_feerate: Change fee rate on non-opener side.
* @channel: The channel
* @feerate_per_kw: fee in satoshi per 1000 bytes.
*
* Returns true if it's affordable, otherwise does nothing.
*/
bool channel_update_feerate(struct channel *channel, u32 feerate_per_kw);
/*
* channel_update_blockheight: Change blockheight on non-opener side.
* @channel: The channel
* @blockheight: current blockheight
*/
void channel_update_blockheight(struct channel *channel, u32 blockheight);
/**
* channel_feerate: Get fee rate for this side of channel.
* @channel: The channel
* @side: the side
*/
u32 channel_feerate(const struct channel *channel, enum side side);
/**
* channel_sending_commit: commit all remote outstanding changes.
* @channel: the channel
* @htlcs: initially-empty tal_arr() for htlcs which changed state.
*
* This is where we commit to pending changes we've added; returns true if
* anything changed for the remote side (if not, don't send!) */
bool channel_sending_commit(struct channel *channel,
const struct htlc ***htlcs);
/**
* channel_rcvd_revoke_and_ack: accept ack on remote committed changes.
* @channel: the channel
* @htlcs: initially-empty tal_arr() for htlcs which changed state.
*
* This is where we commit to pending changes we've added; returns true if
* anything changed for our local commitment (ie. we have pending changes).
*/
bool channel_rcvd_revoke_and_ack(struct channel *channel,
const struct htlc ***htlcs);
/**
* channel_rcvd_commit: commit all local outstanding changes.
* @channel: the channel
* @htlcs: initially-empty tal_arr() for htlcs which changed state.
*
* This is where we commit to pending changes we've added; returns true if
* anything changed for our local commitment (ie. we had pending changes).
*/
bool channel_rcvd_commit(struct channel *channel,
const struct htlc ***htlcs);
/**
* channel_sending_revoke_and_ack: sending ack on local committed changes.
* @channel: the channel
*
* This is where we commit to pending changes we've added. Returns true if
* anything changed for the remote commitment (ie. send a new commit).*/
bool channel_sending_revoke_and_ack(struct channel *channel);
/**
* num_channel_htlcs: how many (live) HTLCs at all in channel?
* @channel: the channel
*/
size_t num_channel_htlcs(const struct channel *channel);
/**
* channel_force_htlcs: force these htlcs into the (new) channel
* @channel: the channel
* @htlcs: the htlcs to add (tal_arr) elements stolen.
*
* This is used for restoring a channel state.
*/
bool channel_force_htlcs(struct channel *channel,
const struct existing_htlc **htlcs);
/**
* dump_htlcs: debugging dump of all HTLCs
* @channel: the channel
* @prefix: the prefix to prepend to each line.
*
* Uses status_debug() on every HTLC.
*/
void dump_htlcs(const struct channel *channel, const char *prefix);
/**
* pending_updates: does this side have updates pending in channel?
* @channel: the channel
* @side: the side who is offering or failing/fulfilling HTLC, or feechange
* @uncommitted_ok: don't count uncommitted changes.
*/
bool pending_updates(const struct channel *channel, enum side side,
bool uncommitted_ok);
const char *channel_add_err_name(enum channel_add_err e);
const char *channel_remove_err_name(enum channel_remove_err e);
#endif /* LIGHTNING_CHANNELD_FULL_CHANNEL_H */
|
bf6780a79d51ae3e47a1831c3f31a7544d090330
|
b0f08154e3eebc7d8465efc57597e52d08d69c18
|
/src/connection/connection_sr.c
|
ab71633382b5b21a38bed8009cccdcb99977de4f
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
CUBRID/cubrid
|
8f71a0010243b72e43ba887d229210650f4e901e
|
3b952af33230839a1b561a78ecd4b773374b66f8
|
refs/heads/develop
| 2023-08-18T19:16:30.987583
| 2023-08-18T08:18:05
| 2023-08-18T08:18:05
| 52,080,367
| 287
| 294
|
NOASSERTION
| 2023-09-14T21:29:09
| 2016-02-19T10:25:32
|
C
|
UTF-8
|
C
| false
| false
| 78,228
|
c
|
connection_sr.c
|
/*
* Copyright 2008 Search Solution Corporation
* Copyright 2016 CUBRID Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/*
* connection_sr.c - Client/Server connection list management
*/
#ident "$Id$"
#include "config.h"
#if defined (WINDOWS)
#include <io.h>
#endif
#include <filesystem>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <assert.h>
#if defined(WINDOWS)
#include <winsock2.h>
#include <windows.h>
#else /* WINDOWS */
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#endif /* WINDOWS */
#if defined(_AIX)
#include <sys/select.h>
#endif /* _AIX */
#if defined(SOLARIS)
#include <sys/filio.h>
#include <netdb.h>
#endif /* SOLARIS */
#if defined(SOLARIS) || defined(LINUX)
#include <unistd.h>
#endif /* SOLARIS || LINUX */
#include "porting.h"
#include "error_manager.h"
#include "connection_globals.h"
#include "filesys.hpp"
#include "filesys_temp.hpp"
#include "memory_alloc.h"
#include "environment_variable.h"
#include "system_parameter.h"
#include "critical_section.h"
#include "log_manager.h"
#include "object_representation.h"
#include "connection_error.h"
#include "log_impl.h"
#include "session.h"
#if defined(WINDOWS)
#include "wintcp.h"
#else /* WINDOWS */
#include "tcp.h"
#endif /* WINDOWS */
#include "connection_sr.h"
#include "server_support.h"
#include "thread_manager.hpp" // for thread_get_thread_entry_info
#ifdef PACKET_TRACE
/* Debug-only packet tracing, compiled out unless PACKET_TRACE is defined.
 * NOTE: the do { } while (0) wrapper must NOT carry a trailing semicolon;
 * with one, "TRACE (x, y);" at a call site expands to two statements and
 * breaks unbraced if/else bodies (CERT PRE10-C). */
#define TRACE(string, arg) \
        do { \
          er_log_debug(ARG_FILE_LINE, string, arg); \
        } \
        while (0)
#else /* PACKET_TRACE */
#define TRACE(string, arg)
#endif /* PACKET_TRACE */
/* data wait queue: a reader thread parks here until data for its request
 * arrives on the connection */
typedef struct css_wait_queue_entry
{
  char **buffer;		/* out: receives the arriving data buffer */
  int *size;			/* out: receives the buffer size */
  int *rc;			/* out: receives the completion code */
  THREAD_ENTRY *thrd_entry;	/* thread waiting for data */
  struct css_wait_queue_entry *next;	/* singly-linked list link */
  unsigned int key;		/* key identifying the request being waited on */
} CSS_WAIT_QUEUE_ENTRY;

/* argument block passed to queue-entry search callbacks */
typedef struct queue_search_arg
{
  CSS_QUEUE_ENTRY *entry_ptr;	/* out: the entry found, if any */
  int key;			/* key to look for */
  int remove_entry;		/* non-zero: unlink the entry when found */
} CSS_QUEUE_SEARCH_ARG;

/* argument block passed to wait-queue search callbacks */
typedef struct wait_queue_search_arg
{
  CSS_WAIT_QUEUE_ENTRY *entry_ptr;	/* out: the entry found, if any */
  unsigned int key;		/* key to look for */
  int remove_entry;		/* non-zero: unlink the entry when found */
} CSS_WAIT_QUEUE_SEARCH_ARG;
#define NUM_NORMAL_CLIENTS (prm_get_integer_value(PRM_ID_CSS_MAX_CLIENTS))

#define RMUTEX_NAME_CONN_ENTRY "CONN_ENTRY"

/* highest client id ever handed out; the counter wraps back to 1 afterwards */
static const int CSS_MAX_CLIENT_ID = INT_MAX - 1;

/* last client id handed out; protected by css_Client_id_lock */
static int css_Client_id = 0;
static pthread_mutex_t css_Client_id_lock = PTHREAD_MUTEX_INITIALIZER;
/* serializes updates of the css_Conn_rules counters */
static pthread_mutex_t css_Conn_rule_lock = PTHREAD_MUTEX_INITIALIZER;
/* free list of connection entries; protected by css_Rwlock_free_conn_anchor */
static CSS_CONN_ENTRY *css_Free_conn_anchor = NULL;
static int css_Num_free_conn = 0;
static int css_Num_max_conn = 101;	/* default max_clients + 1 for conn with master */

/* the whole fixed-size connection table, allocated in css_init_conn_list () */
CSS_CONN_ENTRY *css_Conn_array = NULL;
/* list of in-use entries; protected by css_Rwlock_active_conn_anchor */
CSS_CONN_ENTRY *css_Active_conn_anchor = NULL;
static int css_Num_active_conn = 0;

SYNC_RWLOCK css_Rwlock_active_conn_anchor;
SYNC_RWLOCK css_Rwlock_free_conn_anchor;

static LAST_ACCESS_STATUS *css_Access_status_anchor = NULL;
int css_Num_access_user = 0;

/* This will handle new connections */
css_error_code (*css_Connect_handler) (CSS_CONN_ENTRY *) = NULL;

/* This will handle new requests per connection */
CSS_THREAD_FN css_Request_handler = NULL;

/* This will handle closed connection errors */
CSS_THREAD_FN css_Connection_error_handler = NULL;

/* index of an entry within the connection table */
#define CSS_CONN_IDX(conn_arg) ((conn_arg) - css_Conn_array)

#define CSS_FREE_CONN_MSG "Free count = %d, head = %d"
#define CSS_FREE_CONN_ARGS css_Num_free_conn, CSS_CONN_IDX (css_Free_conn_anchor)

#define CSS_ACTIVE_CONN_MSG "Active count = %d, head = %d"
#define CSS_ACTIVE_CONN_ARGS css_Num_active_conn, CSS_CONN_IDX (css_Active_conn_anchor)
static int css_get_next_client_id (void);
static CSS_CONN_ENTRY *css_common_connect (CSS_CONN_ENTRY * conn, unsigned short *rid, const char *host_name,
int connect_type, const char *server_name, int server_name_length, int port);
static int css_abort_request (CSS_CONN_ENTRY * conn, unsigned short rid);
static void css_dealloc_conn (CSS_CONN_ENTRY * conn);
static unsigned int css_make_eid (unsigned short entry_id, unsigned short rid);
static CSS_QUEUE_ENTRY *css_claim_queue_entry (CSS_CONN_ENTRY * conn);
static void css_retire_queue_entry (CSS_CONN_ENTRY * conn, CSS_QUEUE_ENTRY * entry);
static void css_free_queue_entry_list (CSS_CONN_ENTRY * conn);
static CSS_WAIT_QUEUE_ENTRY *css_claim_wait_queue_entry (CSS_CONN_ENTRY * conn);
static void css_retire_wait_queue_entry (CSS_CONN_ENTRY * conn, CSS_WAIT_QUEUE_ENTRY * entry);
static void css_free_wait_queue_list (CSS_CONN_ENTRY * conn);
static NET_HEADER *css_claim_net_header_entry (CSS_CONN_ENTRY * conn);
static void css_retire_net_header_entry (CSS_CONN_ENTRY * conn, NET_HEADER * entry);
static void css_free_net_header_list (CSS_CONN_ENTRY * conn);
static CSS_QUEUE_ENTRY *css_make_queue_entry (CSS_CONN_ENTRY * conn, unsigned int key, char *buffer,
int size, int rc, int transid, int invalidate_snapshot, int db_error);
static void css_free_queue_entry (CSS_CONN_ENTRY * conn, CSS_QUEUE_ENTRY * entry);
static css_error_code css_add_queue_entry (CSS_CONN_ENTRY * conn, CSS_LIST * list, unsigned short request_id,
char *buffer, int buffer_size, int rc, int transid, int invalidate_snapshot,
int db_error);
static CSS_QUEUE_ENTRY *css_find_queue_entry (CSS_LIST * list, unsigned int key);
static CSS_QUEUE_ENTRY *css_find_and_remove_queue_entry (CSS_LIST * list, unsigned int key);
static CSS_WAIT_QUEUE_ENTRY *css_make_wait_queue_entry (CSS_CONN_ENTRY * conn, unsigned int key, char **buffer,
int *size, int *rc);
static void css_free_wait_queue_entry (CSS_CONN_ENTRY * conn, CSS_WAIT_QUEUE_ENTRY * entry);
static CSS_WAIT_QUEUE_ENTRY *css_add_wait_queue_entry (CSS_CONN_ENTRY * conn, CSS_LIST * list,
unsigned short request_id, char **buffer, int *buffer_size,
int *rc);
static CSS_WAIT_QUEUE_ENTRY *css_find_and_remove_wait_queue_entry (CSS_LIST * list, unsigned int key);
static void css_process_close_packet (CSS_CONN_ENTRY * conn);
static void css_process_abort_packet (CSS_CONN_ENTRY * conn, unsigned short request_id);
static bool css_is_request_aborted (CSS_CONN_ENTRY * conn, unsigned short request_id);
static void clear_wait_queue_entry_and_free_buffer (THREAD_ENTRY * thrdp, CSS_CONN_ENTRY * conn, unsigned short rid,
char **bufferp);
static int css_return_queued_data_timeout (CSS_CONN_ENTRY * conn, unsigned short rid, char **buffer, int *bufsize,
int *rc, int waitsec);
static void css_queue_data_packet (CSS_CONN_ENTRY * conn, unsigned short request_id, const NET_HEADER * header,
THREAD_ENTRY ** wait_thrd);
static void css_queue_error_packet (CSS_CONN_ENTRY * conn, unsigned short request_id, const NET_HEADER * header);
static css_error_code css_queue_command_packet (CSS_CONN_ENTRY * conn, unsigned short request_id,
const NET_HEADER * header, int size);
static bool css_is_valid_request_id (CSS_CONN_ENTRY * conn, unsigned short request_id);
static void css_remove_unexpected_packets (CSS_CONN_ENTRY * conn, unsigned short request_id);
static css_error_code css_queue_packet (CSS_CONN_ENTRY * conn, int type, unsigned short request_id,
const NET_HEADER * header, int size);
static int css_remove_and_free_queue_entry (void *data, void *arg);
static int css_remove_and_free_wait_queue_entry (void *data, void *arg);
static int css_increment_num_conn_internal (CSS_CONN_RULE_INFO * conn_rule_info);
static void css_decrement_num_conn_internal (CSS_CONN_RULE_INFO * conn_rule_info);
/*
 * css_get_next_client_id() - allocate the next unique client id
 *   return: new client id on success, ER_FAILED on mutex failure
 *
 * Ids come from a global counter guarded by css_Client_id_lock. After
 * the counter has wrapped around once, each candidate is checked against
 * the ids held by entries in css_Conn_array to avoid handing out a
 * duplicate.
 */
static int
css_get_next_client_id (void)
{
  /* set once the counter wraps; from then on candidates must be checked
   * for collisions with live connections */
  static bool overflow = false;
  int candidate, idx;
  bool in_use;

  if (pthread_mutex_lock (&css_Client_id_lock) != 0)
    {
      er_set_with_oserror (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_CSS_PTHREAD_MUTEX_LOCK, 0);
      return ER_FAILED;
    }

  for (;;)
    {
      css_Client_id++;
      if (css_Client_id == CSS_MAX_CLIENT_ID)
	{
	  css_Client_id = 1;
	  overflow = true;
	}

      in_use = false;
      if (overflow)
	{
	  for (idx = 0; idx < css_Num_max_conn; idx++)
	    {
	      if (css_Conn_array[idx].client_id == css_Client_id)
		{
		  in_use = true;
		  break;
		}
	    }
	}

      if (!in_use)
	{
	  break;
	}
    }

  candidate = css_Client_id;

  if (pthread_mutex_unlock (&css_Client_id_lock) != 0)
    {
      er_set_with_oserror (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_CSS_PTHREAD_MUTEX_UNLOCK, 0);
      return ER_FAILED;
    }

  return candidate;
}
/*
 * css_initialize_conn() - (re)initialize a connection entry
 *   return: NO_ERROR on success, ER_CSS_CONN_INIT otherwise
 *   conn(in/out): connection entry to initialize
 *   fd(in): socket descriptor to attach to the entry
 *
 * Assigns a fresh client id and resets all per-connection state and
 * queues. The rmutex is NOT touched here; it is managed separately
 * (rmutex_initialize / css_dealloc_conn_rmutex).
 */
int
css_initialize_conn (CSS_CONN_ENTRY * conn, SOCKET fd)
{
  int err;
  size_t i;
  /* every per-connection list is initialized identically; iterate instead
   * of repeating the call/check sequence six times */
  CSS_LIST *lists[] = {
    &conn->request_queue,
    &conn->data_queue,
    &conn->data_wait_queue,
    &conn->abort_queue,
    &conn->buffer_queue,
    &conn->error_queue
  };

  conn->fd = fd;
  conn->request_id = 0;
  conn->status = CONN_OPEN;
  conn->set_tran_index (NULL_TRAN_INDEX);
  conn->init_pending_request ();
  conn->invalidate_snapshot = 1;
  conn->in_method = false;

  /* negative return means the client-id mutex could not be taken */
  err = css_get_next_client_id ();
  if (err < 0)
    {
      return ER_CSS_CONN_INIT;
    }
  conn->client_id = err;

  conn->db_error = 0;
  conn->in_transaction = false;
  conn->in_flashback = false;
  conn->reset_on_commit = false;
  conn->stop_talk = false;
  conn->ignore_repl_delay = false;
  conn->stop_phase = THREAD_STOP_WORKERS_EXCEPT_LOGWR;
  conn->version_string = NULL;
  /* ignore connection handler thread */
  conn->free_queue_list = NULL;
  conn->free_queue_count = 0;

  conn->free_wait_queue_list = NULL;
  conn->free_wait_queue_count = 0;

  conn->free_net_header_list = NULL;
  conn->free_net_header_count = 0;

  conn->session_id = DB_EMPTY_SESSION;
#if defined(SERVER_MODE)
  conn->session_p = NULL;
  conn->client_type = DB_CLIENT_TYPE_UNKNOWN;
#endif

  for (i = 0; i < sizeof (lists) / sizeof (lists[0]); i++)
    {
      if (css_initialize_list (lists[i], 0) != NO_ERROR)
	{
	  return ER_CSS_CONN_INIT;
	}
    }

  return NO_ERROR;
}
/*
 * css_shutdown_conn() - close the socket of a connection entry and release
 *                       everything attached to it (queues, recycled entry
 *                       lists, session reference)
 *   return: void
 *   conn(in/out): connection entry to shut down
 *
 * The entry itself is NOT returned to the free list; the caller does that
 * separately (see css_free_conn / css_final_conn_list).
 */
void
css_shutdown_conn (CSS_CONN_ENTRY * conn)
{
  int r;

  r = rmutex_lock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);

  if (!IS_INVALID_SOCKET (conn->fd))
    {
      /* if this is the PC, it also shuts down Winsock */
      css_shutdown_socket (conn->fd);
      conn->fd = INVALID_SOCKET;
    }

  if (conn->status == CONN_OPEN || conn->status == CONN_CLOSING)
    {
      conn->status = CONN_CLOSED;
      conn->stop_talk = false;
      conn->in_flashback = false;
      conn->stop_phase = THREAD_STOP_WORKERS_EXCEPT_LOGWR;

      if (conn->version_string)
	{
	  free_and_init (conn->version_string);
	}

      /* drop packets still queued for this connection before the queues
       * themselves are finalized */
      css_remove_all_unexpected_packets (conn);

      css_finalize_list (&conn->request_queue);
      css_finalize_list (&conn->data_queue);
      css_finalize_list (&conn->data_wait_queue);
      css_finalize_list (&conn->abort_queue);
      css_finalize_list (&conn->buffer_queue);
      css_finalize_list (&conn->error_queue);
    }

  /* release the per-connection caches of recycled entries */
  if (conn->free_queue_list != NULL)
    {
      assert (conn->free_queue_count > 0);
      css_free_queue_entry_list (conn);
    }

  if (conn->free_wait_queue_list != NULL)
    {
      assert (conn->free_wait_queue_count > 0);
      css_free_wait_queue_list (conn);
    }

  if (conn->free_net_header_list != NULL)
    {
      assert (conn->free_net_header_count > 0);
      css_free_net_header_list (conn);
    }

#if defined(SERVER_MODE)
  if (conn->session_p)
    {
      /* drop this connection's reference on the session state */
      session_state_decrease_ref_count (NULL, conn->session_p);
      conn->session_p = NULL;
      conn->session_id = DB_EMPTY_SESSION;
    }
#endif

  r = rmutex_unlock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
}
/*
 * css_init_conn_list() - allocate and initialize the global connection table
 *   return: NO_ERROR if success, or error code
 *
 * Builds css_Conn_array (max clients + master channel entries), chains
 * every entry into the free list and initializes the two anchor rwlocks.
 * Calling it again after a successful initialization is a no-op.
 */
int
css_init_conn_list (void)
{
  int i, err;
  CSS_CONN_ENTRY *conn;

  css_init_conn_rules ();

  css_Num_max_conn = css_get_max_conn () + NUM_MASTER_CHANNEL;

  if (css_Conn_array != NULL)
    {
      /* already initialized */
      return NO_ERROR;
    }

  err = rwlock_initialize (CSS_RWLOCK_ACTIVE_CONN_ANCHOR, CSS_RWLOCK_ACTIVE_CONN_ANCHOR_NAME);
  if (err != NO_ERROR)
    {
      ASSERT_ERROR ();
      return err;
    }

  err = rwlock_initialize (CSS_RWLOCK_FREE_CONN_ANCHOR, CSS_RWLOCK_FREE_CONN_ANCHOR_NAME);
  if (err != NO_ERROR)
    {
      ASSERT_ERROR ();
      /* undo the first rwlock before bailing out */
      (void) rwlock_finalize (CSS_RWLOCK_ACTIVE_CONN_ANCHOR);
      return err;
    }

  /*
   * allocate NUM_MASTER_CHANNEL + the total number of
   *  conn entries
   */
  css_Conn_array = (CSS_CONN_ENTRY *) malloc (sizeof (CSS_CONN_ENTRY) * (css_Num_max_conn));
  if (css_Conn_array == NULL)
    {
      er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1,
	      sizeof (CSS_CONN_ENTRY) * (css_Num_max_conn));
      err = ER_OUT_OF_VIRTUAL_MEMORY;
      goto error;
    }

  /* initialize all CSS_CONN_ENTRY */
  for (i = 0; i < css_Num_max_conn; i++)
    {
      conn = &css_Conn_array[i];
      conn->idx = i;
      /* pool entries start without a socket */
      err = css_initialize_conn (conn, -1);
      if (err != NO_ERROR)
	{
	  er_set_with_oserror (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_CSS_CONN_INIT, 0);
	  err = ER_CSS_CONN_INIT;
	  goto error;
	}

      err = rmutex_initialize (&conn->rmutex, RMUTEX_NAME_CONN_ENTRY);
      if (err != NO_ERROR)
	{
	  er_set_with_oserror (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_CSS_CONN_INIT, 0);
	  err = ER_CSS_CONN_INIT;
	  goto error;
	}

      /* chain the entries so the whole array forms the initial free list */
      if (i < css_Num_max_conn - 1)
	{
	  conn->next = &css_Conn_array[i + 1];
	}
      else
	{
	  conn->next = NULL;
	}
    }

  /* initialize active conn list, used for stopping all threads */
  css_Active_conn_anchor = NULL;
  css_Free_conn_anchor = &css_Conn_array[0];
  css_Num_free_conn = css_Num_max_conn;

  return NO_ERROR;

error:
  (void) rwlock_finalize (CSS_RWLOCK_ACTIVE_CONN_ANCHOR);
  (void) rwlock_finalize (CSS_RWLOCK_FREE_CONN_ANCHOR);

  if (css_Conn_array != NULL)
    {
      free_and_init (css_Conn_array);
    }

  return err;
}
/*
 * css_final_conn_list() - tear down the whole connection table
 *   return: void
 *
 * Shuts down and releases any still-active connections, then frees
 * css_Conn_array and finalizes the per-entry rmutexes and the two
 * anchor rwlocks.
 */
void
css_final_conn_list (void)
{
  CSS_CONN_ENTRY *curr, *save_next;
  int idx;

  /* drain the active list first so every open socket gets closed */
  curr = css_Active_conn_anchor;
  while (curr != NULL)
    {
      save_next = curr->next;

      css_shutdown_conn (curr);
      css_dealloc_conn (curr);

      css_Num_active_conn--;
      assert (css_Num_active_conn >= 0);

      curr = save_next;
    }
  css_Active_conn_anchor = NULL;

  assert (css_Num_active_conn == 0);
  assert (css_Active_conn_anchor == NULL);

  if (css_Conn_array != NULL)
    {
      for (idx = 0; idx < css_Num_max_conn; idx++)
	{
#if defined(SERVER_MODE)
	  assert (css_Conn_array[idx].idx == idx);
#endif
	  (void) rmutex_finalize (&css_Conn_array[idx].rmutex);
	}

      free_and_init (css_Conn_array);

      (void) rwlock_finalize (CSS_RWLOCK_ACTIVE_CONN_ANCHOR);
      (void) rwlock_finalize (CSS_RWLOCK_FREE_CONN_ANCHOR);
    }
}
/*
 * css_make_conn() - take a connection entry off the free list and
 *                   initialize it; the entry is NOT inserted into the
 *                   active conn list
 *   return: new connection entry, or NULL when the pool is exhausted or
 *           initialization failed
 *   fd(in): socket descriptor
 */
CSS_CONN_ENTRY *
css_make_conn (SOCKET fd)
{
  CSS_CONN_ENTRY *conn = NULL;
  int r;

  START_EXCLUSIVE_ACCESS_FREE_CONN_ANCHOR (r);

  if (css_Free_conn_anchor != NULL)
    {
      conn = css_Free_conn_anchor;
      css_Free_conn_anchor = css_Free_conn_anchor->next;
      conn->next = NULL;

      css_Num_free_conn--;
      assert (css_Num_free_conn >= 0);
    }

  CSS_LOG_STACK ("css_make_conn: conn = %d, " CSS_FREE_CONN_MSG, CSS_CONN_IDX (conn), CSS_FREE_CONN_ARGS);

  END_EXCLUSIVE_ACCESS_FREE_CONN_ANCHOR (r);

  if (conn != NULL)
    {
      if (css_initialize_conn (conn, fd) != NO_ERROR)
	{
	  er_set_with_oserror (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_CSS_CONN_INIT, 0);

	  /* put the entry back on the free list; dropping it here would
	   * permanently leak one slot from the fixed-size connection pool
	   * on every initialization failure */
	  css_dealloc_conn (conn);
	  return NULL;
	}
    }

  return conn;
}
/*
 * css_insert_into_active_conn_list() - push a connection entry onto the
 *                                      active list. Must be paired with
 *                                      the removal performed by
 *                                      css_free_conn().
 *   return: void
 *   conn(in): connection entry to insert
 */
void
css_insert_into_active_conn_list (CSS_CONN_ENTRY * conn)
{
  int rv;

  START_EXCLUSIVE_ACCESS_ACTIVE_CONN_ANCHOR (rv);

  CSS_LOG_STACK ("css_insert_into_active_conn_list conn = %d, prev " CSS_ACTIVE_CONN_MSG, CSS_CONN_IDX (conn),
		 CSS_ACTIVE_CONN_ARGS);

  /* push at the head of the singly-linked active list */
  conn->next = css_Active_conn_anchor;
  css_Active_conn_anchor = conn;

  css_Num_active_conn++;
  assert (css_Num_active_conn > 0);
  assert (css_Num_active_conn <= css_Num_max_conn);

  END_EXCLUSIVE_ACCESS_ACTIVE_CONN_ANCHOR (rv);
}
/*
 * css_dealloc_conn_rmutex() - finalize the rmutex of a connection entry
 *   return: void
 *   conn(in): connection entry whose rmutex is released
 *
 * Counterpart of the rmutex_initialize () done in css_init_conn_list ().
 */
void
css_dealloc_conn_rmutex (CSS_CONN_ENTRY * conn)
{
  (void) rmutex_finalize (&conn->rmutex);
}
/*
 * css_dealloc_conn() - return a connection entry to the head of the
 *                      free list
 *   return: void
 *   conn(in): entry being released
 */
static void
css_dealloc_conn (CSS_CONN_ENTRY * conn)
{
  int rv;

  START_EXCLUSIVE_ACCESS_FREE_CONN_ANCHOR (rv);

  CSS_LOG_STACK ("css_dealloc_conn conn = %d, prev " CSS_FREE_CONN_MSG, CSS_CONN_IDX (conn), CSS_FREE_CONN_ARGS);

  /* push onto the head of the free list */
  conn->next = css_Free_conn_anchor;
  css_Free_conn_anchor = conn;

  css_Num_free_conn++;
  assert (css_Num_free_conn > 0);
  assert (css_Num_free_conn <= css_Num_max_conn);

  END_EXCLUSIVE_ACCESS_FREE_CONN_ANCHOR (rv);
}
/*
 * css_get_num_free_conn() - current number of entries on the free list
 *   return: free entry count
 *
 * NOTE: the counter is read without taking the free-anchor lock, so the
 * returned value is only a snapshot and may be stale by the time it is
 * used.
 */
int
css_get_num_free_conn (void)
{
  return css_Num_free_conn;
}
/*
 * css_increment_num_conn_internal() - increment the connection counter of
 *                                     the given connection rule
 *   return: NO_ERROR, or ER_CSS_CLIENTS_EXCEEDED when no slot is available
 *   conn_rule_info(in/out): rule entry whose counter is incremented
 *
 * Caller must hold css_Conn_rule_lock (see css_increment_num_conn).
 * CR_NORMAL_FIRST / CR_RESERVED_FIRST rules may recurse into the shared
 * CR_NORMAL_ONLY pool.
 */
static int
css_increment_num_conn_internal (CSS_CONN_RULE_INFO * conn_rule_info)
{
  int error = NO_ERROR;

  switch (conn_rule_info->rule)
    {
    case CR_NORMAL_ONLY:
      /* only the shared pool of normal slots is available to this rule */
      if (conn_rule_info->num_curr_conn == conn_rule_info->max_num_conn)
	{
	  error = ER_CSS_CLIENTS_EXCEEDED;
	}
      else
	{
	  conn_rule_info->num_curr_conn++;
	}
      break;
    case CR_NORMAL_FIRST:
      /* tries to use a normal conn first */
      if (css_increment_num_conn_internal (&css_Conn_rules[CSS_CR_NORMAL_ONLY_IDX]) != NO_ERROR)
	{
	  /* if normal conns are all occupied, uses a reserved conn */
	  if (conn_rule_info->num_curr_conn == conn_rule_info->max_num_conn)
	    {
	      error = ER_CSS_CLIENTS_EXCEEDED;
	    }
	  else
	    {
	      conn_rule_info->num_curr_conn++;
	      assert (conn_rule_info->num_curr_conn <= conn_rule_info->max_num_conn);
	    }
	}
      break;
    case CR_RESERVED_FIRST:
      /* tries to use a reserved conn first */
      if (conn_rule_info->num_curr_conn < conn_rule_info->max_num_conn)
	{
	  conn_rule_info->num_curr_conn++;
	}
      else			/* uses a normal conn if no reserved conn is available */
	{
	  if (css_increment_num_conn_internal (&css_Conn_rules[CSS_CR_NORMAL_ONLY_IDX]) != NO_ERROR)
	    {
	      error = ER_CSS_CLIENTS_EXCEEDED;
	    }
	  else
	    {
	      /* also increments its own conn counter */
	      conn_rule_info->num_curr_conn++;
	      assert (conn_rule_info->num_curr_conn <=
		      (css_Conn_rules[CSS_CR_NORMAL_ONLY_IDX].max_num_conn + conn_rule_info->max_num_conn));
	    }
	}
      break;
    default:
      assert (false);
      break;
    }

  return error;
}
/*
 * css_decrement_num_conn_internal() - decrement the connection counter of
 *                                     the given connection rule
 *   return: void
 *   conn_rule_info(in/out): rule entry whose counter is decremented
 *
 * Caller must hold css_Conn_rule_lock (see css_decrement_num_conn).
 * Mirrors the accounting of css_increment_num_conn_internal ().
 */
static void
css_decrement_num_conn_internal (CSS_CONN_RULE_INFO * conn_rule_info)
{
  int i;

  switch (conn_rule_info->rule)
    {
    case CR_NORMAL_ONLY:
      /* When a normal client decrements the counter, it should first check that other normal-first-reserved-last
       * clients need to take the released connection first. */
      for (i = 1; i < css_Conn_rules_size; i++)
	{
	  if (css_Conn_rules[i].rule == CR_NORMAL_FIRST && css_Conn_rules[i].num_curr_conn > 0)
	    {
	      /* hand the freed normal slot over to a normal-first client
	       * currently occupying one of its reserved slots */
	      css_Conn_rules[i].num_curr_conn--;
	      return;
	    }
	}
      conn_rule_info->num_curr_conn--;
      break;
    case CR_NORMAL_FIRST:
      /* decrements reserved conn counter first if exists */
      if (conn_rule_info->num_curr_conn > 0)
	{
	  conn_rule_info->num_curr_conn--;
	}
      else			/* decrements normal conn counter if no reserved conn is in use */
	{
	  css_decrement_num_conn_internal (&css_Conn_rules[CSS_CR_NORMAL_ONLY_IDX]);
	}
      break;
    case CR_RESERVED_FIRST:
      /* decrements normal conn counter if exists */
      if (conn_rule_info->num_curr_conn > conn_rule_info->max_num_conn)
	{
	  css_decrement_num_conn_internal (&css_Conn_rules[CSS_CR_NORMAL_ONLY_IDX]);
	}
      /* also decrements its own conn counter */
      conn_rule_info->num_curr_conn--;
      break;
    default:
      assert (false);
      break;
    }

  assert (conn_rule_info->num_curr_conn >= 0);

  return;
}
/*
 * css_increment_num_conn() - claim a connection slot for a client of the
 *                            given type
 *   return: NO_ERROR, or ER_CSS_CLIENTS_EXCEEDED when no slot is left
 *   client_type(in): type of the client requesting a connection
 */
int
css_increment_num_conn (BOOT_CLIENT_TYPE client_type)
{
  int idx;
  int err = NO_ERROR;

  /* the first rule whose predicate accepts the client type does the
   * accounting */
  for (idx = 0; idx < css_Conn_rules_size; idx++)
    {
      if (!css_Conn_rules[idx].check_client_type_fn (client_type))
	{
	  continue;
	}

      pthread_mutex_lock (&css_Conn_rule_lock);
      err = css_increment_num_conn_internal (&css_Conn_rules[idx]);
      pthread_mutex_unlock (&css_Conn_rule_lock);
      break;
    }

  return err;
}
/*
 * css_decrement_num_conn() - release the connection slot held by a client
 *                            of the given type
 *   return: void
 *   client_type(in): type of the client releasing its connection
 */
void
css_decrement_num_conn (BOOT_CLIENT_TYPE client_type)
{
  int idx;

  if (client_type == DB_CLIENT_TYPE_UNKNOWN)
    {
      /* nothing was ever counted for unknown clients */
      return;
    }

  /* the first rule whose predicate accepts the client type is the one
   * that counted this connection */
  for (idx = 0; idx < css_Conn_rules_size; idx++)
    {
      if (!css_Conn_rules[idx].check_client_type_fn (client_type))
	{
	  continue;
	}

      pthread_mutex_lock (&css_Conn_rule_lock);
      css_decrement_num_conn_internal (&css_Conn_rules[idx]);
      pthread_mutex_unlock (&css_Conn_rule_lock);
      break;
    }
}
/*
 * css_free_conn() - unlink a connection from the active list, shut it down
 *                   and return the entry to the free list
 *   return: void
 *   conn(in): connection entry to release
 */
void
css_free_conn (CSS_CONN_ENTRY * conn)
{
  CSS_CONN_ENTRY *p, *prev = NULL, *next;
  BOOT_CLIENT_TYPE client_type;
  int r;

  START_EXCLUSIVE_ACCESS_ACTIVE_CONN_ANCHOR (r);

  /* find and remove from active conn list */
  for (p = css_Active_conn_anchor; p != NULL; p = next)
    {
      next = p->next;

      if (p == conn)
	{
	  if (prev == NULL)
	    {
	      css_Active_conn_anchor = next;
	    }
	  else
	    {
	      prev->next = next;
	    }

	  css_Num_active_conn--;
	  assert (css_Num_active_conn >= 0);
	  assert (css_Num_active_conn < css_Num_max_conn);

	  CSS_LOG_STACK ("css_free_conn - removed conn = %d from " CSS_ACTIVE_CONN_MSG, CSS_CONN_IDX (conn),
			 CSS_ACTIVE_CONN_ARGS);
	  break;
	}

      prev = p;
    }

  if (p == NULL)
    {
      CSS_LOG_STACK ("css_free_conn - not found conn = %p in " CSS_ACTIVE_CONN_MSG, conn, CSS_ACTIVE_CONN_ARGS);
    }

  /* capture the client type before the entry goes back on the free list:
   * once css_dealloc_conn () has run, another thread may pop and
   * re-initialize this entry via css_make_conn (), clobbering
   * conn->client_type and making us decrement the wrong rule counter */
  client_type = conn->client_type;

  css_shutdown_conn (conn);
  css_dealloc_conn (conn);
  css_decrement_num_conn (client_type);

  END_EXCLUSIVE_ACCESS_ACTIVE_CONN_ANCHOR (r);
}
/*
 * css_print_conn_entry_info() - dump one connection entry to stderr
 *   return: void
 *   conn(in): connection entry to print
 */
void
css_print_conn_entry_info (CSS_CONN_ENTRY * conn)
{
  fprintf (stderr,
	   "CONN_ENTRY: %p, next(%p), idx(%d),fd(%lld),request_id(%d),transaction_id(%d),client_id(%d)\n",
	   conn, conn->next, conn->idx, (long long) conn->fd, conn->request_id, conn->get_tran_index (),
	   conn->client_id);
}
/*
 * css_print_conn_list() - dump the active connection list to stderr
 *   return: void
 */
void
css_print_conn_list (void)
{
  CSS_CONN_ENTRY *entry;
  int count, rv;

  if (css_Active_conn_anchor == NULL)
    {
      return;
    }

  START_SHARED_ACCESS_ACTIVE_CONN_ANCHOR (rv);

  fprintf (stderr, "active conn list (%d)\n", css_Num_active_conn);

  count = 0;
  for (entry = css_Active_conn_anchor; entry != NULL; entry = entry->next)
    {
      css_print_conn_entry_info (entry);
      count++;
    }

  /* the walk must agree with the maintained counter */
  assert (count == css_Num_active_conn);

  END_SHARED_ACCESS_ACTIVE_CONN_ANCHOR (rv);
}
/*
 * css_print_free_conn_list() - dump the free connection list to stderr
 *   return: void
 */
void
css_print_free_conn_list (void)
{
  CSS_CONN_ENTRY *entry;
  int count, rv;

  if (css_Free_conn_anchor == NULL)
    {
      return;
    }

  START_SHARED_ACCESS_FREE_CONN_ANCHOR (rv);

  fprintf (stderr, "free conn list (%d)\n", css_Num_free_conn);

  count = 0;
  for (entry = css_Free_conn_anchor; entry != NULL; entry = entry->next)
    {
      css_print_conn_entry_info (entry);
      count++;
    }

  /* the walk must agree with the maintained counter */
  assert (count == css_Num_free_conn);

  END_SHARED_ACCESS_FREE_CONN_ANCHOR (rv);
}
/*
* css_register_handler_routines() - enroll handler routines
* return: void
* connect_handler(in): connection handler function pointer
* conn(in): connection entry
* request_handler(in): request handler function pointer
* connection_error_handler(in): error handler function pointer
*
* Note: This is the routine that will enroll various handler routines
* that the client/server interface software may use. Any of these
* routines may be given a NULL value in which case a default routine
* will be used, or nothing will be done.
*
* The connect handler is called when a new connection is made.
*
* The request handler is called to handle a new request. This must
* return non zero, otherwise, the server will halt.
*
* The abort handler is called by the server when an abort command
* is sent from the client.
*
* The alloc function is called instead of malloc when new buffers
* are to be created.
*
* The free function is called when a buffer is to be released.
*
* The error handler function is called when the client/server system
* detects an error it considers to be fatal.
*/
void
css_register_handler_routines (css_error_code (*connect_handler) (CSS_CONN_ENTRY * conn),
			       CSS_THREAD_FN request_handler, CSS_THREAD_FN connection_error_handler)
{
  /* the connect and request handlers always take the supplied value; the
   * error handler keeps its previous value when NULL is passed */
  css_Connect_handler = connect_handler;
  css_Request_handler = request_handler;

  if (connection_error_handler != NULL)
    {
      css_Connection_error_handler = connection_error_handler;
    }
}
/*
* css_common_connect() - actually try to make a connection to a server.
* return: connection entry if success, or NULL
* conn(in): connection entry will be connected
* rid(out): request id
* host_name(in): host name of server
* connect_type(in):
* server_name(in):
* server_name_length(in):
* port(in):
*/
static CSS_CONN_ENTRY *
css_common_connect (CSS_CONN_ENTRY * conn, unsigned short *rid,
		    const char *host_name, int connect_type, const char *server_name, int server_name_length, int port)
{
  /* open the TCP connection to the target host/port */
  SOCKET fd = css_tcp_client_open ((char *) host_name, port);

  if (IS_INVALID_SOCKET (fd))
    {
      return NULL;
    }

  conn->fd = fd;

  /* send the protocol magic first, then the actual connect request; on any
   * failure the caller owns conn (and its fd) and is expected to free it */
  if (css_send_magic (conn) != NO_ERRORS)
    {
      return NULL;
    }
  if (css_send_request (conn, connect_type, rid, server_name, server_name_length) != NO_ERRORS)
    {
      return NULL;
    }

  return conn;
}
/*
* css_connect_to_master_server() - Connect to the master from the server.
* return: connection entry if success, or NULL
* master_port_id(in):
* server_name(in): name + version
* name_length(in):
*/
CSS_CONN_ENTRY *
css_connect_to_master_server (int master_port_id, const char *server_name, int name_length)
{
  char hname[CUB_MAXHOSTNAMELEN];
  CSS_CONN_ENTRY *conn;
  unsigned short rid;
  int response, response_buff;
  int server_port_id;
  int connection_protocol;
#if !defined(WINDOWS)
  std::string pname;
  int datagram_fd, socket_fd;
#endif
  /* remember the master's port for the rest of this process */
  css_Service_id = master_port_id;
  if (GETHOSTNAME (hname, CUB_MAXHOSTNAMELEN) != 0)
    {
      return NULL;
    }
  conn = css_make_conn (0);
  if (conn == NULL)
    {
      er_set_with_oserror (ER_ERROR_SEVERITY, ARG_FILE_LINE, ERR_CSS_ERROR_DURING_SERVER_CONNECT, 1, server_name);
      return NULL;
    }
  /* select the connection protocol */
  if (css_Server_use_new_connection_protocol)
    {
      // Windows
      connection_protocol = SERVER_REQUEST_NEW;
    }
  else
    {
      // Linux and Unix
      connection_protocol = SERVER_REQUEST;
    }
  /* handshake: register our server name with the master, then read its
   * (network byte order) verdict as a single int */
  if (css_common_connect (conn, &rid, hname, connection_protocol, server_name, name_length, master_port_id) == NULL)
    {
      goto fail_end;
    }
  if (css_readn (conn->fd, (char *) &response_buff, sizeof (int), -1) != sizeof (int))
    {
      goto fail_end;
    }
  response = ntohl (response_buff);
  TRACE ("css_connect_to_master_server received %d as response from master\n", response);
  switch (response)
    {
    case SERVER_ALREADY_EXISTS:
      /* another server is registered under this name; refuse to start */
      er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ERR_CSS_SERVER_ALREADY_EXISTS, 1, server_name);
      goto fail_end;
    case SERVER_REQUEST_ACCEPTED_NEW:
      /*
       * Master requests a new-style connect, must go get
       * our port id and set up our connection socket.
       * For drivers, we don't need a connection socket and we
       * don't want to allocate a bunch of them. Let a flag variable
       * control whether or not we actually create one of these.
       */
      if (css_Server_inhibit_connection_socket)
	{
	  server_port_id = -1;
	}
      else
	{
	  server_port_id = css_open_server_connection_socket ();
	}
      /* report our listening port back to the master */
      response = htonl (server_port_id);
      css_net_send (conn, (char *) &response, sizeof (int), -1);
      /* this connection remains our only contact with the master */
      return conn;
    case SERVER_REQUEST_ACCEPTED:
#if defined(WINDOWS)
      /* PC's can't handle this style of connection at all */
      er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ERR_CSS_ERROR_DURING_SERVER_CONNECT, 1, server_name);
      goto fail_end;
#else /* WINDOWS */
      /* send the "pathname" for the datagram */
      /* be sure to open the datagram first. */
      pname = std::filesystem::temp_directory_path ();
      pname += "/cubrid_tcp_setup_server" + std::to_string (getpid ());
      (void) unlink (pname.c_str ());	// make sure file is deleted
      if (!css_tcp_setup_server_datagram (pname.c_str (), &socket_fd))
	{
	  (void) unlink (pname.c_str ());
	  er_set_with_oserror (ER_ERROR_SEVERITY, ARG_FILE_LINE, ERR_CSS_ERROR_DURING_SERVER_CONNECT, 1, server_name);
	  goto fail_end;
	}
      if (css_send_data (conn, rid, pname.c_str (), pname.length () + 1) != NO_ERRORS)
	{
	  (void) unlink (pname.c_str ());
	  close (socket_fd);
	  er_set_with_oserror (ER_ERROR_SEVERITY, ARG_FILE_LINE, ERR_CSS_ERROR_DURING_SERVER_CONNECT, 1, server_name);
	  goto fail_end;
	}
      /* wait for the master to connect back on the datagram socket */
      if (!css_tcp_listen_server_datagram (socket_fd, &datagram_fd))
	{
	  (void) unlink (pname.c_str ());
	  close (socket_fd);
	  er_set_with_oserror (ER_ERROR_SEVERITY, ARG_FILE_LINE, ERR_CSS_ERROR_DURING_SERVER_CONNECT, 1, server_name);
	  goto fail_end;
	}
      // success
      /* the bootstrap conn/socket are no longer needed; the accepted datagram
       * fd becomes the real connection to the master */
      (void) unlink (pname.c_str ());
      css_free_conn (conn);
      close (socket_fd);
      return (css_make_conn (datagram_fd));
#endif /* WINDOWS */
    }
fail_end:
  css_free_conn (conn);
  return NULL;
}
/*
* css_find_conn_by_tran_index() - find connection entry having given
* transaction id
* return: connection entry if find, or NULL
* tran_index(in): transaction id
*/
CSS_CONN_ENTRY *
css_find_conn_by_tran_index (int tran_index)
{
  CSS_CONN_ENTRY *found = NULL;
  int rv;

  if (css_Active_conn_anchor == NULL)
    {
      return NULL;
    }

  /* scan the active list under a shared latch; first match wins */
  START_SHARED_ACCESS_ACTIVE_CONN_ANCHOR (rv);
  for (CSS_CONN_ENTRY * entry = css_Active_conn_anchor; entry != NULL; entry = entry->next)
    {
      if (entry->get_tran_index () == tran_index)
	{
	  found = entry;
	  break;
	}
    }
  END_SHARED_ACCESS_ACTIVE_CONN_ANCHOR (rv);

  return found;
}
/*
* css_find_conn_from_fd() - find a connection having given socket fd.
* return: connection entry if find, or NULL
* fd(in): socket fd
*/
CSS_CONN_ENTRY *
css_find_conn_from_fd (SOCKET fd)
{
  CSS_CONN_ENTRY *found = NULL;
  int rv;

  if (css_Active_conn_anchor == NULL)
    {
      return NULL;
    }

  /* scan the active list under a shared latch; first entry with this fd wins */
  START_SHARED_ACCESS_ACTIVE_CONN_ANCHOR (rv);
  for (CSS_CONN_ENTRY * entry = css_Active_conn_anchor; entry != NULL; entry = entry->next)
    {
      if (entry->fd == fd)
	{
	  found = entry;
	  break;
	}
    }
  END_SHARED_ACCESS_ACTIVE_CONN_ANCHOR (rv);

  return found;
}
/*
* css_get_session_ids_for_active_connections () - get active session ids
* return : error code or NO_ERROR
* session_ids (out) : holder for session ids
* count (out) : number of session ids
*/
int
css_get_session_ids_for_active_connections (SESSION_ID ** session_ids, int *count)
{
  CSS_CONN_ENTRY *conn = NULL, *next = NULL;
  SESSION_ID *sessions_p = NULL;
  int error = NO_ERROR, i = 0, r;
  assert (count != NULL);
  if (count == NULL)
    {
      error = ER_FAILED;
      goto error_return;
    }
  /* NOTE(review): the anchor is inspected here without holding the latch;
   * presumably a transiently empty list is acceptable — confirm. */
  if (css_Active_conn_anchor == NULL)
    {
      *session_ids = NULL;
      *count = 0;
      return NO_ERROR;
    }
  /* snapshot the session id of every active connection under a shared latch */
  START_SHARED_ACCESS_ACTIVE_CONN_ANCHOR (r);
  *count = css_Num_active_conn;
  sessions_p = (SESSION_ID *) malloc (css_Num_active_conn * sizeof (SESSION_ID));
  if (sessions_p == NULL)
    {
      er_set (ER_ERROR_SEVERITY, ARG_FILE_LINE, ER_OUT_OF_VIRTUAL_MEMORY, 1, css_Num_active_conn * sizeof (SESSION_ID));
      error = ER_FAILED;
      END_SHARED_ACCESS_ACTIVE_CONN_ANCHOR (r);
      goto error_return;
    }
  for (conn = css_Active_conn_anchor; conn != NULL; conn = next)
    {
      next = conn->next;
      sessions_p[i] = conn->session_id;
      i++;
    }
  END_SHARED_ACCESS_ACTIVE_CONN_ANCHOR (r);
  /* ownership of sessions_p transfers to the caller on success */
  *session_ids = sessions_p;
  return error;
error_return:
  if (sessions_p != NULL)
    {
      free_and_init (sessions_p);
    }
  *session_ids = NULL;
  if (count != NULL)
    {
      *count = 0;
    }
  return error;
}
/*
* css_shutdown_conn_by_tran_index() - shutdown connection having given
* transaction id
* return: void
* tran_index(in): transaction id
*/
void
css_shutdown_conn_by_tran_index (int tran_index)
{
CSS_CONN_ENTRY *conn = NULL;
int r;
if (css_Active_conn_anchor != NULL)
{
START_EXCLUSIVE_ACCESS_ACTIVE_CONN_ANCHOR (r);
for (conn = css_Active_conn_anchor; conn != NULL; conn = conn->next)
{
if (conn->get_tran_index () == tran_index)
{
if (conn->status == CONN_OPEN)
{
conn->status = CONN_CLOSING;
}
break;
}
}
END_EXCLUSIVE_ACCESS_ACTIVE_CONN_ANCHOR (r);
}
}
/*
* css_get_request_id() - return the next valid request id
* return: request id
* conn(in): connection entry
*/
unsigned short
css_get_request_id (CSS_CONN_ENTRY * conn)
{
  unsigned short old_rid;
  unsigned short request_id;
  int r;
  r = rmutex_lock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
  /* remember where we started so a full wraparound can be detected */
  old_rid = conn->request_id++;
  /* request id 0 is reserved; skip it on wraparound */
  if (conn->request_id == 0)
    {
      conn->request_id++;
    }
  /* advance (with wraparound, skipping 0) until a free id is found or we
   * come all the way back to the starting point */
  while (conn->request_id != old_rid)
    {
      if (css_is_valid_request_id (conn, conn->request_id))
	{
	  request_id = conn->request_id;
	  r = rmutex_unlock (NULL, &conn->rmutex);
	  assert (r == NO_ERROR);
	  return (request_id);
	}
      else
	{
	  conn->request_id++;
	  if (conn->request_id == 0)
	    {
	      conn->request_id++;
	    }
	}
    }
  r = rmutex_unlock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
  /* Should never reach this point */
  er_set (ER_WARNING_SEVERITY, ARG_FILE_LINE, ERR_CSS_REQUEST_ID_FAILURE, 0);
  return (0);
}
/*
* css_abort_request() - helper routine to actually send the abort request.
* return: 0 if success, or error code
* conn(in): connection entry
* rid(in): request id
*/
static int
css_abort_request (CSS_CONN_ENTRY * conn, unsigned short rid)
{
  NET_HEADER header = DEFAULT_HEADER_DATA;
  unsigned short flags = 0;

  /**
   * FIXME!!
   * make NET_HEADER_FLAG_INVALIDATE_SNAPSHOT be enabled always due to CBRD-24157
   *
   * flags was mis-readed at css_read_header() and fixed at CBRD-24118.
   * But The side effects described in CBRD-24157 occurred.
   */
  flags |= NET_HEADER_FLAG_INVALIDATE_SNAPSHOT;	/* unconditional; see note above */
  if (conn->in_method)
    {
      flags |= NET_HEADER_FLAG_METHOD_MODE;
    }

  /* build an ABORT header carrying this connection's transaction context;
   * all multi-byte fields go out in network byte order */
  header.type = htonl (ABORT_TYPE);
  header.request_id = htonl (rid);
  header.transaction_id = htonl (conn->get_tran_index ());
  header.flags = htons (flags);
  header.db_error = htonl (conn->db_error);

  /* timeout in milli-second in css_net_send(); -1 blocks indefinitely */
  return css_net_send (conn, (char *) &header, sizeof (NET_HEADER), -1);
}
/*
* css_send_abort_request() - abort an outstanding request.
* return: 0 if success, or error code
* conn(in): connection entry
* request_id(in): request id
*
* Note: Once this is issued, any queued data buffers for this command will be
* released.
*/
int
css_send_abort_request (CSS_CONN_ENTRY * conn, unsigned short request_id)
{
  int result;
  int r;

  /* nothing to abort on a missing or non-open connection */
  if (conn == NULL || conn->status != CONN_OPEN)
    {
      return CONNECTION_CLOSED;
    }

  r = rmutex_lock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);

  /* drop anything already queued for this request, then send the abort */
  css_remove_unexpected_packets (conn, request_id);
  result = css_abort_request (conn, request_id);

  r = rmutex_unlock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);

  return result;
}
/*
* css_read_header() - helper routine that will read a header from the socket.
* return: 0 if success, or error code
* conn(in): connection entry
* local_header(in):
*
* Note: It is a blocking read.
*/
int
css_read_header (CSS_CONN_ENTRY * conn, const NET_HEADER * local_header)
{
  int buffer_size;
  int rc = 0;
  unsigned short flags = 0;
  /* NOTE(review): despite the const qualifier, the header is read INTO
   * *local_header via the (char *) cast below — callers must pass writable
   * storage. The signature cannot change without touching external callers. */
  buffer_size = sizeof (NET_HEADER);
  if (conn->stop_talk == true)
    {
      return CONNECTION_CLOSED;
    }
  /* blocking read of exactly one NET_HEADER (-1 = no timeout) */
  rc = css_net_read_header (conn->fd, (char *) local_header, &buffer_size, -1);
  if (rc == NO_ERRORS && ntohl (local_header->type) == CLOSE_TYPE)
    {
      return CONNECTION_CLOSED;
    }
  /* RECORD_TRUNCATED is tolerated; any other failure closes the connection */
  if (rc != NO_ERRORS && rc != RECORD_TRUNCATED)
    {
      return CONNECTION_CLOSED;
    }
  /* propagate the transaction context carried by the header into conn */
  conn->set_tran_index (ntohl (local_header->transaction_id));
  conn->db_error = (int) ntohl (local_header->db_error);
  flags = ntohs (local_header->flags);
  conn->invalidate_snapshot = flags & NET_HEADER_FLAG_INVALIDATE_SNAPSHOT ? 1 : 0;
  conn->in_method = flags & NET_HEADER_FLAG_METHOD_MODE ? true : false;
  /* rc may still be RECORD_TRUNCATED here */
  return rc;
}
/*
* css_receive_request() - receive request from client
* return: 0 if success, or error code
* conn(in): connection entry
* rid(out): request id
* request(out): request
* buffer_size(out): request data size
*/
int
css_receive_request (CSS_CONN_ENTRY * conn, unsigned short *rid, int *request, int *buffer_size)
{
  /* requests are pre-queued by the connection handler; just dequeue one */
  int result = css_return_queued_request (conn, rid, request, buffer_size);

  return result;
}
/*
* css_read_and_queue() - Attempt to read any data packet from the connection.
* return: 0 if success, or error code
* conn(in): connection entry
* type(out): request type
*/
int
css_read_and_queue (CSS_CONN_ENTRY * conn, int *type)
{
  NET_HEADER header = DEFAULT_HEADER_DATA;
  int rc;

  if (conn == NULL || conn->status != CONN_OPEN)
    {
      return ERROR_ON_READ;
    }

  /* blocking read of the next packet header */
  rc = css_read_header (conn, &header);

  if (conn->stop_talk)
    {
      return CONNECTION_CLOSED;
    }
  if (rc != NO_ERRORS)
    {
      return rc;
    }

  /* report the packet type to the caller, then hand the packet off to the
   * appropriate queue */
  *type = ntohl (header.type);
  return css_queue_packet (conn, (int) ntohl (header.type), (unsigned short) ntohl (header.request_id), &header,
			   sizeof (NET_HEADER));
}
/*
* css_receive_data() - receive a data for an associated request.
* return: 0 if success, or error code
* conn(in): connection entry
* req_id(in): request id
* buffer(out): buffer for data
* buffer_size(out): buffer size
* timeout(in):
*
* Note: this is a blocking read.
*/
int
css_receive_data (CSS_CONN_ENTRY * conn, unsigned short req_id, char **buffer, int *buffer_size, int timeout)
{
  int *r, rc;
  /* at here, do not use stack variable; must alloc it */
  /* NOTE(review): the result slot is heap-allocated on purpose (see the
   * comment above); presumably another thread may write through the pointer
   * registered in the wait queue — confirm before simplifying to a local. */
  r = (int *) malloc (sizeof (int));
  if (r == NULL)
    {
      return NO_DATA_AVAILABLE;
    }
  /* blocks until data for req_id arrives, or the timeout expires */
  css_return_queued_data_timeout (conn, req_id, buffer, buffer_size, r, timeout);
  rc = *r;
  free_and_init (r);
  return rc;
}
/*
* css_return_eid_from_conn() - get enquiry id from connection entry
* return: enquiry id
* conn(in): connection entry
* rid(in): request id
*/
unsigned int
css_return_eid_from_conn (CSS_CONN_ENTRY * conn, unsigned short rid)
{
  /* the eid packs the connection's table index with the request id */
  unsigned short entry_id = (unsigned short) conn->idx;

  return css_make_eid (entry_id, rid);
}
/*
* css_make_eid() - make enquiry id
* return: enquiry id
* entry_id(in): connection entry id
* rid(in): request id
*/
static unsigned int
css_make_eid (unsigned short entry_id, unsigned short rid)
{
  unsigned int top;

  /* Use an unsigned intermediate: with a signed int, entry_id values of
   * 0x8000 or greater shifted left by 16 would overflow, which is undefined
   * behavior; unsigned arithmetic makes the pack well-defined. */
  top = entry_id;

  /* high 16 bits = connection entry id, low 16 bits = request id */
  return ((top << 16) | rid);
}
/* CSS_CONN_ENTRY's queues related functions */
/*
* css_claim_queue_entry() - claim a queue entry from free list.
* return: CSS_QUEUE_ENTRY *
* conn(in): connection entry
*/
static CSS_QUEUE_ENTRY *
css_claim_queue_entry (CSS_CONN_ENTRY * conn)
{
  assert (conn != NULL);

  /* pop the head of this connection's free list, if any */
  CSS_QUEUE_ENTRY *entry = conn->free_queue_list;
  if (entry == NULL)
    {
      return NULL;
    }

  conn->free_queue_list = entry->next;
  conn->free_queue_count--;
  assert (0 <= conn->free_queue_count);

  entry->next = NULL;
  return entry;
}
/*
* css_retire_queue_entry() - retire a queue entry to free list.
* return: void
* conn(in): connection entry
* entry(in): CSS_QUEUE_ENTRY * to be retired
*/
static void
css_retire_queue_entry (CSS_CONN_ENTRY * conn, CSS_QUEUE_ENTRY * entry)
{
  assert (conn != NULL && entry != NULL);

  /* push the node back onto the head of the free list for reuse */
  entry->next = conn->free_queue_list;
  conn->free_queue_list = entry;

  conn->free_queue_count++;
  assert (0 < conn->free_queue_count);
}
/*
* css_free_queue_entry_list() - free all entries of free queue list
* return: void
* conn(in): connection entry
*/
static void
css_free_queue_entry_list (CSS_CONN_ENTRY * conn)
{
  assert (conn != NULL);

  /* drain the free list, returning each recycled node to the heap */
  while (conn->free_queue_list != NULL)
    {
      CSS_QUEUE_ENTRY *victim = conn->free_queue_list;
      conn->free_queue_list = victim->next;
      conn->free_queue_count--;
      free (victim);
    }

  conn->free_queue_list = NULL;
  assert (conn->free_queue_count == 0);
}
/*
* css_claim_wait_queue_entry() - claim a wait queue entry from free list.
* return: CSS_WAIT_QUEUE_ENTRY *
* conn(in): connection entry
*/
static CSS_WAIT_QUEUE_ENTRY *
css_claim_wait_queue_entry (CSS_CONN_ENTRY * conn)
{
  assert (conn != NULL);

  /* pop the head of the wait-queue free list, if any */
  CSS_WAIT_QUEUE_ENTRY *entry = conn->free_wait_queue_list;
  if (entry == NULL)
    {
      return NULL;
    }

  conn->free_wait_queue_list = entry->next;
  conn->free_wait_queue_count--;
  assert (0 <= conn->free_wait_queue_count);

  entry->next = NULL;
  return entry;
}
/*
* css_retire_wait_queue_entry() - retire a wait_queue entry to free list.
* return: void
* conn(in): connection entry
* entry(in): CSS_WAIT_QUEUE_ENTRY * to be retired
*/
static void
css_retire_wait_queue_entry (CSS_CONN_ENTRY * conn, CSS_WAIT_QUEUE_ENTRY * entry)
{
  assert (conn != NULL && entry != NULL);

  /* push the node back onto the head of the wait-queue free list */
  entry->next = conn->free_wait_queue_list;
  conn->free_wait_queue_list = entry;

  conn->free_wait_queue_count++;
  assert (0 < conn->free_wait_queue_count);
}
/*
* css_free_wait_queue_list() - free all entries of free wait queue list
* return: void
* conn(in): connection entry
*/
static void
css_free_wait_queue_list (CSS_CONN_ENTRY * conn)
{
  assert (conn != NULL);

  /* drain the wait-queue free list, returning each node to the heap */
  while (conn->free_wait_queue_list != NULL)
    {
      CSS_WAIT_QUEUE_ENTRY *victim = conn->free_wait_queue_list;
      conn->free_wait_queue_list = victim->next;
      conn->free_wait_queue_count--;
      free (victim);
    }

  conn->free_wait_queue_list = NULL;
  assert (conn->free_wait_queue_count == 0);
}
/*
* css_claim_net_header_entry() - claim a net header entry from free list.
* return: NET_HEADER *
* conn(in): connection entry
*
* TODO - rewrite this to avoid ugly
*/
static NET_HEADER *
css_claim_net_header_entry (CSS_CONN_ENTRY * conn)
{
  NET_HEADER *p;
  assert (conn != NULL);
  /* The free list is intrusive: the first sizeof(UINTPTR) bytes of each
   * retired NET_HEADER hold the pointer to the next free entry (see
   * css_retire_net_header_entry). Pop the head here. */
  p = (NET_HEADER *) conn->free_net_header_list;
  if (p == NULL)
    {
      return NULL;
    }
  /* read the "next" pointer stored inside the header itself */
  conn->free_net_header_list = (char *) (*(UINTPTR *) p);
  conn->free_net_header_count--;
  assert (0 <= conn->free_net_header_count);
  return p;
}
/*
* css_retire_net_header_entry() - retire a net header entry to free list.
* return: void
* conn(in): connection entry
* entry(in): NET_HEADER * to be retired
*/
static void
css_retire_net_header_entry (CSS_CONN_ENTRY * conn, NET_HEADER * entry)
{
  assert (conn != NULL && entry != NULL);
  /* Intrusive free list: overwrite the start of the retired header with the
   * current list head, then make this entry the new head. The header's own
   * contents are dead at this point, so the overwrite is safe. */
  *(UINTPTR *) entry = (UINTPTR) conn->free_net_header_list;
  conn->free_net_header_list = (char *) entry;
  conn->free_net_header_count++;
  assert (0 < conn->free_net_header_count);
}
/*
* css_free_net_header_list() - free all entries of free net header list
* return: void
* conn(in): connection entry
*/
static void
css_free_net_header_list (CSS_CONN_ENTRY * conn)
{
  char *p;
  assert (conn != NULL);
  /* Drain the intrusive free list (next pointer lives in the first
   * sizeof(UINTPTR) bytes of each retired header) and free each node. */
  while (conn->free_net_header_list != NULL)
    {
      p = conn->free_net_header_list;
      conn->free_net_header_list = (char *) (*(UINTPTR *) p);
      conn->free_net_header_count--;
      free (p);
    }
  conn->free_net_header_list = NULL;
  assert (conn->free_net_header_count == 0);
}
/*
* css_make_queue_entry() - make queue entey
* return: queue entry
* conn(in): connection entry
* key(in):
* buffer(in):
* size(in):
* rc(in):
* transid(in):
* db_error(in):
*/
static CSS_QUEUE_ENTRY *
css_make_queue_entry (CSS_CONN_ENTRY * conn, unsigned int key, char *buffer,
		      int size, int rc, int transid, int invalidate_snapshot, int db_error)
{
  CSS_QUEUE_ENTRY *p;

  /* reuse a retired entry when possible to avoid malloc traffic */
  if (conn->free_queue_list != NULL)
    {
      p = css_claim_queue_entry (conn);
    }
  else
    {
      p = (CSS_QUEUE_ENTRY *) malloc (sizeof (CSS_QUEUE_ENTRY));
    }
  if (p == NULL)
    {
      return NULL;
    }

  p->key = key;
  p->buffer = buffer;
  p->size = size;
  p->rc = rc;
  p->transaction_id = transid;
  p->invalidate_snapshot = invalidate_snapshot;
  /* BUG FIX: initialize in_method from the connection. Entries are consumed
   * by css_return_queued_request, which reads p->in_method; on a freshly
   * malloc'ed entry that member was previously left indeterminate. The
   * connection's in_method is set from the packet header just like the
   * transaction_id/invalidate_snapshot/db_error values callers pass in. */
  p->in_method = conn->in_method;
  p->db_error = db_error;

  return p;
}
/*
* css_free_queue_entry() - free queue entry
* return: void
* conn(in): connection entry
* entry(in): queue entry
*/
static void
css_free_queue_entry (CSS_CONN_ENTRY * conn, CSS_QUEUE_ENTRY * entry)
{
  if (entry == NULL)
    {
      return;
    }

  /* release the payload (if any), then recycle the node onto the free list */
  if (entry->buffer != NULL)
    {
      free_and_init (entry->buffer);
    }

  css_retire_queue_entry (conn, entry);
}
/*
* css_add_queue_entry() - add queue entry
* return: 0 if success, or error code
* conn(in): connection entry
* list(in): queue list
* request_id(in): request id
* buffer(in):
* buffer_size(in):
* rc(in):
* transid(in):
* db_error(in):
*/
static css_error_code
css_add_queue_entry (CSS_CONN_ENTRY * conn, CSS_LIST * list, unsigned short request_id, char *buffer, int buffer_size,
		     int rc, int transid, int invalidate_snapshot, int db_error)
{
  CSS_QUEUE_ENTRY *entry;

  /* build the entry first; failure here means no allocation survived */
  entry = css_make_queue_entry (conn, request_id, buffer, buffer_size, rc, transid, invalidate_snapshot, db_error);
  if (entry == NULL)
    {
      return CANT_ALLOC_BUFFER;
    }

  /* link it into the target list; on failure recycle the node */
  if (css_add_list (list, entry) != NO_ERROR)
    {
      css_retire_queue_entry (conn, entry);
      return CANT_ALLOC_BUFFER;
    }

  return NO_ERRORS;
}
/*
* css_find_queue_entry_by_key() - find queue entry using key
* return: status of traverse
* data(in): queue entry
* user(in): search argument
*/
static int
css_find_queue_entry_by_key (void *data, void *user)
{
CSS_QUEUE_SEARCH_ARG *arg = (CSS_QUEUE_SEARCH_ARG *) user;
CSS_QUEUE_ENTRY *p = (CSS_QUEUE_ENTRY *) data;
if (p->key == arg->key)
{
arg->entry_ptr = p;
if (arg->remove_entry)
{
return TRAV_STOP_DELETE;
}
else
{
return TRAV_STOP;
}
}
return TRAV_CONT;
}
/*
* css_find_queue_entry() - find queue entry
* return: queue entry
* list(in): queue list
* key(in): key
*/
static CSS_QUEUE_ENTRY *
css_find_queue_entry (CSS_LIST * list, unsigned int key)
{
  CSS_QUEUE_SEARCH_ARG search_arg;

  search_arg.entry_ptr = NULL;
  search_arg.key = key;
  search_arg.remove_entry = 0;	/* lookup only; leave the entry queued */

  css_traverse_list (list, css_find_queue_entry_by_key, &search_arg);

  return search_arg.entry_ptr;
}
/*
* css_find_and_remove_queue_entry() - find queue entry and remove it
* return: queue entry
* list(in): queue list
* key(in): key
*/
static CSS_QUEUE_ENTRY *
css_find_and_remove_queue_entry (CSS_LIST * list, unsigned int key)
{
  CSS_QUEUE_SEARCH_ARG search_arg;

  search_arg.entry_ptr = NULL;
  search_arg.key = key;
  search_arg.remove_entry = 1;	/* unlink the entry from the list as well */

  css_traverse_list (list, css_find_queue_entry_by_key, &search_arg);

  return search_arg.entry_ptr;
}
/*
* css_make_wait_queue_entry() - make wait queue entry
* return: wait queue entry
* conn(in): connection entry
* key(in):
* buffer(out):
* size(out):
* rc(out):
*/
static CSS_WAIT_QUEUE_ENTRY *
css_make_wait_queue_entry (CSS_CONN_ENTRY * conn, unsigned int key, char **buffer, int *size, int *rc)
{
  CSS_WAIT_QUEUE_ENTRY *entry;

  /* prefer a recycled node; fall back to the heap */
  if (conn->free_wait_queue_list != NULL)
    {
      entry = css_claim_wait_queue_entry (conn);
    }
  else
    {
      entry = (CSS_WAIT_QUEUE_ENTRY *) malloc (sizeof (CSS_WAIT_QUEUE_ENTRY));
    }
  if (entry == NULL)
    {
      return NULL;
    }

  entry->key = key;
  entry->buffer = buffer;
  entry->size = size;
  entry->rc = rc;
  /* remember the waiting thread so the reader side can wake it later */
  entry->thrd_entry = thread_get_thread_entry_info ();

  return entry;
}
/*
* css_free_wait_queue_entry() - free wait queue entry
* return: void
* conn(in): connection entry
* entry(in): wait queue entry
*/
static void
css_free_wait_queue_entry (CSS_CONN_ENTRY * conn, CSS_WAIT_QUEUE_ENTRY * entry)
{
  if (entry == NULL)
    {
      return;
    }
  /* If a thread is still registered on this entry, wake it up before
   * recycling the node; the waiter must currently be suspended on the css
   * data queue (asserted below). */
  if (entry->thrd_entry != NULL)
    {
      thread_lock_entry (entry->thrd_entry);
      assert (entry->thrd_entry->resume_status == THREAD_CSS_QUEUE_SUSPENDED);
      thread_wakeup_already_had_mutex (entry->thrd_entry, THREAD_CSS_QUEUE_RESUMED);
      thread_unlock_entry (entry->thrd_entry);
    }
  css_retire_wait_queue_entry (conn, entry);
}
/*
* css_add_wait_queue_entry() - add wait queue entry
* return: wait queue entry
* conn(in): connection entry
* list(in): wait queue list
* request_id(in): request id
* buffer(out):
* buffer_size(out):
* rc(out):
*/
static CSS_WAIT_QUEUE_ENTRY *
css_add_wait_queue_entry (CSS_CONN_ENTRY * conn, CSS_LIST * list, unsigned short request_id, char **buffer,
			  int *buffer_size, int *rc)
{
  CSS_WAIT_QUEUE_ENTRY *entry;

  entry = css_make_wait_queue_entry (conn, request_id, buffer, buffer_size, rc);
  if (entry == NULL)
    {
      return NULL;
    }

  /* link the waiter into the list; recycle the node if linking fails */
  if (css_add_list (list, entry) != NO_ERROR)
    {
      css_retire_wait_queue_entry (conn, entry);
      return NULL;
    }

  return entry;
}
/*
* find_wait_queue_entry_by_key() - find wait queue entry using key
* return: status of traverse
* data(in): wait queue entry
* user(in): search argument
*/
static int
find_wait_queue_entry_by_key (void *data, void *user)
{
CSS_WAIT_QUEUE_SEARCH_ARG *arg = (CSS_WAIT_QUEUE_SEARCH_ARG *) user;
CSS_WAIT_QUEUE_ENTRY *p = (CSS_WAIT_QUEUE_ENTRY *) data;
if (p->key == arg->key)
{
arg->entry_ptr = p;
if (arg->remove_entry)
{
return TRAV_STOP_DELETE;
}
else
{
return TRAV_STOP;
}
}
return TRAV_CONT;
}
/*
* css_find_and_remove_wait_queue_entry() - find wait queue entry and remove it
* return: wait queue entry
* list(in): wait queue list
* key(in):
*/
static CSS_WAIT_QUEUE_ENTRY *
css_find_and_remove_wait_queue_entry (CSS_LIST * list, unsigned int key)
{
  CSS_WAIT_QUEUE_SEARCH_ARG search_arg;

  search_arg.entry_ptr = NULL;
  search_arg.key = key;
  search_arg.remove_entry = 1;	/* unlink the waiter from the list as well */

  css_traverse_list (list, find_wait_queue_entry_by_key, &search_arg);

  return search_arg.entry_ptr;
}
/*
* css_queue_packet() - queue packet
* return: void
* conn(in): connection entry
* type(in): packet type
* request_id(in): request id
* header(in): network header
* size(in): packet size
*/
static css_error_code
css_queue_packet (CSS_CONN_ENTRY * conn, int type, unsigned short request_id, const NET_HEADER * header, int size)
{
  THREAD_ENTRY *wait_thrd = NULL, *p, *next;
  unsigned short flags = 0;
  int r;
  int transaction_id, db_error, invalidate_snapshot;
  css_error_code rc = NO_ERRORS;

  /* decode the transaction context carried by the packet header */
  transaction_id = ntohl (header->transaction_id);
  db_error = (int) ntohl (header->db_error);
  flags = ntohs (header->flags);
  invalidate_snapshot = flags & NET_HEADER_FLAG_INVALIDATE_SNAPSHOT ? 1 : 0;
  bool in_method = flags & NET_HEADER_FLAG_METHOD_MODE ? true : false;

  r = rmutex_lock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);

  if (conn->stop_talk)
    {
      r = rmutex_unlock (NULL, &conn->rmutex);
      assert (r == NO_ERROR);
      return CONNECTION_CLOSED;
    }

  /* propagate the header's context into the connection */
  conn->set_tran_index (transaction_id);
  conn->db_error = db_error;
  conn->invalidate_snapshot = invalidate_snapshot;
  conn->in_method = in_method;

  /* route the packet by type; DATA packets may collect waiting threads that
   * must be woken after the queues are updated */
  switch (type)
    {
    case CLOSE_TYPE:
      css_process_close_packet (conn);
      break;
    case ABORT_TYPE:
      css_process_abort_packet (conn, request_id);
      break;
    case DATA_TYPE:
      css_queue_data_packet (conn, request_id, header, &wait_thrd);
      break;
    case ERROR_TYPE:
      css_queue_error_packet (conn, request_id, header);
      break;
    case COMMAND_TYPE:
      rc = css_queue_command_packet (conn, request_id, header, size);
      if (rc != NO_ERRORS)
	{
	  r = rmutex_unlock (NULL, &conn->rmutex);
	  assert (r == NO_ERROR);
	  return rc;
	}
      break;
    default:
      CSS_TRACE2 ("Asked to queue an unknown packet id = %d.\n", type);
      assert (false);
      /* BUG FIX: this path previously returned while still holding
       * conn->rmutex. Every other exit of this function releases the mutex;
       * leaking it here would deadlock the next user of this connection. */
      r = rmutex_unlock (NULL, &conn->rmutex);
      assert (r == NO_ERROR);
      return WRONG_PACKET_TYPE;
    }

  /* wake the data waiters collected above */
  p = wait_thrd;
  while (p != NULL)
    {
      thread_lock_entry (p);
      assert (p->resume_status == THREAD_CSS_QUEUE_SUSPENDED || p->resume_status == THREAD_CSECT_WRITER_SUSPENDED);
      next = p->next_wait_thrd;
      p->next_wait_thrd = NULL;
      /* When the resume_status is THREAD_CSS_QUEUE_SUSPENDED, it means the data waiting thread is still waiting on the
       * data queue. Otherwise, in case of THREAD_CSECT_WRITER_SUSPENDED, it means that the thread was timed out, is
       * trying to clear its queue buffer (see clear_wait_queue_entry_and_free_buffer function), and waiting for its
       * conn->csect. We don't need to wakeup the thread for this case. We may send useless signal for it, but it may
       * bring other anomalies: the thread may sleep on another resources which we don't know at this moment. */
      if (p->resume_status == THREAD_CSS_QUEUE_SUSPENDED)
	{
	  thread_wakeup_already_had_mutex (p, THREAD_CSS_QUEUE_RESUMED);
	}
      thread_unlock_entry (p);
      p = next;
    }

  r = rmutex_unlock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
  return NO_ERRORS;
}
/*
* css_process_close_packet() - prccess close packet
* return: void
* conn(in): conenction entry
*/
static void
css_process_close_packet (CSS_CONN_ENTRY * conn)
{
  /* tear the socket down first, then mark the connection as closed */
  if (!IS_INVALID_SOCKET (conn->fd))
    {
      css_shutdown_socket (conn->fd);
      conn->fd = INVALID_SOCKET;
    }

  conn->status = CONN_CLOSED;
}
/*
* css_process_abort_packet() - process abort packet
* return: void
* conn(in): connection entry
* request_id(in): request id
*/
static void
css_process_abort_packet (CSS_CONN_ENTRY * conn, unsigned short request_id)
{
  CSS_QUEUE_ENTRY *pending;

  /* discard any queued request packet for this id */
  pending = css_find_and_remove_queue_entry (&conn->request_queue, request_id);
  if (pending != NULL)
    {
      css_free_queue_entry (conn, pending);
    }

  /* discard any queued data packet for this id */
  pending = css_find_and_remove_queue_entry (&conn->data_queue, request_id);
  if (pending != NULL)
    {
      css_free_queue_entry (conn, pending);
    }

  /* record the abort (at most once) so later packets for this id are dropped */
  if (css_find_queue_entry (&conn->abort_queue, request_id) == NULL)
    {
      css_add_queue_entry (conn, &conn->abort_queue, request_id, NULL, 0,
			   NO_ERRORS, conn->get_tran_index (), conn->invalidate_snapshot, conn->db_error);
    }
}
/*
* css_queue_data_packet() - queue data packet
* return: void
* conn(in): connection entry
* request_id(in): request id
* header(in): network header
* wake_thrd(out): thread that wake up
*/
static void
css_queue_data_packet (CSS_CONN_ENTRY * conn, unsigned short request_id,
		       const NET_HEADER * header, THREAD_ENTRY ** wake_thrd)
{
  THREAD_ENTRY *thrd = NULL, *last = NULL;
  CSS_QUEUE_ENTRY *buffer_entry;
  CSS_WAIT_QUEUE_ENTRY *data_wait = NULL;
  char *buffer = NULL;
  int rc;
  int size; /* size to be read */
  /* setup wake_thrd. hmm.. consider recursion */
  /* find the tail of the caller's wake-up list so we can append to it */
  if (*wake_thrd != NULL)
    {
      last = *wake_thrd;
      while (last->next_wait_thrd != NULL)
	{
	  last = last->next_wait_thrd;
	}
    }
  size = ntohl (header->buffer_size);
  /* check if user have given a buffer */
  buffer_entry = css_find_and_remove_queue_entry (&conn->buffer_queue, request_id);
  if (buffer_entry != NULL)
    {
      /* compare data and buffer size. if different? something wrong!!! */
      /* clamp the read to the caller-supplied buffer's capacity */
      if (size > buffer_entry->size)
	{
	  size = buffer_entry->size;
	}
      /* take ownership of the caller's buffer before recycling the entry */
      buffer = buffer_entry->buffer;
      buffer_entry->buffer = NULL;
      css_free_queue_entry (conn, buffer_entry);
    }
  else if (size == 0)
    {
      buffer = NULL;
    }
  else
    {
      /* a NULL result here is handled below as CANT_ALLOC_BUFFER */
      buffer = (char *) malloc (size);
    }
  /*
   * check if there exists thread waiting for data.
   * Add to wake_thrd list.
   */
  data_wait = css_find_and_remove_wait_queue_entry (&conn->data_wait_queue, request_id);
  if (data_wait != NULL)
    {
      thrd = data_wait->thrd_entry;
      thrd->next_wait_thrd = NULL;
      if (last == NULL)
	{
	  *wake_thrd = thrd;
	}
      else
	{
	  last->next_wait_thrd = thrd;
	}
      last = thrd;
    }
  /* receive data into buffer and queue data if there's no waiting thread */
  if (buffer != NULL)
    {
      rc = css_net_recv (conn->fd, buffer, &size, -1);
      if (rc == NO_ERRORS || rc == RECORD_TRUNCATED)
	{
	  if (!css_is_request_aborted (conn, request_id))
	    {
	      if (data_wait == NULL)
		{
		  /* if waiter not exists, add to data queue */
		  /* the queue entry takes ownership of buffer */
		  css_add_queue_entry (conn, &conn->data_queue, request_id, buffer, size, rc, conn->get_tran_index (),
				       conn->invalidate_snapshot, conn->db_error);
		  return;
		}
	      else
		{
		  /* hand the buffer straight to the waiter's output slots */
		  *data_wait->buffer = buffer;
		  *data_wait->size = size;
		  *data_wait->rc = rc;
		  data_wait->thrd_entry = NULL;
		  css_free_wait_queue_entry (conn, data_wait);
		  return;
		}
	    }
	}
      /* if error occurred */
      free_and_init (buffer);
    }
  else
    {
      /* no buffer (allocation failed or size == 0): drain the payload off the
       * socket so the stream stays in sync, then record the failure */
      rc = CANT_ALLOC_BUFFER;
      css_read_remaining_bytes (conn->fd, sizeof (int) + size);
      if (!css_is_request_aborted (conn, request_id))
	{
	  if (data_wait == NULL)
	    {
	      css_add_queue_entry (conn, &conn->data_queue, request_id, NULL,
				   0, rc, conn->get_tran_index (), conn->invalidate_snapshot, conn->db_error);
	      return;
	    }
	}
    }
  /* if error was occurred, setup error status */
  if (data_wait != NULL)
    {
      *data_wait->buffer = NULL;
      *data_wait->size = 0;
      *data_wait->rc = rc;
    }
}
/*
* css_queue_error_packet() - queue error packet
* return: void
* conn(in): connection entry
* request_id(in): request id
* header(in): network header
*/
static void
css_queue_error_packet (CSS_CONN_ENTRY * conn, unsigned short request_id, const NET_HEADER * header)
{
  char *buffer;
  int rc;
  int size;
  size = ntohl (header->buffer_size);
  buffer = (char *) malloc (size);
  if (buffer != NULL)
    {
      /* read the error payload, then queue it unless the request was aborted */
      rc = css_net_recv (conn->fd, buffer, &size, -1);
      if (rc == NO_ERRORS || rc == RECORD_TRUNCATED)
	{
	  if (!css_is_request_aborted (conn, request_id))
	    {
	      /* the queue entry takes ownership of buffer */
	      css_add_queue_entry (conn, &conn->error_queue, request_id,
				   buffer, size, rc, conn->get_tran_index (), conn->invalidate_snapshot,
				   conn->db_error);
	      return;
	    }
	}
      free_and_init (buffer);
    }
  else
    {
      /* allocation failed: drain the payload so the stream stays in sync,
       * then queue an entry recording the failure */
      rc = CANT_ALLOC_BUFFER;
      css_read_remaining_bytes (conn->fd, sizeof (int) + size);
      if (!css_is_request_aborted (conn, request_id))
	{
	  css_add_queue_entry (conn, &conn->error_queue, request_id, NULL, 0,
			       rc, conn->get_tran_index (), conn->invalidate_snapshot, conn->db_error);
	}
    }
}
/*
* css_queue_command_packet() - queue command packet
* return: void
* conn(in): connection entry
* request_id(in): request id
* header(in): network header
* size(in): packet size
*/
static css_error_code
css_queue_command_packet (CSS_CONN_ENTRY * conn, unsigned short request_id, const NET_HEADER * header, int size)
{
  NET_HEADER *p;
  NET_HEADER data_header = DEFAULT_HEADER_DATA;
  css_error_code rc = NO_ERRORS;
  /* caller (css_queue_packet) holds conn->rmutex and has checked stop_talk */
  assert (!conn->stop_talk);
  if (css_is_request_aborted (conn, request_id))
    {
      // ignore
      return NO_ERRORS;
    }
  /* copy the header into a recycled (or fresh) NET_HEADER node */
  if (conn->free_net_header_list != NULL)
    {
      p = css_claim_net_header_entry (conn);
    }
  else
    {
      p = (NET_HEADER *) malloc (sizeof (NET_HEADER));
    }
  if (p == NULL)
    {
      assert (false);
      return CANT_ALLOC_BUFFER;
    }
  memcpy ((char *) p, (char *) header, sizeof (NET_HEADER));
  rc = css_add_queue_entry (conn, &conn->request_queue, request_id, (char *) p, size, NO_ERRORS,
			    conn->get_tran_index (), conn->invalidate_snapshot, conn->db_error);
  if (rc != NO_ERRORS)
    {
      /* recycle the header copy; the request was not queued */
      css_retire_net_header_entry (conn, p);
      return rc;
    }
  if (ntohl (header->buffer_size) <= 0)
    {
      // a request without a buffer, e.g, NET_SERVER_LOG_CHECKPOINT, NET_SERVER_TM_SERVER_ABORT.
      return NO_ERRORS;
    }
  /* the command carries an argument buffer: read its header and recurse into
   * css_queue_packet to queue the accompanying DATA packet */
  rc = (css_error_code) css_read_header (conn, &data_header);
  if (rc != NO_ERRORS)
    {
      // what to do?
      return rc;
    }
  rc = css_queue_packet (conn, (int) ntohl (data_header.type), (unsigned short) ntohl (data_header.request_id),
			 &data_header, sizeof (NET_HEADER));
  return rc;
}
/*
* css_request_aborted() - check request is aborted
* return: true if aborted, or false
* conn(in): connection entry
* request_id(in): request id
*/
static bool
css_is_request_aborted (CSS_CONN_ENTRY * conn, unsigned short request_id)
{
  /* a request is considered aborted iff an entry for its id sits in the
   * connection's abort queue */
  return css_find_queue_entry (&conn->abort_queue, request_id) != NULL;
}
/*
* css_return_queued_request() - get request from queue
* return: 0 if success, or error code
* conn(in): connection entry
* rid(out): request id
* request(out): request
* buffer_size(out): request buffer size
*/
int
css_return_queued_request (CSS_CONN_ENTRY * conn, unsigned short *rid, int *request, int *buffer_size)
{
  CSS_QUEUE_ENTRY *p;
  NET_HEADER *buffer;
  int rc, r;

  r = rmutex_lock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);

  if (conn->status == CONN_OPEN)
    {
      p = (CSS_QUEUE_ENTRY *) css_remove_list_from_head (&conn->request_queue);
      if (p != NULL)
	{
	  /* unpack the queued NET_HEADER into the caller's output slots */
	  *rid = p->key;
	  buffer = (NET_HEADER *) p->buffer;
	  p->buffer = NULL;
	  *request = ntohs (buffer->function_code);
	  *buffer_size = ntohl (buffer->buffer_size);
	  /* restore the transaction context captured when the packet was
	   * queued */
	  conn->set_tran_index (p->transaction_id);
	  conn->invalidate_snapshot = p->invalidate_snapshot;
	  conn->in_method = p->in_method;
	  conn->db_error = p->db_error;
	  css_retire_net_header_entry (conn, buffer);
	  css_free_queue_entry (conn, p);
	  rc = NO_ERRORS;
	}
      else
	{
	  rc = NO_DATA_AVAILABLE;
	}
    }
  else
    {
      /* BUG FIX: return the css_error_code CONNECTION_CLOSED rather than the
       * connection-status constant CONN_CLOSED; callers compare this result
       * against css_error_code values (NO_ERRORS, NO_DATA_AVAILABLE, ...),
       * consistent with css_read_header and css_read_and_queue. */
      rc = CONNECTION_CLOSED;
    }

  r = rmutex_unlock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
  return rc;
}
/*
* clear_wait_queue_entry_and_free_buffer () - remove data_wait_queue entry when completing or aborting
* to receive buffer from data_wait_queue.
* return: void
* conn(in): connection entry
* rid(in): request id
* bufferp(in): data buffer
*/
static void
clear_wait_queue_entry_and_free_buffer (THREAD_ENTRY * thrdp, CSS_CONN_ENTRY * conn, unsigned short rid, char **bufferp)
{
  CSS_WAIT_QUEUE_ENTRY *data_wait;
  int r;
  r = rmutex_lock (thrdp, &conn->rmutex);
  assert (r == NO_ERROR);
  /* check the deadlock related problem */
  data_wait = css_find_and_remove_wait_queue_entry (&conn->data_wait_queue, rid);
  /* data_wait might be always not NULL except the actual connection close */
  if (data_wait)
    {
      assert (data_wait->thrd_entry == thrdp);	/* it must be me */
      // clear the back-reference so nobody tries to wake this thread later
      data_wait->thrd_entry = NULL;
      css_free_wait_queue_entry (conn, data_wait);
    }
  else
    {
      /* connection_handler_thread may proceed ahead of me right after timeout has happened. If the case, we must free
       * the buffer. */
      if (*bufferp != NULL)
	{
	  free_and_init (*bufferp);
	}
    }
  r = rmutex_unlock (thrdp, &conn->rmutex);
  assert (r == NO_ERROR);
}
/*
* css_return_queued_data_timeout() - get request data from queue until timeout
* return: 0 if success, or error code
* conn(in): connection entry
* rid(out): request id
* buffer(out): data buffer
* bufsize(out): buffer size
* rc(out):
* waitsec: timeout second
*/
static int
css_return_queued_data_timeout (CSS_CONN_ENTRY * conn, unsigned short rid,
				char **buffer, int *bufsize, int *rc, int waitsec)
{
  CSS_QUEUE_ENTRY *data_entry, *buffer_entry;
  CSS_WAIT_QUEUE_ENTRY *data_wait;
  int r;
  /* enter the critical section of this connection */
  r = rmutex_lock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
  *buffer = NULL;
  *bufsize = -1;
  /* if conn is closed or to be closed, return CONECTION_CLOSED */
  if (conn->status == CONN_OPEN)
    {
      /* look up the data queue first to see if the required data is arrived and queued already */
      data_entry = css_find_and_remove_queue_entry (&conn->data_queue, rid);
      if (data_entry)
	{
	  /* look up the buffer queue to see if the user provided the receive data buffer */
	  buffer_entry = css_find_and_remove_queue_entry (&conn->buffer_queue, rid);
	  if (buffer_entry)
	    {
	      /* copy the received data to the user provided buffer area */
	      *buffer = buffer_entry->buffer;
	      /* truncate to the smaller of the received size and the user buffer size */
	      *bufsize = MIN (data_entry->size, buffer_entry->size);
	      if (*buffer != data_entry->buffer || *bufsize != data_entry->size)
		{
		  memcpy (*buffer, data_entry->buffer, *bufsize);
		}
	      /* destroy the buffer queue entry */
	      buffer_entry->buffer = NULL;
	      css_free_queue_entry (conn, buffer_entry);
	    }
	  else
	    {
	      /* set the buffer to point to the data queue entry */
	      *buffer = data_entry->buffer;
	      *bufsize = data_entry->size;
	      /* ownership transferred to the caller */
	      data_entry->buffer = NULL;
	    }
	  /* set return code, transaction id, and error code */
	  *rc = data_entry->rc;
	  conn->set_tran_index (data_entry->transaction_id);
	  conn->invalidate_snapshot = data_entry->invalidate_snapshot;
	  conn->in_method = data_entry->in_method;
	  conn->db_error = data_entry->db_error;
	  css_free_queue_entry (conn, data_entry);
	  r = rmutex_unlock (NULL, &conn->rmutex);
	  assert (r == NO_ERROR);
	  return NO_ERRORS;
	}
      else
	{
	  THREAD_ENTRY *thrd;
	  /* no data queue entry means that the data is not arrived yet; wait until the data arrives */
	  *rc = NO_DATA_AVAILABLE;
	  /* lock thread entry before enqueue an entry to data wait queue in order to prevent being woken up by
	   * 'css_queue_packet()' before this thread suspends */
	  thrd = thread_get_thread_entry_info ();
	  thread_lock_entry (thrd);
	  /* make a data wait queue entry */
	  data_wait = css_add_wait_queue_entry (conn, &conn->data_wait_queue, rid, buffer, bufsize, rc);
	  if (data_wait)
	    {
	      /* exit the critical section before to be suspended */
	      r = rmutex_unlock (NULL, &conn->rmutex);
	      assert (r == NO_ERROR);
	      /* fall to the thread sleep until the socket listener 'css_server_thread()' receives and enqueues the
	       * data */
	      if (waitsec < 0)
		{
		  /* negative timeout: wait indefinitely */
		  thread_suspend_wakeup_and_unlock_entry (thrd, THREAD_CSS_QUEUE_SUSPENDED);
		  if (thrd->resume_status != THREAD_CSS_QUEUE_RESUMED)
		    {
		      assert (thrd->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT);
		      clear_wait_queue_entry_and_free_buffer (thrd, conn, rid, buffer);
		      *buffer = NULL;
		      *bufsize = -1;
		      return NO_DATA_AVAILABLE;
		    }
		  else
		    {
		      assert (thrd->resume_status == THREAD_CSS_QUEUE_RESUMED);
		    }
		}
	      else
		{
		  int r;
		  struct timespec abstime;
		  abstime.tv_sec = (int) time (NULL) + waitsec;
		  abstime.tv_nsec = 0;
		  r = thread_suspend_timeout_wakeup_and_unlock_entry (thrd, &abstime, THREAD_CSS_QUEUE_SUSPENDED);
		  if (r == ER_CSS_PTHREAD_COND_TIMEDOUT)
		    {
		      /* timed out: tear down the wait entry (and free the buffer if the
		       * listener raced us and already detached it) */
		      clear_wait_queue_entry_and_free_buffer (thrd, conn, rid, buffer);
		      *rc = TIMEDOUT_ON_QUEUE;
		      *buffer = NULL;
		      *bufsize = -1;
		      return TIMEDOUT_ON_QUEUE;
		    }
		  else if (thrd->resume_status != THREAD_CSS_QUEUE_RESUMED)
		    {
		      assert (thrd->resume_status == THREAD_RESUME_DUE_TO_INTERRUPT);
		      clear_wait_queue_entry_and_free_buffer (thrd, conn, rid, buffer);
		      *buffer = NULL;
		      *bufsize = -1;
		      return NO_DATA_AVAILABLE;
		    }
		  else
		    {
		      assert (thrd->resume_status == THREAD_CSS_QUEUE_RESUMED);
		    }
		}
	      /* the waker filled *buffer/*bufsize through the wait queue entry */
	      if (*buffer == NULL || *bufsize < 0)
		{
		  return CONNECTION_CLOSED;
		}
	      if (*rc == CONNECTION_CLOSED)
		{
		  clear_wait_queue_entry_and_free_buffer (thrd, conn, rid, buffer);
		}
	      return NO_ERRORS;
	    }
	  else
	    {
	      /* oops! error! unlock thread entry */
	      thread_unlock_entry (thrd);
	      /* allocation error */
	      *rc = CANT_ALLOC_BUFFER;
	    }
	}
    }
  else
    {
      /* conn->status == CONN_CLOSED || CONN_CLOSING; the connection was closed */
      *rc = CONNECTION_CLOSED;
    }
  /* exit the critical section */
  r = rmutex_unlock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
  return *rc;
}
/*
* css_return_queued_data() - get data from queue
* return: 0 if success, or error code
* conn(in): connection entry
* rid(out): request id
* buffer(out): data buffer
* bufsize(out): buffer size
* rc(out):
*/
int
css_return_queued_data (CSS_CONN_ENTRY * conn, unsigned short rid, char **buffer, int *bufsize, int *rc)
{
  // convenience wrapper: wait indefinitely (waitsec = -1) for the queued data
  return css_return_queued_data_timeout (conn, rid, buffer, bufsize, rc, -1);
}
/*
* css_return_queued_error() - get error from queue
* return: 0 if success, or error code
* conn(in): connection entry
* request_id(out): request id
* buffer(out): data buffer
* buffer_size(out): buffer size
* rc(out):
*/
int
css_return_queued_error (CSS_CONN_ENTRY * conn, unsigned short request_id, char **buffer, int *buffer_size, int *rc)
{
  CSS_QUEUE_ENTRY *entry;
  int found = 0;
  int r;

  r = rmutex_lock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);

  /* hand any queued error packet for this request over to the caller */
  entry = css_find_and_remove_queue_entry (&conn->error_queue, request_id);
  if (entry != NULL)
    {
      *buffer = entry->buffer;
      *buffer_size = entry->size;
      *rc = entry->db_error;
      /* the caller now owns the buffer; detach it before freeing the entry */
      entry->buffer = NULL;
      css_free_queue_entry (conn, entry);
      found = 1;
    }

  r = rmutex_unlock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
  return found;
}
/*
* css_is_valid_request_id() - check request id id valid
* return: true if valid, or false
* conn(in): connection entry
* request_id(in): request id
*/
static bool
css_is_valid_request_id (CSS_CONN_ENTRY * conn, unsigned short request_id)
{
  /* an id is valid only while nothing for it is pending on any queue */
  if (css_find_queue_entry (&conn->data_queue, request_id) != NULL
      || css_find_queue_entry (&conn->request_queue, request_id) != NULL
      || css_find_queue_entry (&conn->abort_queue, request_id) != NULL
      || css_find_queue_entry (&conn->error_queue, request_id) != NULL)
    {
      return false;
    }
  return true;
}
/*
* css_remove_unexpected_packets() - remove unexpected packet
* return: void
* conn(in): connection entry
* request_id(in): request id
*/
void
css_remove_unexpected_packets (CSS_CONN_ENTRY * conn, unsigned short request_id)
{
  /* flush any leftover request, data and error packets queued under this id */
  CSS_QUEUE_ENTRY *entry;

  entry = css_find_and_remove_queue_entry (&conn->request_queue, request_id);
  css_free_queue_entry (conn, entry);

  entry = css_find_and_remove_queue_entry (&conn->data_queue, request_id);
  css_free_queue_entry (conn, entry);

  entry = css_find_and_remove_queue_entry (&conn->error_queue, request_id);
  css_free_queue_entry (conn, entry);
}
/*
* css_queue_user_data_buffer() - queue user data
* return: 0 if success, or error code
* conn(in): connection entry
* request_id(in): request id
* size(in): buffer size
* buffer(in): buffer
*/
int
css_queue_user_data_buffer (CSS_CONN_ENTRY * conn, unsigned short request_id, int size, char *buffer)
{
  int status = NO_ERRORS;
  int r;

  r = rmutex_lock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);

  /* register the caller's receive buffer unless the request was already aborted */
  if (buffer != NULL && !css_is_request_aborted (conn, request_id))
    {
      status = css_add_queue_entry (conn, &conn->buffer_queue, request_id, buffer,
				    size, NO_ERRORS, conn->get_tran_index (), conn->invalidate_snapshot,
				    conn->db_error);
    }

  r = rmutex_unlock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
  return status;
}
/*
* css_remove_and_free_queue_entry() - free queue entry
* return: status if traverse
* data(in): connection entry
* arg(in): queue entry
*/
static int
css_remove_and_free_queue_entry (void *data, void *arg)
{
css_free_queue_entry ((CSS_CONN_ENTRY *) arg, (CSS_QUEUE_ENTRY *) data);
return TRAV_CONT_DELETE;
}
/*
* css_remove_and_free_wait_queue_entry() - free wait queue entry
* return: status if traverse
* data(in): connection entry
* arg(in): wait queue entry
*/
static int
css_remove_and_free_wait_queue_entry (void *data, void *arg)
{
css_free_wait_queue_entry ((CSS_CONN_ENTRY *) arg, (CSS_WAIT_QUEUE_ENTRY *) data);
return TRAV_CONT_DELETE;
}
/*
* css_remove_all_unexpected_packets() - remove all unexpected packets
* return: void
* conn(in): connection entry
*/
void
css_remove_all_unexpected_packets (CSS_CONN_ENTRY * conn)
{
  int r;
  r = rmutex_lock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
  // drain every per-connection queue; the traversal callbacks free each entry
  // and return TRAV_CONT_DELETE so the list nodes are unlinked as we go
  css_traverse_list (&conn->request_queue, css_remove_and_free_queue_entry, conn);
  css_traverse_list (&conn->data_queue, css_remove_and_free_queue_entry, conn);
  // the wait queue holds CSS_WAIT_QUEUE_ENTRY nodes, hence the different callback
  css_traverse_list (&conn->data_wait_queue, css_remove_and_free_wait_queue_entry, conn);
  css_traverse_list (&conn->abort_queue, css_remove_and_free_queue_entry, conn);
  css_traverse_list (&conn->error_queue, css_remove_and_free_queue_entry, conn);
  r = rmutex_unlock (NULL, &conn->rmutex);
  assert (r == NO_ERROR);
}
/*
* css_set_user_access_status() - set user access status information
* return: void
* db_user(in):
* host(in):
* program_name(in):
*/
/*
 * css_set_user_access_status() - record the latest access (time, host,
 *   program) for db_user, creating a new list entry on first access.
 *
 * Fix: the original released CSECT_ACCESS_STATUS *before* writing
 * access->time / host / program_name, mutating a node of the shared
 * css_Access_status_anchor list outside the critical section and racing
 * with css_get_user_access_status()/css_free_user_access_status().
 * The updates are now performed while the critical section is still held.
 */
void
css_set_user_access_status (const char *db_user, const char *host, const char *program_name)
{
  LAST_ACCESS_STATUS *access = NULL;
  assert (db_user != NULL);
  assert (host != NULL);
  assert (program_name != NULL);
  csect_enter (NULL, CSECT_ACCESS_STATUS, INF_WAIT);
  // look for an existing entry for this user
  for (access = css_Access_status_anchor; access != NULL; access = access->next)
    {
      if (strcmp (access->db_user, db_user) == 0)
	{
	  break;
	}
    }
  if (access == NULL)
    {
      access = (LAST_ACCESS_STATUS *) malloc (sizeof (LAST_ACCESS_STATUS));
      if (access == NULL)
	{
	  /* if memory allocation fail, just ignore and return */
	  csect_exit (NULL, CSECT_ACCESS_STATUS);
	  return;
	}
      css_Num_access_user++;
      memset (access, 0, sizeof (LAST_ACCESS_STATUS));
      // prepend the new entry to the shared list
      access->next = css_Access_status_anchor;
      css_Access_status_anchor = access;
      strncpy (access->db_user, db_user, sizeof (access->db_user) - 1);
    }
  // update the entry while still inside the critical section (see header comment)
  access->time = time (NULL);
  strncpy (access->host, host, sizeof (access->host) - 1);
  strncpy (access->program_name, program_name, sizeof (access->program_name) - 1);
  csect_exit (NULL, CSECT_ACCESS_STATUS);
  return;
}
/*
* css_get_user_access_status() - get user access status informations
* return: void
* num_user(in):
* access_status_array(out):
*/
void
css_get_user_access_status (int num_user, LAST_ACCESS_STATUS ** access_status_array)
{
int i = 0;
LAST_ACCESS_STATUS *access = NULL;
csect_enter_as_reader (NULL, CSECT_ACCESS_STATUS, INF_WAIT);
for (access = css_Access_status_anchor; (access != NULL && i < num_user); access = access->next, i++)
{
access_status_array[i] = access;
}
csect_exit (NULL, CSECT_ACCESS_STATUS);
return;
}
/*
* css_free_user_access_status() - free all user access status information
* return: void
*/
void
css_free_user_access_status (void)
{
LAST_ACCESS_STATUS *access = NULL;
csect_enter (NULL, CSECT_ACCESS_STATUS, INF_WAIT);
while (css_Access_status_anchor != NULL)
{
access = css_Access_status_anchor;
css_Access_status_anchor = access->next;
free_and_init (access);
}
css_Num_access_user = 0;
csect_exit (NULL, CSECT_ACCESS_STATUS);
return;
}
|
c32aa2a593dcc3a5424a81d7c045c4b09e98131f
|
5f86fc385c7dcfcb5b166cdea7c8b13057b8bb5f
|
/include/openenclave/internal/sgx/extradata.h
|
cfe9b8bb9137d5ee677af2bafbdfde1092164250
|
[
"MIT"
] |
permissive
|
openenclave/openenclave
|
54a38e12d9aa73357d9f438a07cd8c07ffe5e6df
|
cdeb95c1ec163117de409295333b6b2702013e08
|
refs/heads/master
| 2023-08-14T16:43:32.049533
| 2023-07-21T15:58:54
| 2023-07-21T15:58:54
| 101,804,230
| 800
| 372
|
MIT
| 2023-09-12T20:26:02
| 2017-08-29T20:31:38
|
C
|
UTF-8
|
C
| false
| false
| 2,157
|
h
|
extradata.h
|
// Copyright (c) Open Enclave SDK contributors.
// Licensed under the MIT License.
#ifndef _OE_INTERNAL_SGX_EXTRADATA_H
#define _OE_INTERNAL_SGX_EXTRADATA_H
#include <openenclave/bits/defs.h>
#include <openenclave/bits/result.h>
#include <openenclave/bits/types.h>
#include <openenclave/internal/sgxcreate.h>
OE_EXTERNC_BEGIN
/* Sanity marker stored in oe_load_extra_enclave_data_hook_arg_t.magic. */
#define OE_LOAD_EXTRA_ENCLAVE_DATA_HOOK_ARG_MAGIC 0x793d33e0efb446d0
/* Loader-owned state passed to the extra-data hook on both invocations. */
typedef struct oe_load_extra_enclave_data_hook_arg
{
    uint64_t magic; /* must equal OE_LOAD_EXTRA_ENCLAVE_DATA_HOOK_ARG_MAGIC */
    oe_sgx_load_context_t* sgx_load_context;
    uint64_t enclave_base;  /* NOTE(review): presumably the enclave image base address -- confirm */
    uint64_t enclave_start; /* NOTE(review): presumably the enclave start address -- confirm */
    uint64_t base_vaddr; /* address relative to the enclave start */
    uint64_t vaddr; /* address relative to the extra data start */
} oe_load_extra_enclave_data_hook_arg_t;
/**
 * May be registered by the host application via
 * oe_register_load_extra_enclave_data_hook to add additional enclave data pages
 * immediately before the enclave heap. The hook will be invoked by the loader
 * twice: In the first time, the loader constructs a dummy **arg** and passes
 * **baseaddr** as zero, expecting the hook to invoke oe_load_extra_enclave_data
 * and returns the total size of extra data (will be stored as part of **arg**).
 * In the second time, the loader constructs the **arg** with necessary
 * parameters and passes the **baseaddr** as the starting address to which the
 * extra data will be loaded, expecting the hook to invoke
 * oe_load_extra_enclave_data to load each extra data page.
 */
typedef oe_result_t (*oe_load_extra_enclave_data_hook_t)(
    oe_load_extra_enclave_data_hook_arg_t* arg,
    uint64_t baseaddr);
/* Registers (replaces) the process-wide extra-enclave-data hook. */
void oe_register_load_extra_enclave_data_hook(
    oe_load_extra_enclave_data_hook_t hook);
/**
 * Called by the host application (from oe_load_extra_enclave_data_hook) to add
 * one page of enclave data. The **vaddr** is relative to the starting address
 * of the extra data (use 0 for adding the first page).
 */
oe_result_t oe_load_extra_enclave_data(
    oe_load_extra_enclave_data_hook_arg_t* arg,
    uint64_t vaddr,
    const void* page,
    uint64_t flags,
    bool extend);
OE_EXTERNC_END
#endif /* _OE_INTERNAL_SGX_EXTRADATA_H */
|
8a3f70ffc54b6676b2de32ec98d2e76bed14514d
|
909095842af0bbf2e769aff361b5af344abc7433
|
/engine/source/gui/editor/guiParticleGraphInspector_ScriptBinding.h
|
16897fd07ff840ecd2bae19146cf92db7551ac27
|
[
"MIT"
] |
permissive
|
TorqueGameEngines/Torque2D
|
316105e8b91cebf8660ff43871440e1c4d0b1c5e
|
2c555d6dd0172a05ddb6a14f014d22f335b4ccad
|
refs/heads/master
| 2023-09-01T02:22:53.663431
| 2023-05-02T20:45:37
| 2023-05-02T20:45:37
| 268,352,960
| 1,001
| 117
|
MIT
| 2023-05-02T20:19:19
| 2020-05-31T19:51:55
|
C
|
UTF-8
|
C
| false
| false
| 4,558
|
h
|
guiParticleGraphInspector_ScriptBinding.h
|
//-----------------------------------------------------------------------------
// Copyright (c) 2013 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------
// Console (script) bindings for GuiParticleGraphInspector.
ConsoleMethodGroupBeginWithDocs(GuiParticleGraphInspector, GuiControl)
/*! Sets the Particle Asset that will be used to draw graphs.
	@param ParticleAsset The target of the graphs.
	@return No return value.
*/
ConsoleMethodWithDocs(GuiParticleGraphInspector, inspect, ConsoleVoid, 3, 3, "(ParticleAsset)")
{
	ParticleAsset* target = dynamic_cast<ParticleAsset*>(Sim::findObject(argv[2]));
	if (!target)
	{
		// only warn when the argument looked like a numeric object id
		if (dAtoi(argv[2]) > 0)
			Con::warnf("%s::inspect(): Object is not a ParticleAsset: %s", argv[0], argv[2]);
		return;
	}
	object->inspectObject(target);
}
/*! Sets the graph field to display.
	@param FieldName The name of the field that should be displayed.
	@param EmitterIndex If the field belongs to an emitter, include the index of the emitter to display.
	@return No return value.
*/
ConsoleMethodWithDocs(GuiParticleGraphInspector, setDisplayField, ConsoleVoid, 3, 4, "(FieldName, [EmitterIndex])")
{
	// the optional fourth argument selects a specific emitter's field
	if (argc > 3)
	{
		object->setDisplayField(argv[2], dAtoi(argv[3]));
	}
	else
	{
		object->setDisplayField(argv[2]);
	}
}
/*! Sets the graph inspector to use to show variance.
	@param Inspector The GuiParticleGraphInspector that is tracking the variation.
	@return No return value.
*/
ConsoleMethodWithDocs(GuiParticleGraphInspector, setVariationGraphInspector, ConsoleVoid, 3, 3, "(inspector)")
{
	GuiParticleGraphInspector* inspector = dynamic_cast<GuiParticleGraphInspector*>(Sim::findObject(argv[2]));
	if (!inspector)
	{
		// only warn when the argument looked like a numeric object id
		if (dAtoi(argv[2]) > 0)
			Con::warnf("%s::setVariationGraphInspector(): Object is not a GuiParticleGraphInspector: %s", argv[0], argv[2]);
		return;
	}
	object->setVariationGraphInspector(inspector);
}
/*! Sets the area that will be displayed on the graph.
	@param Area Four space-deliminated values representing left, bottom, right, top.
	@return No return value.
*/
ConsoleMethodWithDocs(GuiParticleGraphInspector, setDisplayArea, ConsoleVoid, 3, 3, "(area (xMin / yMin / xMax / yMax))")
{
	if (argc < 3)
	{
		Con::warnf("GuiParticleGraphInspector:setDisplayArea - Wrong number of arguments. Should be area(left / bottom / right / top).");
		return;
	}
	// the area comes in as one space-separated string of exactly four elements
	U32 count = Utility::mGetStringElementCount(argv[2]);
	if (count != 4)
	{
		Con::warnf("GuiParticleGraphInspector:setDisplayArea - Area does not have four values. Should be area(left / bottom / right / top).");
		return;
	}
	StringTableEntry s1 = StringTable->insert(Utility::mGetStringElement(argv[2], 0));
	StringTableEntry s2 = StringTable->insert(Utility::mGetStringElement(argv[2], 1));
	StringTableEntry s3 = StringTable->insert(Utility::mGetStringElement(argv[2], 2));
	StringTableEntry s4 = StringTable->insert(Utility::mGetStringElement(argv[2], 3));
	object->setDisplayArea(s1, s2, s3, s4);
}
/*! Sets the labels to display on the graph.
	@param LabelX The label that appears on the bottom of the graph.
	@param LabelY The label that appears on the left of the graph.
	@return No return value.
*/
ConsoleMethodWithDocs(GuiParticleGraphInspector, setDisplayLabels, ConsoleVoid, 4, 4, "(LabelX, LabelY)")
{
	if (argc < 4)
	{
		Con::warnf("GuiParticleGraphInspector:setDisplayLabels - Wrong number of arguments. Should be LabelX and LabelY.");
		return;
	}
	object->setDisplayLabels(argv[2], argv[3]);
}
ConsoleMethodGroupEndWithDocs(GuiParticleGraphInspector)
|
fe5c8e873c7ce2866af19582d3260729dca0a2ab
|
c0bfd93cd7f26a271268e504959256f1e02c6806
|
/components/fatfs/diskio/diskio_rawflash.c
|
382e5323937dac4982b7fce5f2a98e9d404a1687
|
[
"Apache-2.0"
] |
permissive
|
espressif/ESP8266_RTOS_SDK
|
606f396e92d2675d9854f0fabd88587fbbbaf267
|
af0cdc36fa2600033d0a09301c754008cf1503c1
|
refs/heads/master
| 2023-08-24T22:40:15.373553
| 2023-05-06T02:04:24
| 2023-05-06T02:04:24
| 27,584,181
| 3,163
| 1,749
|
Apache-2.0
| 2023-08-09T10:48:13
| 2014-12-05T09:27:12
|
C
|
UTF-8
|
C
| false
| false
| 2,899
|
c
|
diskio_rawflash.c
|
// Copyright 2015-2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string.h>
#include "diskio_impl.h"
#include "ffconf.h"
#include "ff.h"
#include "esp_log.h"
#include "diskio_rawflash.h"
#include "esp_compiler.h"
static const char* TAG = "diskio_rawflash";
const esp_partition_t* ff_raw_handles[FF_VOLUMES];
DSTATUS ff_raw_initialize (BYTE pdrv)
{
    /* Raw flash partitions need no initialization; always report ready. */
    (void) pdrv;
    return 0;
}
DSTATUS ff_raw_status (BYTE pdrv)
{
    /* No removable media, no write protection tracked: status is always 0. */
    (void) pdrv;
    return 0;
}
DRESULT ff_raw_read (BYTE pdrv, BYTE *buff, DWORD sector, UINT count)
{
    /* NOTE(review): "count=%in" looks like a mangled "count=%i" (newline lost) -- confirm against upstream */
    ESP_LOGV(TAG, "ff_raw_read - pdrv=%i, sector=%i, count=%in", (unsigned int)pdrv, (unsigned int)sector, (unsigned int)count);
    const esp_partition_t* part = ff_raw_handles[pdrv];
    assert(part);
    /* FatFs sectors map 1:1 onto SPI_FLASH_SEC_SIZE-sized flash units */
    esp_err_t err = esp_partition_read(part, sector * SPI_FLASH_SEC_SIZE, buff, count * SPI_FLASH_SEC_SIZE);
    if (unlikely(err != ESP_OK)) {
        ESP_LOGE(TAG, "esp_partition_read failed (0x%x)", err);
        return RES_ERROR;
    }
    return RES_OK;
}
DRESULT ff_raw_write (BYTE pdrv, const BYTE *buff, DWORD sector, UINT count)
{
    /* This driver is read-only: writing to a raw partition always fails. */
    (void) pdrv;
    (void) buff;
    (void) sector;
    (void) count;
    return RES_ERROR;
}
DRESULT ff_raw_ioctl (BYTE pdrv, BYTE cmd, void *buff)
{
    const esp_partition_t* part = ff_raw_handles[pdrv];
    /* NOTE(review): "cmd=%in" looks like a mangled "cmd=%i" (newline lost) -- confirm against upstream */
    ESP_LOGV(TAG, "ff_raw_ioctl: cmd=%in", cmd);
    assert(part);
    switch (cmd) {
    case CTRL_SYNC:
        /* nothing is buffered by this read-only driver, so sync is a no-op */
        return RES_OK;
    case GET_SECTOR_COUNT:
        *((DWORD *) buff) = part->size / SPI_FLASH_SEC_SIZE;
        return RES_OK;
    case GET_SECTOR_SIZE:
        *((WORD *) buff) = SPI_FLASH_SEC_SIZE;
        return RES_OK;
    case GET_BLOCK_SIZE:
        /* erase block size is not reported for raw partitions */
        return RES_ERROR;
    }
    return RES_ERROR;
}
esp_err_t ff_diskio_register_raw_partition(BYTE pdrv, const esp_partition_t* part_handle)
{
    if (pdrv >= FF_VOLUMES) {
        return ESP_ERR_INVALID_ARG;
    }
    /* read-only implementation: write always fails, read/ioctl are functional */
    static const ff_diskio_impl_t raw_impl = {
        .init = &ff_raw_initialize,
        .status = &ff_raw_status,
        .read = &ff_raw_read,
        .write = &ff_raw_write,
        .ioctl = &ff_raw_ioctl
    };
    ff_diskio_register(pdrv, &raw_impl);
    /* remember which partition backs this drive number */
    ff_raw_handles[pdrv] = part_handle;
    return ESP_OK;
}
BYTE ff_diskio_get_pdrv_raw(const esp_partition_t* part_handle)
{
    /* Search the handle table for this partition; 0xff means "not registered". */
    BYTE pdrv = 0;
    while (pdrv < FF_VOLUMES) {
        if (ff_raw_handles[pdrv] == part_handle) {
            return pdrv;
        }
        pdrv++;
    }
    return 0xff;
}
|
74b548a23839d15583362ce0d5c3e486bfd97c6d
|
229a28fc18c13bfe1ba7fc81c38b03651ed8e93b
|
/sw/vendor/riscv-isa-sim/riscv/insns/addi.h
|
1bb5dcedfa8e3c1269cd2f37697c090535e70d04
|
[
"LicenseRef-scancode-bsd-3-clause-jtag",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pulp-platform/snitch
|
d3967742434fa21e8af71afa6be35ea5420166ca
|
d026f47843f0ea6c269244c4e6851e0e09141ec3
|
refs/heads/master
| 2023-08-24T08:42:36.230951
| 2023-06-19T09:34:05
| 2023-06-19T09:34:05
| 289,236,605
| 194
| 44
|
Apache-2.0
| 2023-07-11T12:46:26
| 2020-08-21T09:57:34
|
SystemVerilog
|
UTF-8
|
C
| false
| false
| 41
|
h
|
addi.h
|
/* RISC-V ADDI: rd = sext_xlen(rs1 + sign-extended 12-bit I-type immediate). */
WRITE_RD(sext_xlen(RS1 + insn.i_imm()));
|
3dda162d1a255af8072166efa0b779ff3283de3e
|
39b8d37edbc228c0ee43644f620fd5f7ce448ff8
|
/src/sclite/slm_v2/src/binlm2arpa.c
|
6bf383a346e1f764affb98ebba1343b3899515d6
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"NIST-Software"
] |
permissive
|
usnistgov/SCTK
|
e29feea7744e7a2a8204f5e90d0a6fd81e6ed690
|
f48376a203ab17f0d479995d87275db6772dcb4a
|
refs/heads/master
| 2023-01-31T15:08:23.855355
| 2022-09-08T11:53:45
| 2022-09-08T11:53:45
| 57,993,905
| 170
| 52
|
NOASSERTION
| 2023-01-27T22:28:02
| 2016-05-03T19:00:47
|
C
|
UTF-8
|
C
| false
| false
| 2,294
|
c
|
binlm2arpa.c
|
/*=====================================================================
======= COPYRIGHT NOTICE =======
Copyright (C) 1996, Carnegie Mellon University, Cambridge University,
Ronald Rosenfeld and Philip Clarkson.
All rights reserved.
This software is made available for research purposes only. It may be
redistributed freely for this purpose, in full or in part, provided
that this entire copyright notice is included on any copies of this
software and applications and derivations thereof.
This software is provided on an "as is" basis, without warranty of any
kind, either expressed or implied, as to any matter including, but not
limited to warranty of fitness of purpose, or merchantability, or
results obtained from use of this software.
======================================================================*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ngram.h"
#include "toolkit.h"
#include "pc_libs/pc_general.h"
#include "rr_libs/general.h"
#include "idngram2lm.h"
#include "evallm.h"
/*
 * main() - convert a binary-format language model to ARPA format.
 * Fix: declared as 'int main' -- the C standard requires main to return int;
 * 'void main' is non-conforming. Also ends with 'return 0' instead of exit(0).
 */
int main (int argc,char **argv) {
  char *bin_path;
  int verbosity;
  ng_t ng;
  /* print usage and quit when help was requested or no arguments were given */
  if (pc_flagarg(&argc,argv,"-help") || argc == 1) {
    fprintf(stderr,"binlm2arpa : Convert a binary format language model to ARPA format.\n");
    fprintf(stderr,"Usage : binlm2arpa -binary .binlm\n");
    fprintf(stderr," -arpa .arpa\n");
    fprintf(stderr," [ -verbosity n ]\n");
    exit(1);
  }
  report_version(&argc,argv);
  verbosity = pc_intarg(&argc,argv,"-verbosity",DEFAULT_VERBOSITY);
  /* both the input (binary) and output (ARPA) paths are mandatory */
  bin_path = salloc(pc_stringarg(&argc,argv,"-binary",""));
  if (!strcmp(bin_path,"")) {
    quit(-1,"Error : must specify a binary language model file.\n");
  }
  ng.arpa_filename = salloc(pc_stringarg(&argc,argv,"-arpa",""));
  if (!strcmp(ng.arpa_filename,"")) {
    quit(-1,"Error : must specify an ARPA language model file.\n");
  }
  ng.arpa_fp = rr_oopen(ng.arpa_filename);
  pc_report_unk_args(&argc,argv,verbosity);
  pc_message(verbosity,1,"Reading binary language model from %s...",bin_path);
  load_lm(&ng,bin_path);
  if (verbosity>=2) {
    display_stats(&ng);
  }
  pc_message(verbosity,1,"Done\n");
  write_arpa_lm(&ng,verbosity);
  pc_message(verbosity,0,"binlm2arpa : Done.\n");
  return 0;
}
|
0cef7315e2ae4385bad48f7b55bf81ab520d8227
|
f367e4b66a1ee42e85830b31df88f63723c36a47
|
/lib/wasm-micro-runtime-WAMR-1.2.2/samples/multi-module/wasm-apps/mC.c
|
8b19a5b6690c550663954d7fa0fe785d580eda83
|
[
"Apache-2.0",
"LLVM-exception"
] |
permissive
|
fluent/fluent-bit
|
06873e441162b92941024e9a7e9e8fc934150bf7
|
1a41f49dc2f3ae31a780caa9ffd6137b1d703065
|
refs/heads/master
| 2023-09-05T13:44:55.347372
| 2023-09-05T10:14:33
| 2023-09-05T10:14:33
| 29,933,948
| 4,907
| 1,565
|
Apache-2.0
| 2023-09-14T10:17:02
| 2015-01-27T20:41:52
|
C
|
UTF-8
|
C
| false
| false
| 687
|
c
|
mC.c
|
#include <stdio.h>
#include <stdlib.h>
__attribute__((import_module("mA")))
__attribute__((import_name("A1"))) extern int
A1();
__attribute__((import_module("mB")))
__attribute__((import_name("B1"))) extern int
B1();
__attribute__((import_module("mB")))
__attribute__((import_name("B2"))) extern int
B2();
__attribute__((export_name("C1"))) int
C1()
{
    /* Leaf export: always yields the constant 31. */
    int value = 31;
    return value;
}
__attribute__((export_name("C2"))) int
C2()
{
    /* Delegates to module mB's imported B1. */
    int result = B1();
    return result;
}
__attribute__((export_name("C3"))) int
C3()
{
    /* Delegates to module mA's imported A1. */
    int result = A1();
    return result;
}
__attribute__((export_name("C4"))) int
C4()
{
    /* Delegates to module mB's imported B2. */
    int result = B2();
    return result;
}
int
C5()
{
    /* Sum the three exported results plus the constant 35. */
    int total = 35;
    total += C1();
    total += C2();
    total += C3();
    return total;
}
int
main()
{
    /* Print the aggregated value computed across all three modules. */
    int sum = C5();
    printf("%u\n", sum);
    return EXIT_SUCCESS;
}
|
b1b1fc9860d7d0fdb996d3c9bd602edff44c1079
|
095e5e86c931af6553996b0a128c07d94b38cbca
|
/hpy/debug/src/dhqueue.c
|
86e8c176790197c93b2e0a64bbc7e9d43a46b3cf
|
[
"MIT"
] |
permissive
|
hpyproject/hpy
|
1dc9e5e855fa006b1728703c5925addbb43cf792
|
8310a762d78e3412464b1869959a77da013e6307
|
refs/heads/master
| 2023-09-03T21:18:17.273371
| 2023-07-24T07:26:14
| 2023-07-24T07:26:14
| 196,559,763
| 681
| 41
|
MIT
| 2023-07-24T07:26:16
| 2019-07-12T10:27:56
|
Python
|
UTF-8
|
C
| false
| false
| 2,708
|
c
|
dhqueue.c
|
#include "debug_internal.h"
// TODO: we need to make DHQueue thread-safe if we want to use the same
// context in multiple threads
void DHQueue_init(DHQueue *q) {
    /* Start out as an empty doubly-linked queue. */
    q->size = 0;
    q->tail = NULL;
    q->head = NULL;
}
void DHQueue_append(DHQueue *q, DHQueueNode *h) {
    /* The new node always ends up as the tail. */
    h->next = NULL;
    if (q->head == NULL) {
        /* empty queue: node becomes head as well */
        h->prev = NULL;
        q->head = h;
    } else {
        h->prev = q->tail;
        q->tail->next = h;
    }
    q->tail = h;
    q->size++;
}
/* Detach and return the head node; precondition: the queue is non-empty. */
DHQueueNode *DHQueue_popfront(DHQueue *q)
{
    assert(q->size > 0);
    assert(q->head != NULL);
    DHQueueNode *head = q->head;
    if (q->size == 1) {
        /* removing the only node empties the queue */
        q->head = NULL;
        q->tail = NULL;
        q->size = 0;
    }
    else {
        q->head = head->next;
        q->head->prev = NULL;
        q->size--;
    }
    // the following is not strictly necessary, but it makes things much easier
    // to debug in case of bugs
    head->next = NULL;
    head->prev = NULL;
    return head;
}
/* Unlink node h from queue q; h must currently be linked into q. */
void DHQueue_remove(DHQueue *q, DHQueueNode *h)
{
#ifndef NDEBUG
    // if we are debugging, let's check that h is effectively in the queue
    DHQueueNode *it = q->head;
    bool found = false;
    while(it != NULL) {
        if (it == h) {
            found = true;
            break;
        }
        it = it->next;
    }
    assert(found);
#endif
    if (q->size == 1) {
        /* h is the only node: the queue becomes empty */
        q->head = NULL;
        q->tail = NULL;
    } else if (h == q->head) {
        assert(h->prev == NULL);
        q->head = h->next;
        q->head->prev = NULL;
    } else if (h == q->tail) {
        assert(h->next == NULL);
        q->tail = h->prev;
        q->tail->next = NULL;
    }
    else {
        /* interior node: splice the neighbors together */
        h->prev->next = h->next;
        h->next->prev = h->prev;
    }
    q->size--;
    /* scrub the links to catch accidental reuse of a removed node */
    h->next = NULL;
    h->prev = NULL;
}
#ifndef NDEBUG
/* Debug-only: verify that h's neighbor links are mutually consistent. */
static void linked_item_sanity_check(DHQueueNode *h)
{
    if (h == NULL)
        return;
    if (h->next != NULL)
        assert(h->next->prev == h);
    if (h->prev != NULL)
        assert(h->prev->next == h);
}
#endif
/* Debug-only: verify queue invariants (head/tail/size consistency); no-op with NDEBUG. */
void DHQueue_sanity_check(DHQueue *q)
{
#ifndef NDEBUG
    if (q->head == NULL || q->tail == NULL) {
        /* an empty queue must have both ends NULL and size zero */
        assert(q->head == NULL);
        assert(q->tail == NULL);
        assert(q->size == 0);
    }
    else {
        assert(q->head->prev == NULL);
        assert(q->tail->next == NULL);
        assert(q->size > 0);
        /* walk the whole chain, checking each node and recounting the size */
        DHQueueNode *h = q->head;
        HPy_ssize_t size = 0;
        while(h != NULL) {
            linked_item_sanity_check(h);
            if (h->next == NULL)
                assert(h == q->tail);
            h = h->next;
            size++;
        }
        assert(q->size == size);
    }
#endif
}
|
35f40c1fbf39ec3a3ada2d3d38fafe52b83a1c6f
|
28d0f8c01599f8f6c711bdde0b59f9c2cd221203
|
/tests/lib/libc/atomic/t___sync_lock.c
|
efad87cddfa2320703786b9c6207b02adb4bf47d
|
[] |
no_license
|
NetBSD/src
|
1a9cbc22ed778be638b37869ed4fb5c8dd616166
|
23ee83f7c0aea0777bd89d8ebd7f0cde9880d13c
|
refs/heads/trunk
| 2023-08-31T13:24:58.105962
| 2023-08-27T15:50:47
| 2023-08-27T15:50:47
| 88,439,547
| 656
| 348
| null | 2023-07-20T20:07:24
| 2017-04-16T20:03:43
| null |
UTF-8
|
C
| false
| false
| 4,253
|
c
|
t___sync_lock.c
|
/* $NetBSD: t___sync_lock.c,v 1.1 2019/02/26 10:01:41 isaki Exp $ */
/*
* Copyright (C) 2019 Tetsuya Isaki. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__RCSID("$NetBSD: t___sync_lock.c,v 1.1 2019/02/26 10:01:41 isaki Exp $");
#include <atf-c.h>
#include <inttypes.h>
#include <machine/types.h> // for __HAVE_ATOMIC64_OPS
/*
* These tests don't examine the atomicity.
*/
/* XXX
* Depending on a combination of arch and compiler, __sync_* is
* implemented as compiler's builtin function. In that case, even
* if libc exports the function symbol, it is not used. As a result
* this tests will examine compiler's builtin functions.
* It's better to run only when target is actually in libc.
*/
#define OLDVAL (0x1122334455667788UL)
#define NEWVAL (0x8090a0b0c0d0e0f0UL)
/*
 * Generate one ATF test case named NAME for the __sync_lock_test_and_set_<n>
 * builtin over TYPE: the target starts at OLDVAL, the call must store NEWVAL
 * and return the previous (OLDVAL) value.  FMT is the <inttypes.h> print
 * conversion used in the failure messages.  (Atomicity itself is not tested;
 * see the file-header comment.)
 */
#define atf_sync_tas(NAME, TYPE, FMT) \
ATF_TC(NAME); \
ATF_TC_HEAD(NAME, tc) \
{ \
	atf_tc_set_md_var(tc, "descr", #NAME); \
} \
ATF_TC_BODY(NAME, tc) \
{ \
	volatile TYPE val; \
	TYPE newval; \
	TYPE expval; \
	TYPE expres; \
	TYPE res; \
	val = (TYPE)OLDVAL; \
	newval = (TYPE)NEWVAL; \
	expval = (TYPE)NEWVAL; \
	expres = (TYPE)OLDVAL; \
	res = NAME(&val, newval); \
	ATF_REQUIRE_MSG(val == expval, \
	    "val expects 0x%" FMT " but 0x%" FMT, expval, val); \
	ATF_REQUIRE_MSG(res == expres, \
	    "res expects 0x%" FMT " but 0x%" FMT, expres, res); \
}
/* 8/16/32-bit variants always; 64-bit only where the port provides atomic
 * 64-bit operations (__HAVE_ATOMIC64_OPS from <machine/types.h>). */
atf_sync_tas(__sync_lock_test_and_set_1, uint8_t, PRIx8);
atf_sync_tas(__sync_lock_test_and_set_2, uint16_t, PRIx16);
atf_sync_tas(__sync_lock_test_and_set_4, uint32_t, PRIx32);
#ifdef __HAVE_ATOMIC64_OPS
atf_sync_tas(__sync_lock_test_and_set_8, uint64_t, PRIx64);
#endif
/*
 * Generate one ATF test case named NAME for the __sync_lock_release_<n>
 * builtin over TYPE: the target starts at OLDVAL and the call must reset it
 * to zero.
 */
#define atf_sync_rel(NAME, TYPE, FMT) \
ATF_TC(NAME); \
ATF_TC_HEAD(NAME, tc) \
{ \
	atf_tc_set_md_var(tc, "descr", #NAME); \
} \
ATF_TC_BODY(NAME, tc) \
{ \
	volatile TYPE val; \
	TYPE expval; \
	val = (TYPE)OLDVAL; \
	expval = (TYPE)0; \
	NAME(&val); \
	ATF_REQUIRE_MSG(val == expval, \
	    "val expects 0x%" FMT " but 0x%" FMT, expval, val); \
}
/* Same width scheme as the test-and-set cases above. */
atf_sync_rel(__sync_lock_release_1, uint8_t, PRIx8);
atf_sync_rel(__sync_lock_release_2, uint16_t, PRIx16);
atf_sync_rel(__sync_lock_release_4, uint32_t, PRIx32);
#ifdef __HAVE_ATOMIC64_OPS
atf_sync_rel(__sync_lock_release_8, uint64_t, PRIx64);
#endif
/*
 * __sync_synchronize(): This is just a link-time test.
 */
ATF_TC(__sync_synchronize);
ATF_TC_HEAD(__sync_synchronize, tc)
{
	atf_tc_set_md_var(tc, "descr", "__sync_synchronize");
}
ATF_TC_BODY(__sync_synchronize, tc)
{
	/* Only verifies the symbol resolves and the call returns. */
	__sync_synchronize();
}
/* Register every generated test case with the test program. */
ATF_TP_ADD_TCS(tp)
{
	ATF_TP_ADD_TC(tp, __sync_lock_test_and_set_1);
	ATF_TP_ADD_TC(tp, __sync_lock_test_and_set_2);
	ATF_TP_ADD_TC(tp, __sync_lock_test_and_set_4);
#ifdef __HAVE_ATOMIC64_OPS
	ATF_TP_ADD_TC(tp, __sync_lock_test_and_set_8);
#endif
	ATF_TP_ADD_TC(tp, __sync_lock_release_1);
	ATF_TP_ADD_TC(tp, __sync_lock_release_2);
	ATF_TP_ADD_TC(tp, __sync_lock_release_4);
#ifdef __HAVE_ATOMIC64_OPS
	ATF_TP_ADD_TC(tp, __sync_lock_release_8);
#endif
	ATF_TP_ADD_TC(tp, __sync_synchronize);
	return atf_no_error();
}
|
e0b9fd45243c52de3922a181287d6df50bb56111
|
ed98b77f3f09b392e68a0d59c48eec299e883bb9
|
/applications/Ringpacking/tests/src/ringpacking/pattern.c
|
31f71563d58cc4a2211ddceb84d84611236ccda9
|
[
"Apache-2.0"
] |
permissive
|
scipopt/scip
|
c8ddbe7cdec0a3af5a230c04b74b76ffacbdcc33
|
dc856a4c966ea50bd5f52c58d7be4fea33706f4c
|
refs/heads/master
| 2023-08-19T11:39:12.578790
| 2023-08-15T20:05:58
| 2023-08-15T20:05:58
| 342,522,859
| 262
| 46
|
NOASSERTION
| 2023-08-03T07:37:45
| 2021-02-26T09:16:17
|
C
|
UTF-8
|
C
| false
| false
| 5,055
|
c
|
pattern.c
|
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* */
/* This file is part of the program and library */
/* SCIP --- Solving Constraint Integer Programs */
/* */
/* Copyright (c) 2002-2023 Zuse Institute Berlin (ZIB) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/* */
/* You should have received a copy of the Apache-2.0 license */
/* along with SCIP; see the file LICENSE. If not visit scipopt.org. */
/* */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/**@file pattern.c
* @brief unit test for testing pattern interface functions
* @author Benjamin Mueller
*/
/*---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8----+----9----+----0----+----1----+----2*/
#include "scip/scip.h"
#include "scip/scipdefplugins.h"
#include "probdata_rpa.h"
#include "pricer_rpa.h"
#include "pattern.h"
#include "include/scip_test.h"
static SCIP* scip;
static SCIP_PROBDATA* probdata;
static SCIP_PATTERN* rpattern;
static SCIP_PATTERN* cpattern;
/** setup of test run */
static
void setup(void)
{
   /* Builds a 3-ring test instance: outer radii rexts, inner radii rints
    * (the third "ring" has rint 0.0, i.e. a disk), demand 100 each, packed
    * into a 100x100 rectangle.  Also creates one circular and one
    * rectangular pattern for the tests to operate on. */
   SCIP_Real rexts[3] = {1.0, 0.6, 0.5};
   SCIP_Real rints[3] = {1.0, 0.5, 0.0};
   int demands[3] = {100, 100, 100};

   /* initialize SCIP */
   scip = NULL;
   SCIP_CALL( SCIPcreate(&scip) );

   /* include default plugins */
   SCIP_CALL( SCIPincludeDefaultPlugins(scip) );

   /* include ringpacking pricer */
   SCIP_CALL( SCIPincludePricerRpa(scip) );

   /* create a problem */
   SCIP_CALL( SCIPcreateProbBasic(scip, "problem") );

   /* create problem data */
   SCIP_CALL( SCIPprobdataCreate(scip, "unit test", demands, rints, rexts, 3, 100.0, 100.0) );
   probdata = SCIPgetProbData(scip);
   cr_assert(probdata != NULL);

   /* creates circular and rectangular pattern; the circular one wraps
    * circle type 1 */
   SCIP_CALL( SCIPpatternCreateCircular(scip, &cpattern, 1) );
   SCIP_CALL( SCIPpatternCreateRectangular(scip, &rpattern) );
}
/** deinitialization method */
static
void teardown(void)
{
   /* Releases the two patterns created in setup(), frees SCIP, and fails
    * the test if any memory is still accounted for (leak detection). */

   /* release patterns */
   SCIPpatternRelease(scip, &rpattern);
   SCIPpatternRelease(scip, &cpattern);

   /* free SCIP */
   SCIP_CALL( SCIPfree(&scip) );

   cr_assert_null(scip);
   cr_assert_eq(BMSgetMemoryUsed(), 0, "There is a memory leak!!");
}
/* test suite: setup/teardown run around every test below */
TestSuite(pattern, .init = setup, .fini = teardown);

/* checks the pattern type of both patterns created in setup() */
Test(pattern, patterntype)
{
   cr_expect(SCIPpatternGetPatternType(cpattern) == SCIP_PATTERNTYPE_CIRCULAR);
   cr_expect(SCIPpatternGetPatternType(rpattern) == SCIP_PATTERNTYPE_RECTANGULAR);
}

/* checks the type of a circular pattern (setup() created it with type 1) */
Test(pattern, type)
{
   cr_expect(SCIPpatternGetCircleType(cpattern) == 1);
}

/* checks the position of an element; elements are appended, so the second
 * SCIPpatternAddElement lands at index 1 */
Test(pattern, position)
{
   SCIP_CALL( SCIPpatternAddElement(cpattern, 0, -1.0, 1.0) );
   cr_expect(SCIPpatternGetElementPosX(cpattern, 0) == -1.0);
   cr_expect(SCIPpatternGetElementPosY(cpattern, 0) == 1.0);

   SCIP_CALL( SCIPpatternAddElement(cpattern, 0, -2.0, 2.0) );
   cr_expect(SCIPpatternGetElementPosX(cpattern, 1) == -2.0);
   cr_expect(SCIPpatternGetElementPosY(cpattern, 1) == 2.0);
}

/* checks that the packable status starts UNKNOWN and is not reset by
 * element insertion or removal */
Test(pattern, packable)
{
   cr_expect(SCIPpatternGetPackableStatus(rpattern) == SCIP_PACKABLE_UNKNOWN);

   SCIPpatternSetPackableStatus(rpattern, SCIP_PACKABLE_YES);
   cr_expect(SCIPpatternGetPackableStatus(rpattern) == SCIP_PACKABLE_YES);

   /* adding an element does not change packable status */
   SCIP_CALL( SCIPpatternAddElement(rpattern, 0, 0.0, 0.0) );
   cr_expect(SCIPpatternGetPackableStatus(rpattern) == SCIP_PACKABLE_YES);

   /* removing an element does not change packable status */
   SCIPpatternRemoveLastElements(rpattern, 1);
   cr_expect(SCIPpatternGetPackableStatus(rpattern) == SCIP_PACKABLE_YES);
}
|
c0c1b703e6267f16cc37dde33e365dd264813ad5
|
eb266f888155c0fa59c923d7496e875c7259f1de
|
/src/wlr_signal.c
|
0f3815b3d76f69670e10ebaff6c0943e27eeb8ef
|
[
"BSD-2-Clause"
] |
permissive
|
werererer/japokwm
|
e7f46b210e7daf6a7d3a6bb8c61d912c40a2cb58
|
9cdc208ed70d4b58a805431992e4078171403edd
|
refs/heads/master
| 2023-05-25T18:46:40.826666
| 2023-01-15T22:26:59
| 2023-01-15T22:26:59
| 297,172,042
| 121
| 4
|
BSD-2-Clause
| 2023-01-15T22:27:00
| 2020-09-20T22:06:29
|
C
|
UTF-8
|
C
| false
| false
| 24
|
c
|
wlr_signal.c
|
#include "wlr_signal.h"
|
7e64e453ea2e01a3fa293f982c3eac1b59985fd7
|
79d343002bb63a44f8ab0dbac0c9f4ec54078c3a
|
/lib/libc/include/any-linux-any/linux/netfilter/xt_nfacct.h
|
5c4d97f2c1438a2d8ff7e36643036f721a7e6c2b
|
[
"MIT"
] |
permissive
|
ziglang/zig
|
4aa75d8d3bcc9e39bf61d265fd84b7f005623fc5
|
f4c9e19bc3213c2bc7e03d7b06d7129882f39f6c
|
refs/heads/master
| 2023-08-31T13:16:45.980913
| 2023-08-31T05:50:29
| 2023-08-31T05:50:29
| 40,276,274
| 25,560
| 2,399
|
MIT
| 2023-09-14T21:09:50
| 2015-08-06T00:51:28
|
Zig
|
UTF-8
|
C
| false
| false
| 420
|
h
|
xt_nfacct.h
|
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _XT_NFACCT_MATCH_H
#define _XT_NFACCT_MATCH_H

#include <linux/netfilter/nfnetlink_acct.h>

struct nf_acct;

/* Match info for the xtables "nfacct" extension: names the nfnetlink
 * accounting object associated with the rule. */
struct xt_nfacct_match_info {
	char name[NFACCT_NAME_MAX];
	struct nf_acct *nfacct;	/* filled in by the kernel; opaque to userspace */
};

/* Revision 1: same fields, but the trailing pointer slot is forced to 8-byte
 * alignment — presumably so the layout matches between 32-bit userspace and
 * a 64-bit kernel (TODO confirm against kernel commit history). */
struct xt_nfacct_match_info_v1 {
	char name[NFACCT_NAME_MAX];
	struct nf_acct *nfacct __attribute__((aligned(8)));
};

#endif /* _XT_NFACCT_MATCH_H */
|
bf492955056047621c076fcaf55a3ac580697935
|
ecce427678ecff2a93a47f3b1b664202c7d27617
|
/actors/explosion/model.inc.c
|
8f41b8fa9640f5a736e660b52b0c8b050a57fcf5
|
[] |
no_license
|
sm64pc/sm64ex
|
ecf37f31b3f7426c4874254660d856030d789714
|
54cd27ccee45a2403b45f07a00d6043c51149969
|
refs/heads/nightly
| 2023-08-11T12:14:34.424168
| 2023-07-01T11:45:50
| 2023-07-01T11:45:50
| 262,091,731
| 795
| 238
| null | 2023-07-01T11:45:51
| 2020-05-07T15:43:27
|
C
|
UTF-8
|
C
| false
| false
| 4,037
|
c
|
model.inc.c
|
// Explosion
// 0x030009C8
// Billboard quad for the explosion: a 256x256-unit square centered on the
// origin in the XY plane (z = 0), texcoords spanning 0..992, vertex color
// opaque white (the combiner uses decal-fade, so shading comes from env color).
static const Vtx explosion_seg3_vertex_030009C8[] = {
    {{{  -128,   -128,      0}, 0, {     0,    992}, {0xff, 0xff, 0xff, 0xff}}},
    {{{   128,   -128,      0}, 0, {   992,    992}, {0xff, 0xff, 0xff, 0xff}}},
    {{{   128,    128,      0}, 0, {   992,      0}, {0xff, 0xff, 0xff, 0xff}}},
    {{{  -128,    128,      0}, 0, {     0,      0}, {0xff, 0xff, 0xff, 0xff}}},
};
// Seven RGBA16 texture frames of the explosion animation, one generated byte
// array per frame (segment offsets advance by 0x800 = 32*32*2 bytes, matching
// the 32x32 16-bit load in explosion_seg3_dl_03004208 below).
// 0x03000A08
ALIGNED8 static const u8 explosion_seg3_texture_03000A08[] = {
#include "actors/explosion/explosion_0.rgba16.inc.c"
};

// 0x03001208
ALIGNED8 static const u8 explosion_seg3_texture_03001208[] = {
#include "actors/explosion/explosion_1.rgba16.inc.c"
};

// 0x03001A08
ALIGNED8 static const u8 explosion_seg3_texture_03001A08[] = {
#include "actors/explosion/explosion_2.rgba16.inc.c"
};

// 0x03002208
ALIGNED8 static const u8 explosion_seg3_texture_03002208[] = {
#include "actors/explosion/explosion_3.rgba16.inc.c"
};

// 0x03002A08
ALIGNED8 static const u8 explosion_seg3_texture_03002A08[] = {
#include "actors/explosion/explosion_4.rgba16.inc.c"
};

// 0x03003208
ALIGNED8 static const u8 explosion_seg3_texture_03003208[] = {
#include "actors/explosion/explosion_5.rgba16.inc.c"
};

// 0x03003A08
ALIGNED8 static const u8 explosion_seg3_texture_03003A08[] = {
#include "actors/explosion/explosion_6.rgba16.inc.c"
};
// 0x03004208 - 0x03004298
// Shared draw routine for every explosion frame.  The caller (one of the
// per-frame DLs below) has already set the texture image; this list sets up a
// decal-fade combiner with env alpha 150, disables lighting, loads the 32x32
// RGBA16 texture, draws the two-triangle quad, then restores combiner,
// lighting, and env color to their defaults.
const Gfx explosion_seg3_dl_03004208[] = {
    gsDPSetCombineMode(G_CC_DECALFADEA, G_CC_DECALFADEA),
    gsDPSetEnvColor(255, 255, 255, 150),
    gsSPClearGeometryMode(G_LIGHTING),
    gsSPTexture(0xFFFF, 0xFFFF, 0, G_TX_RENDERTILE, G_ON),
    gsDPSetTile(G_IM_FMT_RGBA, G_IM_SIZ_16b, 0, 0, G_TX_LOADTILE, 0, G_TX_CLAMP, 5, G_TX_NOLOD, G_TX_CLAMP, 5, G_TX_NOLOD),
    gsDPLoadSync(),
    gsDPLoadBlock(G_TX_LOADTILE, 0, 0, 32 * 32 - 1, CALC_DXT(32, G_IM_SIZ_16b_BYTES)),
    gsDPSetTile(G_IM_FMT_RGBA, G_IM_SIZ_16b, 8, 0, G_TX_RENDERTILE, 0, G_TX_CLAMP, 5, G_TX_NOLOD, G_TX_CLAMP, 5, G_TX_NOLOD),
    gsDPSetTileSize(0, 0, 0, (32 - 1) << G_TEXTURE_IMAGE_FRAC, (32 - 1) << G_TEXTURE_IMAGE_FRAC),
    gsSPVertex(explosion_seg3_vertex_030009C8, 4, 0),
    gsSP2Triangles( 0,  1,  2, 0x0,  0,  2,  3, 0x0),
    gsSPTexture(0xFFFF, 0xFFFF, 0, G_TX_RENDERTILE, G_OFF),
    gsDPPipeSync(),
    gsSPSetGeometryMode(G_LIGHTING),
    gsDPSetEnvColor(255, 255, 255, 255),
    gsDPSetCombineMode(G_CC_SHADE, G_CC_SHADE),
    gsSPEndDisplayList(),
};
// Per-frame display lists: each selects one texture frame via
// gsDPSetTextureImage and branches into the shared quad-drawing DL
// explosion_seg3_dl_03004208 above (gsSPBranchList does not return, so each
// list needs no gsSPEndDisplayList of its own).

// 0x03004298 - 0x030042B0
const Gfx explosion_seg3_dl_03004298[] = {
    gsDPPipeSync(),
    gsDPSetTextureImage(G_IM_FMT_RGBA, G_IM_SIZ_16b, 1, explosion_seg3_texture_03000A08),
    gsSPBranchList(explosion_seg3_dl_03004208),
};

// 0x030042B0 - 0x030042C8
const Gfx explosion_seg3_dl_030042B0[] = {
    gsDPPipeSync(),
    gsDPSetTextureImage(G_IM_FMT_RGBA, G_IM_SIZ_16b, 1, explosion_seg3_texture_03001208),
    gsSPBranchList(explosion_seg3_dl_03004208),
};

// 0x030042C8 - 0x030042E0
const Gfx explosion_seg3_dl_030042C8[] = {
    gsDPPipeSync(),
    gsDPSetTextureImage(G_IM_FMT_RGBA, G_IM_SIZ_16b, 1, explosion_seg3_texture_03001A08),
    gsSPBranchList(explosion_seg3_dl_03004208),
};

// 0x030042E0 - 0x030042F8
const Gfx explosion_seg3_dl_030042E0[] = {
    gsDPPipeSync(),
    gsDPSetTextureImage(G_IM_FMT_RGBA, G_IM_SIZ_16b, 1, explosion_seg3_texture_03002208),
    gsSPBranchList(explosion_seg3_dl_03004208),
};

// 0x030042F8 - 0x03004310
const Gfx explosion_seg3_dl_030042F8[] = {
    gsDPPipeSync(),
    gsDPSetTextureImage(G_IM_FMT_RGBA, G_IM_SIZ_16b, 1, explosion_seg3_texture_03002A08),
    gsSPBranchList(explosion_seg3_dl_03004208),
};

// 0x03004310 - 0x03004328
const Gfx explosion_seg3_dl_03004310[] = {
    gsDPPipeSync(),
    gsDPSetTextureImage(G_IM_FMT_RGBA, G_IM_SIZ_16b, 1, explosion_seg3_texture_03003208),
    gsSPBranchList(explosion_seg3_dl_03004208),
};

// 0x03004328 - 0x03004340
const Gfx explosion_seg3_dl_03004328[] = {
    gsDPPipeSync(),
    gsDPSetTextureImage(G_IM_FMT_RGBA, G_IM_SIZ_16b, 1, explosion_seg3_texture_03003A08),
    gsSPBranchList(explosion_seg3_dl_03004208),
};
|
bd07f38e72179a13237bb880092eaeb3f7e674b2
|
4ba76056c744ada0a01fb27b7cad8464cefc7aa0
|
/kernel/PX_Object_Joystick.c
|
7685fbfadeba0df9c6508c5d1ce567813cac26b6
|
[
"BSD-3-Clause"
] |
permissive
|
matrixcascade/PainterEngine
|
23e110e25a39e1d016ed7936eac23e06932456b4
|
1d3e6e85f337e8a8db44680094ab3f4b988507cb
|
refs/heads/master
| 2023-08-29T11:29:26.934779
| 2023-08-23T09:23:04
| 2023-08-23T09:23:04
| 190,119,926
| 1,989
| 253
|
BSD-3-Clause
| 2023-09-05T12:19:31
| 2019-06-04T03:08:26
|
C
|
UTF-8
|
C
| false
| false
| 6,572
|
c
|
PX_Object_Joystick.c
|
#include "PX_Object_Joystick.h"
PX_Object_Joystick *PX_Object_GetJoystick(PX_Object *pObject)
{
    /* Downcast a generic PX_Object to its joystick payload; PX_NULL when the
     * object is not a joystick. */
    if (pObject->Type == PX_OBJECT_TYPE_JOYSTICK)
    {
        return (PX_Object_Joystick *)pObject->pObject;
    }
    return PX_NULL;
}
px_double PX_Object_JoystickGetAngle(PX_Object *pObject)
{
    /* Current stick angle; asserts when pObject is not a joystick object. */
    PX_Object_Joystick *pJoystick;

    if (pObject->Type != PX_OBJECT_TYPE_JOYSTICK)
    {
        PX_ASSERT();
    }
    pJoystick = (PX_Object_Joystick *)pObject->pObject;
    return pJoystick->Angle;
}
px_double PX_Object_JoystickGetDistance(PX_Object *pObject)
{
    /* Current stick displacement from center; asserts on a non-joystick object. */
    PX_Object_Joystick *pJoystick;

    if (pObject->Type != PX_OBJECT_TYPE_JOYSTICK)
    {
        PX_ASSERT();
    }
    pJoystick = (PX_Object_Joystick *)pObject->pObject;
    return pJoystick->Distance;
}
px_point2D PX_Object_JoystickGetVector(PX_Object *pObject)
{
    /* Returns the stick displacement as a 2D vector: Distance decomposed
     * along Angle via PX_cosd/PX_sind.  Asserts on a non-joystick object.
     *
     * BUG FIX: the original returned an *uninitialized* px_point2D when the
     * type check failed (PX_ASSERT() need not terminate execution), which is
     * undefined behavior.  We now zero-initialize p so the failure path
     * yields a well-defined (0, 0) vector. */
    px_point2D p;

    p.x = 0;
    p.y = 0;
    if (pObject->Type != PX_OBJECT_TYPE_JOYSTICK)
    {
        PX_ASSERT();
    }
    else
    {
        PX_Object_Joystick *pJoystick = (PX_Object_Joystick *)pObject->pObject;
        p.x = pJoystick->Distance * (px_float)PX_cosd(pJoystick->Angle);
        p.y = pJoystick->Distance * (px_float)PX_sind(pJoystick->Angle);
    }
    return p;
}
///////////////////////////////////////////////
px_void Func_JoystickRender(px_surface *pSurface, PX_Object *pObject, px_uint elapsed);
px_void Func_JoystickOnCursorDown(PX_Object *pObject, PX_Object_Event e, px_void *ptr);
px_void Func_JoystickOnCursorDrag(PX_Object *pObject, PX_Object_Event e, px_void *ptr);
px_void Func_JoystickOnCursorUp(PX_Object *pObject, PX_Object_Event e, px_void *ptr);
PX_Object *PX_Object_JoystickCreate(
    px_memorypool *mp, PX_Object *Parent,
    px_float x, px_float y,
    px_float actionAreaRadius, px_float joystickRadius, px_float senseRadius,
    px_color actionAreaColor, px_color joystickColor)
{
    /* Creates a joystick object at (x, y) under Parent.
     *
     * actionAreaRadius  radius of the drawn action-area circle
     * joystickRadius    radius of the drawn stick ball
     * senseRadius       press-detection radius; clamped below so it always
     *                   covers the action area plus the stick ball
     *
     * Returns the new object, or PX_NULL on allocation failure. */
    px_float z = 0.0f;
    PX_Object *pObject;
    PX_Object_Joystick joystick;

    joystick.ActionAreaColor = actionAreaColor;
    joystick.JoystickColor = joystickColor;
    joystick.Distance = 0.0;
    joystick.Angle = 0.0;
    /* BUG FIX: IsActive was never initialized before the struct was copied
     * into the object by PX_ObjectCreateEx, so the cursor handlers (which
     * read it first) saw an indeterminate value.  Start inactive. */
    joystick.IsActive = PX_FALSE;
    joystick.ActionAreaRadius = actionAreaRadius;
    joystick.JoystickRadius = joystickRadius;
    joystick.SenseRadius = senseRadius > actionAreaRadius + joystickRadius ? senseRadius : actionAreaRadius + joystickRadius;

    pObject = PX_ObjectCreateEx(mp, Parent, x, y, z, 0.0f, 0.0f, 0.0f, PX_OBJECT_TYPE_JOYSTICK, PX_NULL, Func_JoystickRender, PX_NULL, &joystick, sizeof(PX_Object_Joystick));
    if (pObject == PX_NULL)
    {
        return PX_NULL;
    }
    pObject->diameter = 2 * joystick.SenseRadius;

    PX_ObjectRegisterEvent(pObject, PX_OBJECT_EVENT_CURSORDOWN, Func_JoystickOnCursorDown, PX_NULL);
    PX_ObjectRegisterEvent(pObject, PX_OBJECT_EVENT_CURSORDRAG, Func_JoystickOnCursorDrag, PX_NULL);
    PX_ObjectRegisterEvent(pObject, PX_OBJECT_EVENT_CURSORUP, Func_JoystickOnCursorUp, PX_NULL);
    return pObject;
}
px_void Func_JoystickRender(px_surface *pSurface, PX_Object *pObject, px_uint elapsed)
{
    /* Render callback: draws the action area as a solid circle and the stick
     * as a ball offset from the center by (Distance, Angle) in polar form.
     * `elapsed` is unused but required by the render-callback signature.
     *
     * Cleanup: removed objWidth/objHeight, which were computed but never used. */
    PX_Object_Joystick *pJoystick = (PX_Object_Joystick *)pObject->pObject;
    px_float objx, objy;
    px_float inheritX, inheritY;

    /* Resolve the object's absolute position from its parent chain. */
    PX_ObjectGetInheritXY(pObject, &inheritX, &inheritY);
    objx = (pObject->x + inheritX);
    objy = (pObject->y + inheritY);

    PX_GeoDrawSolidCircle(
        pSurface,
        (px_int)objx,
        (px_int)objy,
        (px_int)pJoystick->ActionAreaRadius,
        pJoystick->ActionAreaColor);

    PX_GeoDrawBall(
        pSurface,
        (px_float)(objx + pJoystick->Distance * PX_cosd(pJoystick->Angle)),
        (px_float)(objy + pJoystick->Distance * PX_sind(pJoystick->Angle)),
        (px_float)(pJoystick->JoystickRadius),
        pJoystick->JoystickColor);
}
px_void Func_JoystickOnCursorDown(PX_Object *pObject, PX_Object_Event e, px_void *ptr)
{
    /* Cursor-down handler: activates the joystick when the press lands inside
     * SenseRadius, setting Distance (clamped to ActionAreaRadius) and Angle;
     * a press outside the sense radius resets Distance to 0.  A second press
     * while already active is ignored.
     *
     * Cleanup: removed unused objWidth/objHeight and replaced the
     * comma-operator statement chain with plain semicolons (same semantics). */
    PX_Object_Joystick *pJoystick = (PX_Object_Joystick *)pObject->pObject;
    px_float objx, objy;
    px_float inheritX, inheritY;
    px_float x, y, r;

    PX_ObjectGetInheritXY(pObject, &inheritX, &inheritY);
    objx = (pObject->x + inheritX);
    objy = (pObject->y + inheritY);

    /* Cursor position relative to the joystick center, and its radius. */
    x = PX_Object_Event_GetCursorX(e) - objx;
    y = PX_Object_Event_GetCursorY(e) - objy;
    r = PX_sqrt(x * x + y * y);

    if (pJoystick->IsActive)
    {
        return; /* already being dragged; ignore a second press */
    }

    if (r < pJoystick->SenseRadius)
    {
        pJoystick->IsActive = PX_TRUE;
        pJoystick->Distance = r < pJoystick->ActionAreaRadius ? r : pJoystick->ActionAreaRadius;
        pJoystick->Angle = (px_float)PX_atan2(y, x);
    }
    else
    {
        pJoystick->Distance = 0.0f;
    }
}
px_void Func_JoystickOnCursorDrag(PX_Object *pObject, PX_Object_Event e, px_void *ptr)
{
    /* Cursor-drag handler: while active, tracks the cursor by updating
     * Distance (clamped to ActionAreaRadius) and Angle.  Dragging beyond
     * SenseRadius deactivates the stick and snaps Distance back to 0.
     *
     * Cleanup: removed unused objWidth/objHeight and replaced the
     * comma-operator statement chain with plain semicolons (same semantics). */
    PX_Object_Joystick *pJoystick = (PX_Object_Joystick *)pObject->pObject;
    px_float objx, objy;
    px_float inheritX, inheritY;
    px_float x, y, r;

    PX_ObjectGetInheritXY(pObject, &inheritX, &inheritY);
    objx = (pObject->x + inheritX);
    objy = (pObject->y + inheritY);

    x = PX_Object_Event_GetCursorX(e) - objx;
    y = PX_Object_Event_GetCursorY(e) - objy;
    r = PX_sqrt(x * x + y * y);

    if (!pJoystick->IsActive)
    {
        return;
    }

    if (r <= pJoystick->SenseRadius)
    {
        pJoystick->Distance = r < pJoystick->ActionAreaRadius ? r : pJoystick->ActionAreaRadius;
    }
    else
    {
        pJoystick->Distance = 0.0f;
        pJoystick->IsActive = PX_FALSE;
        return;
    }
    pJoystick->Angle = (px_float)PX_atan2(y, x);
}
px_void Func_JoystickOnCursorUp(PX_Object *pObject, PX_Object_Event e, px_void *ptr)
{
    /* Cursor-up handler: releasing inside SenseRadius recenters the stick
     * and deactivates it.  NOTE(review): a release *outside* the sense radius
     * intentionally leaves IsActive unchanged (the drag handler already
     * deactivates when the cursor leaves the radius) — confirm this is the
     * intended design before tightening it.
     *
     * Cleanup: removed unused objWidth/objHeight and replaced the
     * comma-operator statement chain with plain semicolons (same semantics). */
    PX_Object_Joystick *pJoystick = (PX_Object_Joystick *)pObject->pObject;
    px_float objx, objy;
    px_float inheritX, inheritY;
    px_float x, y, r;

    PX_ObjectGetInheritXY(pObject, &inheritX, &inheritY);
    objx = (pObject->x + inheritX);
    objy = (pObject->y + inheritY);

    x = PX_Object_Event_GetCursorX(e) - objx;
    y = PX_Object_Event_GetCursorY(e) - objy;
    r = PX_sqrt(x * x + y * y);

    if (r <= pJoystick->SenseRadius)
    {
        pJoystick->Distance = 0.0f;
        pJoystick->IsActive = PX_FALSE;
    }
}
|
f70d355a233a72fbead3c0ef81a94640dc89587e
|
eecd5e4c50d8b78a769bcc2675250576bed34066
|
/src/sys/tutorials/ex7.c
|
e981d1fc7ed027d46f5ee011f8e3c1c2733f5b59
|
[
"BSD-2-Clause"
] |
permissive
|
petsc/petsc
|
3b1a04fea71858e0292f9fd4d04ea11618c50969
|
9c5460f9064ca60dd71a234a1f6faf93e7a6b0c9
|
refs/heads/main
| 2023-08-17T20:51:16.507070
| 2023-08-17T16:08:06
| 2023-08-17T16:08:06
| 8,691,401
| 341
| 169
|
NOASSERTION
| 2023-03-29T11:02:58
| 2013-03-10T20:55:21
|
C
|
UTF-8
|
C
| false
| false
| 10,775
|
c
|
ex7.c
|
const char help[] = "How to create a log handler using the PetscLogHandler interface";
#include <petscsys.h>
#include <petsc/private/hashmapi.h> // use PetscHMapI: a PetscInt -> PetscInt hashmap
#include <petsctime.h> // use PetscTimeSubtract() and PetscTimeAdd()
#include <petscviewer.h>
#include <petsc/private/loghandlerimpl.h> // use the struct _p_PetscLogHandler behind PetscLogHandler
/* Log handlers that use the PetscLogHandler interface get their information
from the PetscLogState available to each handler and the user-defined
context pointer. Compare this example to src/sys/tutorials/ex6.c.
A logging event can be started multiple times before it stops: for example,
a linear solve may involve a subsolver, so PetscLogEventBegin() can be
called for the event KSP_Solve multiple times before a call to
PetscLogEventEnd(). The user defined handler in this example shows how many
times an event is running. */
#define PETSCLOGHANDLEREX7 "ex7"
typedef struct _HandlerCtx *HandlerCtx;
struct _HandlerCtx {
PetscHMapI running;
PetscInt num_objects_created;
PetscInt num_objects_destroyed;
};
static PetscErrorCode HandlerCtxCreate(HandlerCtx *ctx_p)
{
  PetscFunctionBegin;
  /* Allocate the handler context (PetscNew hands back zeroed memory, so the
     object counters start at 0) and create the event -> running-count map. */
  PetscCall(PetscNew(ctx_p));
  PetscCall(PetscHMapICreate(&(*ctx_p)->running));
  PetscFunctionReturn(PETSC_SUCCESS);
}
static PetscErrorCode HandlerCtxDestroy(HandlerCtx *ctx_p)
{
  HandlerCtx doomed;

  PetscFunctionBegin;
  /* Take ownership and null the caller's pointer first, so the caller is
     never left holding a dangling context. */
  doomed = *ctx_p;
  *ctx_p = NULL;
  PetscCall(PetscHMapIDestroy(&doomed->running));
  PetscCall(PetscFree(doomed));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/*
 * Print a tagged line to this rank's stdout:
 *   [rank:wall-clock-time:function-name] <message>
 * Implemented as a macro so PETSC_FUNCTION_NAME expands to the *caller's*
 * function name rather than a helper's.
 */
#define PrintData(format_string, ...) \
  do { \
    PetscMPIInt    _rank; \
    PetscLogDouble _time; \
    PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &_rank)); \
    PetscCall(PetscTime(&_time)); \
    PetscCall(PetscPrintf(PETSC_COMM_SELF, "[%d:%-7g:%-33s] " format_string, _rank, _time, PETSC_FUNCTION_NAME, __VA_ARGS__)); \
  } while (0)
/* Event-begin callback: bumps the running count for event e in the context's
 * hash map and reports it.  Events can nest, so the count may exceed 1. */
static PetscErrorCode PetscLogHandlerEventBegin_Ex7(PetscLogHandler h, PetscLogEvent e, PetscObject o1, PetscObject o2, PetscObject o3, PetscObject o4)
{
  HandlerCtx        ctx;
  PetscInt          count;
  PetscLogState     state;
  PetscLogEventInfo event_info;
  PetscBool         is_active;

  PetscFunctionBegin;
  // This callback will only be invoked if the event is active
  PetscCall(PetscLogHandlerGetState(h, &state));
  PetscCall(PetscLogStateEventGetActive(state, PETSC_DEFAULT, e, &is_active));
  PetscAssert(is_active, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Event handler called for inactive event");
  ctx = (HandlerCtx)h->data;
  /* Missing keys default to 0, so the first begin of an event just works. */
  PetscCall(PetscHMapIGetWithDefault(ctx->running, (PetscInt)e, 0, &count));
  count += 1;
  PetscCall(PetscLogStateEventGetInfo(state, e, &event_info));
  PrintData("Event \"%s\" started: now running %" PetscInt_FMT " times\n", event_info.name, count);
  PetscCall(PetscHMapISet(ctx->running, (PetscInt)e, count));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Event-end callback: decrements the running count for event e and reports
 * the new value. */
static PetscErrorCode PetscLogHandlerEventEnd_Ex7(PetscLogHandler h, PetscLogEvent e, PetscObject o1, PetscObject o2, PetscObject o3, PetscObject o4)
{
  HandlerCtx        ctx;
  PetscInt          count;
  PetscLogState     state;
  PetscLogEventInfo event_info;

  PetscFunctionBegin;
  ctx = (HandlerCtx)h->data;
  PetscCall(PetscLogHandlerGetState(h, &state));
  PetscCall(PetscHMapIGetWithDefault(ctx->running, (PetscInt)e, 0, &count));
  count -= 1;
  PetscCall(PetscLogStateEventGetInfo(state, e, &event_info));
  PrintData("Event \"%s\" stopped: now running %" PetscInt_FMT " times\n", event_info.name, count);
  PetscCall(PetscHMapISet(ctx->running, (PetscInt)e, count));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Event-sync callback: barriers the communicator and reports how long the
 * barrier took (PetscTimeSubtract/PetscTimeAdd bracket the MPI_Barrier). */
static PetscErrorCode PetscLogHandlerEventSync_Ex7(PetscLogHandler h, PetscLogEvent e, MPI_Comm comm)
{
  PetscLogState     state;
  PetscLogEventInfo event_info;
  PetscLogDouble    time = 0.0;

  PetscFunctionBegin;
  PetscCall(PetscTimeSubtract(&time));
  PetscCallMPI(MPI_Barrier(comm));
  PetscCall(PetscTimeAdd(&time));
  PetscCall(PetscLogHandlerGetState(h, &state));
  PetscCall(PetscLogStateEventGetInfo(state, e, &event_info));
  PrintData("Event \"%s\" synced: took %g seconds\n", event_info.name, (double)time);
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Object-create callback: only tallies how many objects were created. */
static PetscErrorCode PetscLogHandlerObjectCreate_Ex7(PetscLogHandler h, PetscObject obj)
{
  HandlerCtx handler_state;

  PetscFunctionBegin;
  handler_state = (HandlerCtx)h->data;
  handler_state->num_objects_created += 1;
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Object-destroy callback: only tallies how many objects were destroyed. */
static PetscErrorCode PetscLogHandlerObjectDestroy_Ex7(PetscLogHandler h, PetscObject obj)
{
  HandlerCtx handler_state;

  PetscFunctionBegin;
  handler_state = (HandlerCtx)h->data;
  handler_state->num_objects_destroyed += 1;
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Stage-push callback.  At push time the state's current stage is still the
 * *old* stage (as the "replacing" message relies on); a current stage < 0
 * means no stage has been pushed yet. */
static PetscErrorCode PetscLogHandlerStagePush_Ex7(PetscLogHandler h, PetscLogStage new_stage)
{
  PetscLogStage     old_stage;
  PetscLogStageInfo new_info;
  PetscLogState     state;

  PetscFunctionBegin;
  PetscCall(PetscLogHandlerGetState(h, &state));
  PetscCall(PetscLogStateStageGetInfo(state, new_stage, &new_info));
  PetscCall(PetscLogStateGetCurrentStage(state, &old_stage));
  if (old_stage >= 0) {
    PetscLogStageInfo old_info;

    PetscCall(PetscLogStateStageGetInfo(state, old_stage, &old_info));
    PrintData("Pushing stage stage \"%s\" (replacing \"%s\")\n", new_info.name, old_info.name);
  } else {
    PrintData("Pushing initial stage \"%s\"\n", new_info.name);
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Stage-pop callback.  At pop time the state's current stage has already
 * reverted to the stage below the popped one; < 0 means the stack is empty. */
static PetscErrorCode PetscLogHandlerStagePop_Ex7(PetscLogHandler h, PetscLogStage old_stage)
{
  PetscLogStage     new_stage;
  PetscLogStageInfo old_info;
  PetscLogState     state;

  PetscFunctionBegin;
  PetscCall(PetscLogHandlerGetState(h, &state));
  PetscCall(PetscLogStateStageGetInfo(state, old_stage, &old_info));
  PetscCall(PetscLogStateGetCurrentStage(state, &new_stage));
  if (new_stage >= 0) {
    PetscLogStageInfo new_info;

    PetscCall(PetscLogStateStageGetInfo(state, new_stage, &new_info));
    PrintData("Popping stage \"%s\" (back to \"%s\")\n", old_info.name, new_info.name);
  } else {
    PrintData("Popping initial stage \"%s\"\n", old_info.name);
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* View callback: for ASCII viewers, summarizes how many distinct events were
 * seen and the created/destroyed object counts.  Non-ASCII viewers are a
 * silent no-op. */
static PetscErrorCode PetscLogHandlerView_Ex7(PetscLogHandler h, PetscViewer viewer)
{
  PetscBool is_ascii;

  PetscFunctionBegin;
  PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &is_ascii));
  if (is_ascii) {
    HandlerCtx ctx;
    PetscInt   num_entries;

    ctx = (HandlerCtx)h->data;
    /* Each event that ever began gets a key in the map, so its size is the
       number of distinct events seen. */
    PetscCall(PetscHMapIGetSize(ctx->running, &num_entries));
    PetscCall(PetscViewerASCIIPrintf(viewer, "%" PetscInt_FMT " events were seen by the handler\n", num_entries));
    /* BUG FIX: the destroyed count previously printed num_objects_created
       for both fields. */
    PetscCall(PetscViewerASCIIPrintf(viewer, "%" PetscInt_FMT " object(s) were created and %" PetscInt_FMT " object(s) were destroyed\n", ctx->num_objects_created, ctx->num_objects_destroyed));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
// An example of overloading one of the methods defined using PetscObjectComposeFunction()
/* Prints "Logged state for <object name>: <user message>" using the caller's
 * printf-style format and va_list. */
static PetscErrorCode PetscLogHandlerLogObjectState_Ex7(PetscLogHandler h, PetscObject obj, const char format[], va_list argp)
{
  const char *name;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetName(obj, &name));
  PrintData("Logged state for \"%s\": ", name);
  PetscCall(PetscVFPrintf(PETSC_STDOUT, format, argp));
  PetscCall(PetscPrintf(PETSC_COMM_SELF, "\n"));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Destroy callback: frees the context and un-composes the overloaded
 * LogObjectState method (composing NULL removes it). */
static PetscErrorCode PetscLogHandlerDestroy_Ex7(PetscLogHandler h)
{
  HandlerCtx ctx;

  PetscFunctionBegin;
  ctx = (HandlerCtx)h->data;
  PetscCall(HandlerCtxDestroy(&ctx));
  PetscCall(PetscObjectComposeFunction((PetscObject)h, "PetscLogHandlerLogObjectState_C", NULL));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Type constructor registered under PETSCLOGHANDLEREX7: allocates the
 * context, fills the ops table with the callbacks above, and composes the
 * overloaded LogObjectState method. */
static PetscErrorCode PetscLogHandlerCreate_Ex7(PetscLogHandler handler)
{
  HandlerCtx ctx;

  PetscFunctionBegin;
  PetscCall(HandlerCtxCreate(&ctx));
  handler->data = (void *)ctx;
  handler->ops->destroy = PetscLogHandlerDestroy_Ex7;
  handler->ops->view = PetscLogHandlerView_Ex7;
  handler->ops->eventbegin = PetscLogHandlerEventBegin_Ex7;
  handler->ops->eventend = PetscLogHandlerEventEnd_Ex7;
  handler->ops->eventsync = PetscLogHandlerEventSync_Ex7;
  handler->ops->objectcreate = PetscLogHandlerObjectCreate_Ex7;
  handler->ops->objectdestroy = PetscLogHandlerObjectDestroy_Ex7;
  handler->ops->stagepush = PetscLogHandlerStagePush_Ex7;
  handler->ops->stagepop = PetscLogHandlerStagePop_Ex7;
  PetscCall(PetscObjectComposeFunction((PetscObject)handler, "PetscLogHandlerLogObjectState_C", PetscLogHandlerLogObjectState_Ex7));
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* Driver: registers and starts the custom handler, then exercises every
 * callback — nested/overlapping events, a stage push/pop, an event sync,
 * object create/destroy, and the composed LogObjectState method — before
 * viewing and destroying the handler. */
int main(int argc, char **argv)
{
  PetscClassId        user_classid;
  PetscLogEvent       event_1, event_2;
  PetscLogStage       stage_1;
  PetscContainer      user_object;
  PetscLogHandler     h;
  PetscLogDouble      time;
  PetscLogHandlerType type;

  PetscCall(PetscInitialize(&argc, &argv, NULL, help));
  PetscCall(PetscLogHandlerRegister(PETSCLOGHANDLEREX7, PetscLogHandlerCreate_Ex7));
  PetscCall(PetscLogHandlerCreate(PETSC_COMM_WORLD, &h));
  PetscCall(PetscLogHandlerSetType(h, PETSCLOGHANDLEREX7));
  PetscCall(PetscLogHandlerGetType(h, &type));
  PetscCall(PetscPrintf(PETSC_COMM_WORLD, "Log handler type is: %s\n", type));
  PetscCall(PetscLogHandlerStart(h));
  PetscCall(PetscClassIdRegister("User class", &user_classid));
  PetscCall(PetscLogEventRegister("Event 1", user_classid, &event_1));
  PetscCall(PetscLogEventRegister("Event 2", user_classid, &event_2));
  PetscCall(PetscLogStageRegister("Stage 1", &stage_1));
  /* event_1 begins twice before ending, demonstrating nested running counts */
  PetscCall(PetscLogEventBegin(event_1, NULL, NULL, NULL, NULL));
  PetscCall(PetscLogStagePush(stage_1));
  PetscCall(PetscLogEventBegin(event_2, NULL, NULL, NULL, NULL));
  PetscCall(PetscLogEventSync(event_1, PETSC_COMM_WORLD));
  PetscCall(PetscLogEventBegin(event_1, NULL, NULL, NULL, NULL));
  PetscCall(PetscTime(&time));
  /* container create/destroy triggers the object callbacks and the composed
     LogObjectState method */
  PetscCall(PetscContainerCreate(PETSC_COMM_SELF, &user_object));
  PetscCall(PetscObjectSetName((PetscObject)user_object, "User Container"));
  PetscCall(PetscLogHandlerLogObjectState(h, (PetscObject)user_object, "Created at %e", time));
  PetscCall(PetscContainerDestroy(&user_object));
  PetscCall(PetscLogEventEnd(event_1, NULL, NULL, NULL, NULL));
  PetscCall(PetscLogEventEnd(event_2, NULL, NULL, NULL, NULL));
  PetscCall(PetscLogStagePop());
  PetscCall(PetscLogEventEnd(event_1, NULL, NULL, NULL, NULL));
  PetscCall(PetscLogHandlerStop(h));
  PetscCall(PetscLogHandlerView(h, PETSC_VIEWER_STDOUT_WORLD));
  PetscCall(PetscLogHandlerDestroy(&h));
  PetscCall(PetscFinalize());
  return 0;
}
/*TEST
test:
requires: defined(PETSC_USE_LOG)
suffix: 0
filter: sed -E "s/:[^:]+:/:time_removed:/g"
TEST*/
|
a54dbb4d5c20b02baa563a26ed3c0697f41efab7
|
e6da5a3210800cdfde59f3bbb7986ff3fc878598
|
/src/include/catalog/pg_publication.h
|
4e3a8241ea838f9f9e306cfa60cdf6ad51ddb973
|
[
"BSD-3-Clause",
"PostgreSQL",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
Tencent/TBase
|
b66f13583ce6cd02ee5d453e2ce5a3a61e8b8f13
|
7cf7f8afbcab7290538ad5e65893561710be3dfa
|
refs/heads/master
| 2023-09-04T03:27:38.289238
| 2023-03-09T12:02:41
| 2023-03-09T12:18:46
| 220,177,733
| 1,433
| 283
|
NOASSERTION
| 2023-07-31T07:31:58
| 2019-11-07T07:34:03
|
C
|
UTF-8
|
C
| false
| false
| 6,891
|
h
|
pg_publication.h
|
/*
* Tencent is pleased to support the open source community by making TBase available.
*
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
*
* TBase is licensed under the BSD 3-Clause License, except for the third-party component listed below.
*
* A copy of the BSD 3-Clause License is included in this file.
*
* Other dependencies and licenses:
*
* Open Source Software Licensed Under the PostgreSQL License:
* --------------------------------------------------------------------
* 1. Postgres-XL XL9_5_STABLE
* Portions Copyright (c) 2015-2016, 2ndQuadrant Ltd
* Portions Copyright (c) 2012-2015, TransLattice, Inc.
* Portions Copyright (c) 2010-2017, Postgres-XC Development Group
* Portions Copyright (c) 1996-2015, The PostgreSQL Global Development Group
* Portions Copyright (c) 1994, The Regents of the University of California
*
* Terms of the PostgreSQL License:
* --------------------------------------------------------------------
* Permission to use, copy, modify, and distribute this software and its
* documentation for any purpose, without fee, and without a written agreement
* is hereby granted, provided that the above copyright notice and this
* paragraph and the following two paragraphs appear in all copies.
*
* IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
* LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
* DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
*
* Terms of the BSD 3-Clause License:
* --------------------------------------------------------------------
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of THL A29 Limited nor the names of its contributors may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
/*-------------------------------------------------------------------------
*
* pg_publication.h
* definition of the relation sets relation (pg_publication)
*
* Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/catalog/pg_publication.h
*
* NOTES
* the genbki.pl script reads this file and generates .bki
* information from the DATA() statements.
*
*-------------------------------------------------------------------------
*/
#ifndef PG_PUBLICATION_H
#define PG_PUBLICATION_H
#include "catalog/genbki.h"
#include "catalog/objectaddress.h"
/* ----------------
* pg_publication definition. cpp turns this into
* typedef struct FormData_pg_publication
*
* ----------------
*/
#define PublicationRelationId 6104
CATALOG(pg_publication,6104)
{
NameData pubname; /* name of the publication */
Oid pubowner; /* publication owner */
/*
* indicates that this is special publication which should encompass all
* tables in the database (except for the unlogged and temp ones)
*/
bool puballtables;
/* true if inserts are published */
bool pubinsert;
/* true if updates are published */
bool pubupdate;
/* true if deletes are published */
bool pubdelete;
} FormData_pg_publication;
/* ----------------
* Form_pg_publication corresponds to a pointer to a tuple with
* the format of pg_publication relation.
* ----------------
*/
typedef FormData_pg_publication *Form_pg_publication;
/* ----------------
* compiler constants for pg_publication
* ----------------
*/
#define Natts_pg_publication 6
#define Anum_pg_publication_pubname 1
#define Anum_pg_publication_pubowner 2
#define Anum_pg_publication_puballtables 3
#define Anum_pg_publication_pubinsert 4
#define Anum_pg_publication_pubupdate 5
#define Anum_pg_publication_pubdelete 6
typedef struct PublicationActions
{
bool pubinsert;
bool pubupdate;
bool pubdelete;
} PublicationActions;
typedef struct Publication
{
Oid oid;
char *name;
bool alltables;
PublicationActions pubactions;
} Publication;
extern Publication *GetPublication(Oid pubid);
extern Publication *GetPublicationByName(const char *pubname, bool missing_ok);
extern List *GetRelationPublications(Oid relid);
extern List *GetPublicationRelations(Oid pubid);
extern List *GetAllTablesPublications(void);
extern List *GetAllTablesPublicationRelations(void);
extern ObjectAddress publication_add_relation(Oid pubid, Relation targetrel,
bool if_not_exists);
extern Oid get_publication_oid(const char *pubname, bool missing_ok);
extern char *get_publication_name(Oid pubid);
extern Datum pg_get_publication_tables(PG_FUNCTION_ARGS);
#ifdef __STORAGE_SCALABLE__
extern ObjectAddress publication_add_shard(Oid pubid, int32 shardid,
bool if_not_exists);
extern List *GetPublicationShards(Oid pubid);
#endif
#endif /* PG_PUBLICATION_H */
|
6aa5c6c62b897638269e6b9eaf8fa017a8e7ca3a
|
88ae8695987ada722184307301e221e1ba3cc2fa
|
/third_party/libdrm/src/tests/tegra/drm-test.h
|
f11aed42343e0f278c8d63c730a29bc615e31954
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"GPL-2.0-or-later",
"Apache-2.0",
"LGPL-2.0-or-later"
] |
permissive
|
iridium-browser/iridium-browser
|
71d9c5ff76e014e6900b825f67389ab0ccd01329
|
5ee297f53dc7f8e70183031cff62f37b0f19d25f
|
refs/heads/master
| 2023-08-03T16:44:16.844552
| 2023-07-20T15:17:00
| 2023-07-23T16:09:30
| 220,016,632
| 341
| 40
|
BSD-3-Clause
| 2021-08-13T13:54:45
| 2019-11-06T14:32:31
| null |
UTF-8
|
C
| false
| false
| 2,316
|
h
|
drm-test.h
|
/*
* Copyright © 2014 NVIDIA Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef TEGRA_DRM_TEST_H
#define TEGRA_DRM_TEST_H
#include <stdint.h>
#include <stdlib.h>
#include "xf86drmMode.h"
/*
 * State for driving one KMS output in the tegra DRM tests: the DRM fd plus
 * the connector/CRTC pair and display mode in use.
 * NOTE(review): field semantics inferred from names and typical libdrm
 * usage — confirm against the drm-test.c implementation.
 */
struct drm_screen {
	int fd;                 /* DRM device file descriptor */
	unsigned int width;
	unsigned int height;
	unsigned int pitch;     /* bytes per scanline */
	unsigned int depth;
	unsigned int bpp;       /* bits per pixel */
	drmModeModeInfo mode;   /* mode programmed on the CRTC */
	uint32_t connector;
	uint32_t old_fb;        /* framebuffer to restore on close — TODO confirm */
	uint32_t format;        /* pixel format (DRM fourcc) — TODO confirm */
	uint32_t crtc;
};
/*
 * A single framebuffer: dimensions, pixel format, the GEM/dumb-buffer
 * handle it wraps, and an optional CPU mapping in 'data'.
 */
struct drm_framebuffer {
	unsigned int width;
	unsigned int height;
	unsigned int pitch;     /* bytes per scanline */
	uint32_t format;
	uint32_t handle;        /* buffer object handle backing the fb */
	void *data;             /* CPU-visible mapping, if any */
	int fd;
};
int drm_screen_open(struct drm_screen **screenp, int fd);
int drm_screen_close(struct drm_screen *screen);
int drm_screen_set_framebuffer(struct drm_screen *screen,
struct drm_framebuffer *fb);
int drm_framebuffer_new(struct drm_framebuffer **fbp,
struct drm_screen *screen, uint32_t handle,
unsigned int width, unsigned int height,
unsigned int pitch, uint32_t format,
void *data);
int drm_framebuffer_free(struct drm_framebuffer *fb);
int drm_open(const char *path);
void drm_close(int fd);
#endif
|
e30f7e438eedef4fa093f7a026530072edde92a3
|
9ceacf33fd96913cac7ef15492c126d96cae6911
|
/usr.bin/vi/ex/ex_print.c
|
68d9bb21afce48d0ca45f365dfa65179eb57d125
|
[
"BSD-3-Clause"
] |
permissive
|
openbsd/src
|
ab97ef834fd2d5a7f6729814665e9782b586c130
|
9e79f3a0ebd11a25b4bff61e900cb6de9e7795e9
|
refs/heads/master
| 2023-09-02T18:54:56.624627
| 2023-09-02T15:16:12
| 2023-09-02T15:16:12
| 66,966,208
| 3,394
| 1,235
| null | 2023-08-08T02:42:25
| 2016-08-30T18:18:25
|
C
|
UTF-8
|
C
| false
| false
| 6,421
|
c
|
ex_print.c
|
/* $OpenBSD: ex_print.c,v 1.13 2016/05/27 09:18:12 martijn Exp $ */
/*-
* Copyright (c) 1992, 1993, 1994
* The Regents of the University of California. All rights reserved.
* Copyright (c) 1992, 1993, 1994, 1995, 1996
* Keith Bostic. All rights reserved.
*
* See the LICENSE file for redistribution information.
*/
#include "config.h"
#include <sys/types.h>
#include <sys/queue.h>
#include <bitstring.h>
#include <ctype.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include "../common/common.h"
static int ex_prchars(SCR *, const char *, size_t *, size_t, u_int, int);
/*
* ex_list -- :[line [,line]] l[ist] [count] [flags]
*
* Display the addressed lines such that the output is unambiguous.
*
* PUBLIC: int ex_list(SCR *, EXCMD *);
*/
int
ex_list(SCR *sp, EXCMD *cmdp)
{
	MARK *last;

	/* Print the range unambiguously (force list mode). */
	if (ex_print(sp, cmdp,
	    &cmdp->addr1, &cmdp->addr2, cmdp->iflags | E_C_LIST) != 0)
		return (1);

	/* Leave the cursor on the last line displayed. */
	last = &cmdp->addr2;
	sp->lno = last->lno;
	sp->cno = last->cno;
	return (0);
}
/*
* ex_number -- :[line [,line]] nu[mber] [count] [flags]
*
* Display the addressed lines with a leading line number.
*
* PUBLIC: int ex_number(SCR *, EXCMD *);
*/
int
ex_number(SCR *sp, EXCMD *cmdp)
{
	MARK *last;

	/* Print the range with leading line numbers (force hash mode). */
	if (ex_print(sp, cmdp,
	    &cmdp->addr1, &cmdp->addr2, cmdp->iflags | E_C_HASH) != 0)
		return (1);

	/* Leave the cursor on the last line displayed. */
	last = &cmdp->addr2;
	sp->lno = last->lno;
	sp->cno = last->cno;
	return (0);
}
/*
* ex_pr -- :[line [,line]] p[rint] [count] [flags]
*
* Display the addressed lines.
*
* PUBLIC: int ex_pr(SCR *, EXCMD *);
*/
int
ex_pr(SCR *sp, EXCMD *cmdp)
{
	MARK *last;

	/* Plain print: no extra display flags beyond the command's own. */
	if (ex_print(sp, cmdp, &cmdp->addr1, &cmdp->addr2, cmdp->iflags) != 0)
		return (1);

	/* Leave the cursor on the last line displayed. */
	last = &cmdp->addr2;
	sp->lno = last->lno;
	sp->cno = last->cno;
	return (0);
}
/*
* ex_print --
* Print the selected lines.
*
* PUBLIC: int ex_print(SCR *, EXCMD *, MARK *, MARK *, u_int32_t);
*/
int
ex_print(SCR *sp, EXCMD *cmdp, MARK *fp, MARK *tp, u_int32_t flags)
{
	recno_t from, to;	/* inclusive line-number range to display */
	size_t col, len;
	char *p, buf[10];
	/* An open file is required; NEEDFILE returns from here if none. */
	NEEDFILE(sp, cmdp);
	for (from = fp->lno, to = tp->lno; from <= to; ++from) {
		col = 0;	/* output column, tracked across ex_prchars calls */
		/*
		 * Display the line number. The %6 format is specified
		 * by POSIX 1003.2, and is almost certainly large enough.
		 * Check, though, just in case.
		 */
		if (LF_ISSET(E_C_HASH)) {
			if (from <= 999999) {
				snprintf(buf, sizeof(buf), "%6lu ", (ulong)from);
				p = buf;
			} else
				p = "TOOBIG ";
			if (ex_prchars(sp, p, &col, 8, 0, 0))
				return (1);
		}
		/*
		 * Display the line. The format for E_C_PRINT isn't very good,
		 * especially in handling end-of-line tabs, but they're almost
		 * backward compatible.
		 */
		if (db_get(sp, from, DBG_FATAL, &p, &len))
			return (1);
		/* An empty line still produces a newline unless in list mode. */
		if (len == 0 && !LF_ISSET(E_C_LIST))
			(void)ex_puts(sp, "\n");
		else if (ex_ldisplay(sp, p, len, col, flags))
			return (1);
		/* A user interrupt stops the output but is not an error. */
		if (INTERRUPTED(sp))
			break;
	}
	return (0);
}
/*
* ex_ldisplay --
* Display a line without any preceding number.
*
* PUBLIC: int ex_ldisplay(SCR *, const char *, size_t, size_t, u_int);
*/
int
ex_ldisplay(SCR *sp, const char *p, size_t len, size_t col, u_int flags)
{
	/* Dump the line text, starting at column 'col'. */
	if (len > 0 && ex_prchars(sp, p, &col, len, LF_ISSET(E_C_LIST), 0))
		return (1);
	/* In list mode, mark the end of the line with a '$'. */
	if (!INTERRUPTED(sp) && LF_ISSET(E_C_LIST)) {
		p = "$";
		if (ex_prchars(sp, p, &col, 1, LF_ISSET(E_C_LIST), 0))
			return (1);
	}
	/* Terminate the display unless the user interrupted it. */
	if (!INTERRUPTED(sp))
		(void)ex_puts(sp, "\n");
	return (0);
}
/*
* ex_scprint --
* Display a line for the substitute with confirmation routine.
*
* PUBLIC: int ex_scprint(SCR *, MARK *, MARK *);
*/
int
ex_scprint(SCR *sp, MARK *fp, MARK *tp)
{
	const char *p;
	size_t col, len;
	col = 0;
	/* When line numbers are displayed, skip over the 8-column number field. */
	if (O_ISSET(sp, O_NUMBER)) {
		p = "        ";
		if (ex_prchars(sp, p, &col, 8, 0, 0))
			return (1);
	}
	/*
	 * Fetch the line containing the match.
	 * NOTE(review): appears to assume fp and tp address the same line —
	 * confirm against the substitute-with-confirmation caller.
	 */
	if (db_get(sp, fp->lno, DBG_FATAL, (char **)&p, &len))
		return (1);
	/* Render the text before the match as blanks (repeatc = ' '). */
	if (ex_prchars(sp, p, &col, fp->cno, 0, ' '))
		return (1);
	p += fp->cno;
	/* Underline the matched span with '^' (at least one character). */
	if (ex_prchars(sp,
	    p, &col, tp->cno == fp->cno ? 1 : tp->cno - fp->cno, 0, '^'))
		return (1);
	if (INTERRUPTED(sp))
		return (1);
	/* Prompt for the confirmation answer. */
	p = "[ynq]";
	if (ex_prchars(sp, p, &col, 5, 0, 0))
		return (1);
	(void)ex_fflush(sp);
	return (0);
}
/*
* ex_prchars --
* Local routine to dump characters to the screen.
*/
static int
ex_prchars(SCR *sp, const char *p, size_t *colp, size_t len, u_int flags,
    int repeatc)
{
	CHAR_T ch, *kp;
	size_t col, tlen, ts;
	/* The O_LIST option forces list-style display. */
	if (O_ISSET(sp, O_LIST))
		LF_SET(E_C_LIST);
	ts = O_VAL(sp, O_TABSTOP);
	for (col = *colp; len--;)
		/* Expand tabs to the next tab stop unless in list mode. */
		if ((ch = *p++) == '\t' && !LF_ISSET(E_C_LIST))
			for (tlen = ts - col % ts;
			    col < sp->cols && tlen--; ++col) {
				(void)ex_printf(sp,
				    "%c", repeatc ? repeatc : ' ');
				if (INTERRUPTED(sp))
					goto intr;
			}
		else {
			/* Print the character's visible key-name expansion. */
			kp = KEY_NAME(sp, ch);
			tlen = KEY_LEN(sp, ch);
			/* Fast path: it fits on the current screen row. */
			if (!repeatc && col + tlen < sp->cols) {
				(void)ex_puts(sp, kp);
				col += tlen;
			} else
				/* Slow path: emit one char at a time, wrapping
				 * at the screen edge; repeatc substitutes for
				 * the actual characters when non-zero. */
				for (; tlen--; ++kp, ++col) {
					if (col == sp->cols) {
						col = 0;
						(void)ex_puts(sp, "\n");
					}
					(void)ex_printf(sp,
					    "%c", repeatc ? repeatc : *kp);
					if (INTERRUPTED(sp))
						goto intr;
				}
		}
	/* On interrupt or completion, report the final column to the caller. */
intr:	*colp = col;
	return (0);
}
/*
* ex_printf --
* Ex's version of printf.
*
* PUBLIC: int ex_printf(SCR *, const char *, ...);
*/
int
ex_printf(SCR *sp, const char *fmt, ...)
{
	EX_PRIVATE *exp;
	va_list ap;
	size_t avail;
	int n;

	exp = EXP(sp);

	/*
	 * If the buffer is already full, flush first so there is always at
	 * least one byte available for vsnprintf's terminating NUL.
	 */
	if (exp->obp_len >= sizeof(exp->obp))
		(void)ex_fflush(sp);
	avail = sizeof(exp->obp) - exp->obp_len;

	va_start(ap, fmt);
	n = vsnprintf(exp->obp + exp->obp_len, avail, fmt, ap);
	va_end(ap);

	/*
	 * A negative return is an output/encoding error; nothing usable was
	 * written.  The old code stored it into a size_t, turning it into a
	 * huge value and corrupting obp_len.
	 */
	if (n < 0)
		return (0);

	/* vsnprintf returns the untruncated length; clamp to what fit. */
	if ((size_t)n >= avail)
		n = (int)(avail - 1);
	exp->obp_len += n;

	/*
	 * Flush when reach a <newline> or half the buffer.  Guard against an
	 * empty buffer: the old test read obp[-1] when obp_len was zero.
	 */
	if ((exp->obp_len != 0 && exp->obp[exp->obp_len - 1] == '\n') ||
	    exp->obp_len > sizeof(exp->obp) / 2)
		(void)ex_fflush(sp);
	return (n);
}
/*
* ex_puts --
* Ex's version of puts.
*
* PUBLIC: int ex_puts(SCR *, const char *);
*/
int
ex_puts(SCR *sp, const char *str)
{
	EX_PRIVATE *exp;
	int doflush, n;

	exp = EXP(sp);

	/* Flush when reach a <newline> or the end of the buffer. */
	for (doflush = n = 0; *str != '\0'; ++n) {
		/*
		 * Flush before the buffer is completely full.  The old test
		 * used '>', which let obp_len reach sizeof(exp->obp) so the
		 * store below wrote one byte past the end of the buffer.
		 */
		if (exp->obp_len >= sizeof(exp->obp))
			(void)ex_fflush(sp);
		if ((exp->obp[exp->obp_len++] = *str++) == '\n')
			doflush = 1;
	}
	if (doflush)
		(void)ex_fflush(sp);
	return (n);
}
/*
* ex_fflush --
* Ex's version of fflush.
*
* PUBLIC: int ex_fflush(SCR *sp);
*/
int
ex_fflush(SCR *sp)
{
	EX_PRIVATE *exp = EXP(sp);

	/* Nothing buffered, nothing to do. */
	if (exp->obp_len == 0)
		return (0);

	/* Hand the buffered output to the screen's message routine. */
	sp->gp->scr_msg(sp, M_NONE, exp->obp, exp->obp_len);
	exp->obp_len = 0;
	return (0);
}
|
f40743283cea381836cadaf2357b3e6b2f9c64cb
|
618d0e65e0d3bdefc5c82c8e0d337870987b6be7
|
/resources/add-node.h
|
af9f482ddd65a5d8658e4dff0dcd0763dc4d61aa
|
[
"Apache-2.0"
] |
permissive
|
mongodb-js/boxednode
|
6f980f20b831ae3e25ceee31314af6041a68e9a5
|
92c0e50e3ba70e01b634820fc84d38dde95bf234
|
refs/heads/main
| 2023-09-04T05:03:51.454814
| 2023-08-05T09:24:17
| 2023-08-05T09:24:17
| 297,444,810
| 580
| 9
|
Apache-2.0
| 2023-08-05T09:14:09
| 2020-09-21T19:48:01
|
TypeScript
|
UTF-8
|
C
| false
| false
| 4,405
|
h
|
add-node.h
|
/*
 * When a native addon is compiled directly into a boxednode binary, it must
 * not register itself the normal way (a node::node_module hooked up at DSO
 * load time).  This header redefines Node's registration macros so each
 * module instead defines a generated BOXEDNODE_REGISTER_FUNCTION that hands
 * the module descriptor back to the embedder through an out-parameter.
 * The module name is taken from BOXEDNODE_MODULE_NAME rather than the macro
 * argument, so the embedder controls the registered name.
 */
#ifdef BUILDING_BOXEDNODE_EXTENSION
/* Non-context-aware variant: regfunc goes in the addon_register_func slot. */
#undef NODE_MODULE_X
#define NODE_MODULE_X(modname, regfunc, priv, flags)                  \
  extern "C" {                                                        \
    static node::node_module _module =                                \
    {                                                                 \
      NODE_MODULE_VERSION,                                            \
      flags,                                                          \
      NULL,  /* NOLINT (readability/null_usage) */                    \
      __FILE__,                                                       \
      (node::addon_register_func) (regfunc),                          \
      NULL,  /* NOLINT (readability/null_usage) */                    \
      NODE_STRINGIFY(BOXEDNODE_MODULE_NAME),                          \
      priv,                                                           \
      NULL   /* NOLINT (readability/null_usage) */                    \
    };                                                                \
    void BOXEDNODE_REGISTER_FUNCTION(                                 \
        const void** node_mod, const void**) {                        \
      *node_mod = &_module;                                           \
    }                                                                 \
  }
/* Context-aware variant: regfunc goes in the addon_context_register_func
 * slot instead, so it is re-run for each new context. */
#undef NODE_MODULE_CONTEXT_AWARE_X
#define NODE_MODULE_CONTEXT_AWARE_X(modname, regfunc, priv, flags)    \
  extern "C" {                                                        \
    static node::node_module _module =                                \
    {                                                                 \
      NODE_MODULE_VERSION,                                            \
      flags,                                                          \
      NULL,  /* NOLINT (readability/null_usage) */                    \
      __FILE__,                                                       \
      NULL,  /* NOLINT (readability/null_usage) */                    \
      (node::addon_context_register_func) (regfunc),                  \
      NODE_STRINGIFY(BOXEDNODE_MODULE_NAME),                          \
      priv,                                                           \
      NULL   /* NOLINT (readability/null_usage) */                    \
    };                                                                \
    void BOXEDNODE_REGISTER_FUNCTION(                                 \
        const void** node_mod, const void**) {                        \
      *node_mod = &_module;                                           \
    }                                                                 \
  }
/* Convenience wrappers; 0x2 is the flags value passed through to the
 * node_module descriptor (NOTE(review): confirm it matches NM_F_LINKED
 * in the targeted Node version). */
#undef NODE_MODULE
#define NODE_MODULE(modname, regfunc) \
  NODE_MODULE_X(modname, regfunc, NULL, 0x2)
#undef NODE_MODULE_CONTEXT_AWARE
#define NODE_MODULE_CONTEXT_AWARE(modname, regfunc) \
  NODE_MODULE_CONTEXT_AWARE_X(modname, regfunc, NULL, 0x2)
#undef NODE_MODULE_DECL
#define NODE_MODULE_DECL /* nothing */
/* Rebuild Node's NODE_MODULE_INIT machinery on top of the redefined
 * context-aware registration macro above. */
#undef NODE_MODULE_INITIALIZER_BASE
#define NODE_MODULE_INITIALIZER_BASE node_register_module_v
#undef NODE_MODULE_INITIALIZER_X
#define NODE_MODULE_INITIALIZER_X(base, version) \
  NODE_MODULE_INITIALIZER_X_HELPER(base, version)
#undef NODE_MODULE_INITIALIZER_X_HELPER
#define NODE_MODULE_INITIALIZER_X_HELPER(base, version) base##version
#undef NODE_MODULE_INITIALIZER
#define NODE_MODULE_INITIALIZER \
  NODE_MODULE_INITIALIZER_X(NODE_MODULE_INITIALIZER_BASE, \
      NODE_MODULE_VERSION)
#undef NODE_MODULE_INIT
#define NODE_MODULE_INIT()                                            \
  extern "C" NODE_MODULE_EXPORT void                                  \
  NODE_MODULE_INITIALIZER(v8::Local<v8::Object> exports,              \
                          v8::Local<v8::Value> module,                \
                          v8::Local<v8::Context> context);            \
  NODE_MODULE_CONTEXT_AWARE(NODE_GYP_MODULE_NAME,                     \
                            NODE_MODULE_INITIALIZER)                  \
  void NODE_MODULE_INITIALIZER(v8::Local<v8::Object> exports,         \
                               v8::Local<v8::Value> module,           \
                               v8::Local<v8::Context> context)
#endif  // BUILDING_BOXEDNODE_EXTENSION
|
9b0311d8b37001b602bd1b0dac1dcf05e16f8423
|
99bdb3251fecee538e0630f15f6574054dfc1468
|
/bsp/microchip/same70/bsp/hri/hri_rtt_e70b.h
|
daffcdf4bc0dcaa12fae7417afa299fe7cf3bacc
|
[
"Apache-2.0",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"MIT",
"BSD-3-Clause",
"X11",
"BSD-4-Clause-UC",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
RT-Thread/rt-thread
|
03a7c52c2aeb1b06a544143b0e803d72f47d1ece
|
3602f891211904a27dcbd51e5ba72fefce7326b2
|
refs/heads/master
| 2023-09-01T04:10:20.295801
| 2023-08-31T16:20:55
| 2023-08-31T16:20:55
| 7,408,108
| 9,599
| 5,805
|
Apache-2.0
| 2023-09-14T13:37:26
| 2013-01-02T14:49:21
|
C
|
UTF-8
|
C
| false
| false
| 12,677
|
h
|
hri_rtt_e70b.h
|
/**
* \file
*
* \brief SAM RTT
*
* Copyright (c) 2017-2018 Microchip Technology Inc. and its subsidiaries.
*
* \asf_license_start
*
* \page License
*
* Subject to your compliance with these terms, you may use Microchip
* software and any derivatives exclusively with Microchip products.
* It is your responsibility to comply with third party license terms applicable
* to your use of third party software (including open source software) that
* may accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES,
* WHETHER EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE,
* INCLUDING ANY IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY,
* AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT WILL MICROCHIP BE
* LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE, INCIDENTAL OR CONSEQUENTIAL
* LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND WHATSOEVER RELATED TO THE
* SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS BEEN ADVISED OF THE
* POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE FULLEST EXTENT
* ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN ANY WAY
* RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*
* \asf_license_stop
*/
#ifdef _SAME70_RTT_COMPONENT_
#ifndef _HRI_RTT_E70B_H_INCLUDED_
#define _HRI_RTT_E70B_H_INCLUDED_
#ifdef __cplusplus
extern "C" {
#endif
#include <stdbool.h>
#include <hal_atomic.h>
#if defined(ENABLE_RTT_CRITICAL_SECTIONS)
#define RTT_CRITICAL_SECTION_ENTER() CRITICAL_SECTION_ENTER()
#define RTT_CRITICAL_SECTION_LEAVE() CRITICAL_SECTION_LEAVE()
#else
#define RTT_CRITICAL_SECTION_ENTER()
#define RTT_CRITICAL_SECTION_LEAVE()
#endif
typedef uint32_t hri_rtt_ar_reg_t;
typedef uint32_t hri_rtt_mr_reg_t;
typedef uint32_t hri_rtt_sr_reg_t;
typedef uint32_t hri_rtt_vr_reg_t;
static inline hri_rtt_vr_reg_t hri_rtt_get_VR_CRTV_bf(const void *const hw, hri_rtt_vr_reg_t mask)
{
return (((Rtt *)hw)->RTT_VR & RTT_VR_CRTV(mask)) >> RTT_VR_CRTV_Pos;
}
static inline hri_rtt_vr_reg_t hri_rtt_read_VR_CRTV_bf(const void *const hw)
{
return (((Rtt *)hw)->RTT_VR & RTT_VR_CRTV_Msk) >> RTT_VR_CRTV_Pos;
}
static inline hri_rtt_vr_reg_t hri_rtt_get_VR_reg(const void *const hw, hri_rtt_vr_reg_t mask)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_VR;
tmp &= mask;
return tmp;
}
static inline hri_rtt_vr_reg_t hri_rtt_read_VR_reg(const void *const hw)
{
return ((Rtt *)hw)->RTT_VR;
}
static inline bool hri_rtt_get_SR_ALMS_bit(const void *const hw)
{
return (((Rtt *)hw)->RTT_SR & RTT_SR_ALMS) > 0;
}
static inline bool hri_rtt_get_SR_RTTINC_bit(const void *const hw)
{
return (((Rtt *)hw)->RTT_SR & RTT_SR_RTTINC) > 0;
}
static inline hri_rtt_sr_reg_t hri_rtt_get_SR_reg(const void *const hw, hri_rtt_sr_reg_t mask)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_SR;
tmp &= mask;
return tmp;
}
static inline hri_rtt_sr_reg_t hri_rtt_read_SR_reg(const void *const hw)
{
return ((Rtt *)hw)->RTT_SR;
}
static inline void hri_rtt_set_MR_ALMIEN_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR |= RTT_MR_ALMIEN;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline bool hri_rtt_get_MR_ALMIEN_bit(const void *const hw)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_MR;
tmp = (tmp & RTT_MR_ALMIEN) >> RTT_MR_ALMIEN_Pos;
return (bool)tmp;
}
static inline void hri_rtt_write_MR_ALMIEN_bit(const void *const hw, bool value)
{
uint32_t tmp;
RTT_CRITICAL_SECTION_ENTER();
tmp = ((Rtt *)hw)->RTT_MR;
tmp &= ~RTT_MR_ALMIEN;
tmp |= value << RTT_MR_ALMIEN_Pos;
((Rtt *)hw)->RTT_MR = tmp;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_clear_MR_ALMIEN_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR &= ~RTT_MR_ALMIEN;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_toggle_MR_ALMIEN_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR ^= RTT_MR_ALMIEN;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_set_MR_RTTINCIEN_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR |= RTT_MR_RTTINCIEN;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline bool hri_rtt_get_MR_RTTINCIEN_bit(const void *const hw)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_MR;
tmp = (tmp & RTT_MR_RTTINCIEN) >> RTT_MR_RTTINCIEN_Pos;
return (bool)tmp;
}
static inline void hri_rtt_write_MR_RTTINCIEN_bit(const void *const hw, bool value)
{
uint32_t tmp;
RTT_CRITICAL_SECTION_ENTER();
tmp = ((Rtt *)hw)->RTT_MR;
tmp &= ~RTT_MR_RTTINCIEN;
tmp |= value << RTT_MR_RTTINCIEN_Pos;
((Rtt *)hw)->RTT_MR = tmp;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_clear_MR_RTTINCIEN_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR &= ~RTT_MR_RTTINCIEN;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_toggle_MR_RTTINCIEN_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR ^= RTT_MR_RTTINCIEN;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_set_MR_RTTRST_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR |= RTT_MR_RTTRST;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline bool hri_rtt_get_MR_RTTRST_bit(const void *const hw)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_MR;
tmp = (tmp & RTT_MR_RTTRST) >> RTT_MR_RTTRST_Pos;
return (bool)tmp;
}
static inline void hri_rtt_write_MR_RTTRST_bit(const void *const hw, bool value)
{
uint32_t tmp;
RTT_CRITICAL_SECTION_ENTER();
tmp = ((Rtt *)hw)->RTT_MR;
tmp &= ~RTT_MR_RTTRST;
tmp |= value << RTT_MR_RTTRST_Pos;
((Rtt *)hw)->RTT_MR = tmp;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_clear_MR_RTTRST_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR &= ~RTT_MR_RTTRST;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_toggle_MR_RTTRST_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR ^= RTT_MR_RTTRST;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_set_MR_RTTDIS_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR |= RTT_MR_RTTDIS;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline bool hri_rtt_get_MR_RTTDIS_bit(const void *const hw)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_MR;
tmp = (tmp & RTT_MR_RTTDIS) >> RTT_MR_RTTDIS_Pos;
return (bool)tmp;
}
static inline void hri_rtt_write_MR_RTTDIS_bit(const void *const hw, bool value)
{
uint32_t tmp;
RTT_CRITICAL_SECTION_ENTER();
tmp = ((Rtt *)hw)->RTT_MR;
tmp &= ~RTT_MR_RTTDIS;
tmp |= value << RTT_MR_RTTDIS_Pos;
((Rtt *)hw)->RTT_MR = tmp;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_clear_MR_RTTDIS_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR &= ~RTT_MR_RTTDIS;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_toggle_MR_RTTDIS_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR ^= RTT_MR_RTTDIS;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_set_MR_RTC1HZ_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR |= RTT_MR_RTC1HZ;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline bool hri_rtt_get_MR_RTC1HZ_bit(const void *const hw)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_MR;
tmp = (tmp & RTT_MR_RTC1HZ) >> RTT_MR_RTC1HZ_Pos;
return (bool)tmp;
}
static inline void hri_rtt_write_MR_RTC1HZ_bit(const void *const hw, bool value)
{
uint32_t tmp;
RTT_CRITICAL_SECTION_ENTER();
tmp = ((Rtt *)hw)->RTT_MR;
tmp &= ~RTT_MR_RTC1HZ;
tmp |= value << RTT_MR_RTC1HZ_Pos;
((Rtt *)hw)->RTT_MR = tmp;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_clear_MR_RTC1HZ_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR &= ~RTT_MR_RTC1HZ;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_toggle_MR_RTC1HZ_bit(const void *const hw)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR ^= RTT_MR_RTC1HZ;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_set_MR_RTPRES_bf(const void *const hw, hri_rtt_mr_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR |= RTT_MR_RTPRES(mask);
RTT_CRITICAL_SECTION_LEAVE();
}
static inline hri_rtt_mr_reg_t hri_rtt_get_MR_RTPRES_bf(const void *const hw, hri_rtt_mr_reg_t mask)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_MR;
tmp = (tmp & RTT_MR_RTPRES(mask)) >> RTT_MR_RTPRES_Pos;
return tmp;
}
static inline void hri_rtt_write_MR_RTPRES_bf(const void *const hw, hri_rtt_mr_reg_t data)
{
uint32_t tmp;
RTT_CRITICAL_SECTION_ENTER();
tmp = ((Rtt *)hw)->RTT_MR;
tmp &= ~RTT_MR_RTPRES_Msk;
tmp |= RTT_MR_RTPRES(data);
((Rtt *)hw)->RTT_MR = tmp;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_clear_MR_RTPRES_bf(const void *const hw, hri_rtt_mr_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR &= ~RTT_MR_RTPRES(mask);
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_toggle_MR_RTPRES_bf(const void *const hw, hri_rtt_mr_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR ^= RTT_MR_RTPRES(mask);
RTT_CRITICAL_SECTION_LEAVE();
}
static inline hri_rtt_mr_reg_t hri_rtt_read_MR_RTPRES_bf(const void *const hw)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_MR;
tmp = (tmp & RTT_MR_RTPRES_Msk) >> RTT_MR_RTPRES_Pos;
return tmp;
}
static inline void hri_rtt_set_MR_reg(const void *const hw, hri_rtt_mr_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR |= mask;
RTT_CRITICAL_SECTION_LEAVE();
}
/* Return the RTT_MR register contents restricted to the requested bits. */
static inline hri_rtt_mr_reg_t hri_rtt_get_MR_reg(const void *const hw, hri_rtt_mr_reg_t mask)
{
	return ((Rtt *)hw)->RTT_MR & mask;
}
static inline void hri_rtt_write_MR_reg(const void *const hw, hri_rtt_mr_reg_t data)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR = data;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_clear_MR_reg(const void *const hw, hri_rtt_mr_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR &= ~mask;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_toggle_MR_reg(const void *const hw, hri_rtt_mr_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_MR ^= mask;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline hri_rtt_mr_reg_t hri_rtt_read_MR_reg(const void *const hw)
{
return ((Rtt *)hw)->RTT_MR;
}
static inline void hri_rtt_set_AR_ALMV_bf(const void *const hw, hri_rtt_ar_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_AR |= RTT_AR_ALMV(mask);
RTT_CRITICAL_SECTION_LEAVE();
}
static inline hri_rtt_ar_reg_t hri_rtt_get_AR_ALMV_bf(const void *const hw, hri_rtt_ar_reg_t mask)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_AR;
tmp = (tmp & RTT_AR_ALMV(mask)) >> RTT_AR_ALMV_Pos;
return tmp;
}
static inline void hri_rtt_write_AR_ALMV_bf(const void *const hw, hri_rtt_ar_reg_t data)
{
uint32_t tmp;
RTT_CRITICAL_SECTION_ENTER();
tmp = ((Rtt *)hw)->RTT_AR;
tmp &= ~RTT_AR_ALMV_Msk;
tmp |= RTT_AR_ALMV(data);
((Rtt *)hw)->RTT_AR = tmp;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_clear_AR_ALMV_bf(const void *const hw, hri_rtt_ar_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_AR &= ~RTT_AR_ALMV(mask);
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_toggle_AR_ALMV_bf(const void *const hw, hri_rtt_ar_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_AR ^= RTT_AR_ALMV(mask);
RTT_CRITICAL_SECTION_LEAVE();
}
static inline hri_rtt_ar_reg_t hri_rtt_read_AR_ALMV_bf(const void *const hw)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_AR;
tmp = (tmp & RTT_AR_ALMV_Msk) >> RTT_AR_ALMV_Pos;
return tmp;
}
static inline void hri_rtt_set_AR_reg(const void *const hw, hri_rtt_ar_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_AR |= mask;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline hri_rtt_ar_reg_t hri_rtt_get_AR_reg(const void *const hw, hri_rtt_ar_reg_t mask)
{
uint32_t tmp;
tmp = ((Rtt *)hw)->RTT_AR;
tmp &= mask;
return tmp;
}
static inline void hri_rtt_write_AR_reg(const void *const hw, hri_rtt_ar_reg_t data)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_AR = data;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_clear_AR_reg(const void *const hw, hri_rtt_ar_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_AR &= ~mask;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline void hri_rtt_toggle_AR_reg(const void *const hw, hri_rtt_ar_reg_t mask)
{
RTT_CRITICAL_SECTION_ENTER();
((Rtt *)hw)->RTT_AR ^= mask;
RTT_CRITICAL_SECTION_LEAVE();
}
static inline hri_rtt_ar_reg_t hri_rtt_read_AR_reg(const void *const hw)
{
return ((Rtt *)hw)->RTT_AR;
}
#ifdef __cplusplus
}
#endif
#endif /* _HRI_RTT_E70B_H_INCLUDED */
#endif /* _SAME70_RTT_COMPONENT_ */
|
64860d7121fa1c9abfc5cc005cd5b6e45cf6f4ea
|
99bdb3251fecee538e0630f15f6574054dfc1468
|
/bsp/lpc55sxx/Libraries/drivers/drv_sound_wm8904.h
|
17c9402b4618c4fdcc198811b249b06bb949547e
|
[
"Apache-2.0",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"MIT",
"BSD-3-Clause",
"X11",
"BSD-4-Clause-UC",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
RT-Thread/rt-thread
|
03a7c52c2aeb1b06a544143b0e803d72f47d1ece
|
3602f891211904a27dcbd51e5ba72fefce7326b2
|
refs/heads/master
| 2023-09-01T04:10:20.295801
| 2023-08-31T16:20:55
| 2023-08-31T16:20:55
| 7,408,108
| 9,599
| 5,805
|
Apache-2.0
| 2023-09-14T13:37:26
| 2013-01-02T14:49:21
|
C
|
UTF-8
|
C
| false
| false
| 7,897
|
h
|
drv_sound_wm8904.h
|
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-03-12 Vandoul the first version
*/
#ifndef __DRV_SOUND_WM8904_H__
#define __DRV_SOUND_WM8904_H__
#include <rtthread.h>
typedef enum
{
WM8904_RESET = 0x00,
WM8904_ANALOG_ADC_0 = 0x0A,
WM8904_POWER_MGMT_0 = 0x0C,
WM8904_POWER_MGMT_2 = 0x0E,
WM8904_POWER_MGMT_3 = 0x0F,
WM8904_POWER_MGMT_6 = 0x12,
WM8904_CLK_RATES_0 = 0x14,
WM8904_CLK_RATES_1 = 0x15,
WM8904_CLK_RATES_2 = 0x16,
WM8904_AUDIO_IF_0 = 0x18,
WM8904_AUDIO_IF_1 = 0x19,
WM8904_AUDIO_IF_2 = 0x1A,
WM8904_AUDIO_IF_3 = 0x1B,
WM8904_DAC_DIG_1 = 0x21,
WM8904_DAC_DIG_0 = 0x27,
WM8904_ANALOG_LEFT_IN_0 = 0x2C,
WM8904_ANALOG_RIGHT_IN_0 = 0x2D,
WM8904_ANALOG_LEFT_IN_1 = 0x2E,
WM8904_ANALOG_RIGHT_IN_1 = 0x2F,
WM8904_ANALOG_OUT1_LEFT = 0x39,
WM8904_ANALOG_OUT1_RIGHT = 0x3A,
WM8904_ANALOG_OUT12_ZC = 0x3D,
WM8904_DC_SERVO_0 = 0x43,
WM8904_ANALOG_HP_0 = 0x5A,
WM8904_CHRG_PUMP_0 = 0x62,
WM8904_CLS_W_0 = 0x68,
WM8904_WRT_SEQUENCER_0 = 0x6C,
WM8904_WRT_SEQUENCER_3 = 0x6F,
WM8904_WRT_SEQUENCER_4 = 0x70,
WM8904_DAC_DIGITAL_VOLUME_LEFT = 0x1E,
WM8904_DAC_DIGITAL_VOLUME_RIGHT = 0x1F,
WM8904_ADC_DIGITAL_VOLUME_LEFT = 0x24,
WM8904_ADC_DIGITAL_VOLUME_RIGHT = 0x25,
WM8904_ANALOG_OUT2_LEFT = 0x3B,
WM8904_ANALOG_OUT2_RIGHT = 0x3C,
/* FLL control register */
WM8904_FLL_CONTROL_1 = 0x74,
WM8904_FLL_CONTROL_2 = 0x75,
WM8904_FLL_CONTROL_3 = 0x76,
WM8904_FLL_CONTROL_4 = 0x77,
WM8904_FLL_CONTROL_5 = 0x78,
/* GPIO control register */
WM8904_GPIO_CONTROL_1 = 0x79,
WM8904_GPIO_CONTROL_2 = 0x7A,
WM8904_GPIO_CONTROL_3 = 0x7B,
WM8904_GPIO_CONTROL_4 = 0x7C,
/* FLL nco */
WM89004_FLL_NCO_TEST_0 = 0xF7,
WM89004_FLL_NCO_TEST_1 = 0xF8,
}wm8904_reg_t;
#define WM8904_LRC_POLARITY_POS (4U)
#define WM8904_LRC_POLARITY_NOOMAL (0)
#define WM8904_LRC_POLARITY_INVERTED (1U << WM8904_LRC_POLARITY_POS)
typedef enum _wm8904_module
{
WM8904_MODULE_ADC = 0, /*!< module ADC */
WM8904_MODULE_DAC, /*!< module DAC */
WM8904_MODULE_PGA, /*!< module PGA */
WM8904_MODULE_HEADPHONE, /*!< module headphone */
WM8904_MODULE_LINEOUT, /*!< module line out */
}wm8904_module_t;
enum
{
WM8904_HEADPHONE_LEFT = 1U,
WM8904_HEADPHONE_RIGHT = 2U,
WM8904_LINEOUT_LEFT = 4U,
WM8904_LINEOUT_RIGHT = 8U,
};
typedef enum _wm8904_timeslot{
WM8904_TIMESLOT_0 = 0U,
WM8904_TIMESLOT_1,
}wm8904_timeslot_t;
typedef enum
{
WM8904_PROTOCOL_RIGHT_JUSTIFIED = 0x00,
WM8904_PROTOCOL_LEFT_JUSTIFIED = 0x01,
WM8904_PROTOCOL_I2S = 0x02,
WM8904_PROTOCOL_PCMA = 0x03,
WM8904_PROTOCOL_PCMB = 0x13,
}wm8904_protocol_t;
/*! @brief The SYSCLK / fs ratio. */
typedef enum _wm8904_fs_ratio
{
WM8904_FSRATIO_64X = 0x0, /*!< SYSCLK is 64 * sample rate * frame width */
WM8904_FSRATIO_128X = 0x1, /*!< SYSCLK is 128 * sample rate * frame width */
WM8904_FSRATIO_192X = 0x2, /*!< SYSCLK is 192 * sample rate * frame width */
WM8904_FSRATIO_256X = 0x3, /*!< SYSCLK is 256 * sample rate * frame width */
WM8904_FSRATIO_384X = 0x4, /*!< SYSCLK is 384 * sample rate * frame width */
WM8904_FSRATIO_512X = 0x5, /*!< SYSCLK is 512 * sample rate * frame width */
WM8904_FSRATIO_768X = 0x6, /*!< SYSCLK is 768 * sample rate * frame width */
WM8904_FSRATIO_1024X = 0x7, /*!< SYSCLK is 1024 * sample rate * frame width */
WM8904_FSRATIO_1408X = 0x8, /*!< SYSCLK is 1408 * sample rate * frame width */
WM8904_FSRATIO_1536X = 0x9 /*!< SYSCLK is 1536 * sample rate * frame width */
} wm8904_fs_ratio_t;
/*! @brief Sample rate. */
typedef enum _wm8904_sample_rate
{
WM8904_SAMPLERATE_8kHz = 0x0, /*!< 8 kHz */
WM8904_SAMPLERATE_12kHz = 0x1, /*!< 12kHz */
WM8904_SAMPLERATE_16kHz = 0x2, /*!< 16kHz */
WM8904_SAMPLERATE_24kHz = 0x3, /*!< 24kHz */
WM8904_SAMPLERATE_32kHz = 0x4, /*!< 32kHz */
WM8904_SAMPLERATE_48kHz = 0x5, /*!< 48kHz */
WM8904_SAMPLERATE_11025Hz = 0x6, /*!< 11.025kHz */
WM8904_SAMPLERATE_22050Hz = 0x7, /*!< 22.05kHz */
WM8904_SAMPLERATE_44100Hz = 0x8 /*!< 44.1kHz */
} wm8904_sample_rate_t;
/*! @brief Bit width. */
typedef enum _wm8904_bit_width
{
WM8904_BITWIDTH_16 = 0x0, /*!< 16 bits */
WM8904_BITWIDTH_20 = 0x1, /*!< 20 bits */
WM8904_BITWIDTH_24 = 0x2, /*!< 24 bits */
WM8904_BITWIDTH_32 = 0x3 /*!< 32 bits */
} wm8904_bit_width_t;
enum
{
WM8904_RECORD_SOURCE_DIFFERENTIAL_LINE = 1U, /*!< record source from differential line */
WM8904_RECORD_SOURCE_LINE_INPUT = 2U, /*!< record source from line input */
WM8904_RECORD_SOURCE_DIFFERENTIAL_MIC = 4U, /*!< record source from differential mic */
WM8904_RECORD_SOURCE_DIGITAL_MIC = 8U, /*!< record source from digital microphone */
};
enum
{
WM8904_RECORD_CHANNEL_LEFT1 = 1U, /*!< left record channel 1 */
WM8904_RECORD_CHANNEL_LEFT2 = 2U, /*!< left record channel 2 */
WM8904_RECORD_CHANNEL_LEFT3 = 4U, /*!< left record channel 3 */
WM8904_RECORD_CHANNEL_RIGHT1 = 1U, /*!< right record channel 1 */
WM8904_RECORD_CHANNEL_RIGHT2 = 2U, /*!< right record channel 2 */
WM8904_RECORD_CHANNEL_RIGHT3 = 4U, /*!< right record channel 3 */
WM8904_RECORD_CHANNEL_DIFFERENTIAL_POSITIVE1 = 1U, /*!< differential positive record channel 1 */
WM8904_RECORD_CHANNEL_DIFFERENTIAL_POSITIVE2 = 2U, /*!< differential positive record channel 2 */
WM8904_RECORD_CHANNEL_DIFFERENTIAL_POSITIVE3 = 4U, /*!< differential positive record channel 3 */
WM8904_RECORD_CHANNEL_DIFFERENTIAL_NEGATIVE1 = 8U, /*!< differential negative record channel 1 */
WM8904_RECORD_CHANNEL_DIFFERENTIAL_NEGATIVE2 = 16U, /*!< differential negative record channel 2 */
WM8904_RECORD_CHANNEL_DIFFERENTIAL_NEGATIVE3 = 32U, /*!< differential negative record channel 3 */
};
/*! @brief wm8904 play source
*
*/
enum
{
WM8904_PLAY_SOURCE_PGA = 1U, /*!< play source PGA, bypass ADC */
WM8904_PLAY_SOURCE_DAC = 4U, /*!< play source Input3 */
};
/*! @brief wm8904_fll_clk_source */
typedef enum _wm8904_fll_clk_source
{
WM8904_FLL_CLK_SOURCE_MCLK = 0U, /**< wm8904 FLL clock source from MCLK */
}wm8904_fll_clk_source_t;
/*! @brief wm8904 fll configuration */
typedef struct _wm8904_fll_config
{
wm8904_fll_clk_source_t source; /*!< fll reference clock source */
rt_uint32_t ref_clock_hz; /*!< fll reference clock frequency */
rt_uint32_t output_clock_hz; /*!< fll output clock frequency */
}wm8904_fll_config_t;
/*! @brief Audio format configuration. */
typedef struct _wm8904_audio_format
{
wm8904_fs_ratio_t fsRatio; /*!< SYSCLK / fs ratio */
wm8904_sample_rate_t sampleRate; /*!< Sample rate */
wm8904_bit_width_t bitWidth; /*!< Bit width */
} wm8904_audio_format_t;
struct wm8904_config
{
const char *i2c_bus_name;
const char *i2s_bus_name;
int i2c_addr;
wm8904_protocol_t protocol;
wm8904_audio_format_t format;
};
#define WM8904_I2C_ADDRESS (0x1A)
#define WM8904_I2C_BITRATE (400000U)
/* WM8904 maximum volume */
#define WM8904_MAP_HEADPHONE_LINEOUT_MAX_VOLUME 0x3FU
#define WM8904_DAC_MAX_VOLUME 0xC0U
#endif
|
a864b294ca881e2c1393647efbc08018ae28f74c
|
182bbadb0ee7f59f1abd154d06484e555a30c6d8
|
/third_party/inchi/INCHI_BASE/src/ixa.h
|
b04fe8868b8ea7ac9d6a7dd3dea9cd966c7d99db
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-1.0-or-later"
] |
permissive
|
epam/Indigo
|
08559861adf474122366b6e2e499ed3aa56272d1
|
8e473e69f393c3a57ff75b7728999c5fb4cbf1a3
|
refs/heads/master
| 2023-09-02T10:14:46.843829
| 2023-08-25T08:39:24
| 2023-08-25T08:39:24
| 37,536,320
| 265
| 106
|
Apache-2.0
| 2023-09-14T17:34:00
| 2015-06-16T14:45:56
|
C++
|
UTF-8
|
C
| false
| false
| 38,508
|
h
|
ixa.h
|
/*
* International Chemical Identifier (InChI)
* Version 1
* Software version 1.06
* December 15, 2020
*
* The InChI library and programs are free software developed under the
* auspices of the International Union of Pure and Applied Chemistry (IUPAC).
* Originally developed at NIST.
* Modifications and additions by IUPAC and the InChI Trust.
* Some portions of code were developed/changed by external contributors
* (either contractor or volunteer) which are listed in the file
* 'External-contributors' included in this distribution.
*
* IUPAC/InChI-Trust Licence No.1.0 for the
* International Chemical Identifier (InChI)
* Copyright (C) IUPAC and InChI Trust
*
* This library is free software; you can redistribute it and/or modify it
* under the terms of the IUPAC/InChI Trust InChI Licence No.1.0,
* or any later version.
*
* Please note that this library is distributed WITHOUT ANY WARRANTIES
* whatsoever, whether expressed or implied.
* See the IUPAC/InChI-Trust InChI Licence No.1.0 for more details.
*
* You should have received a copy of the IUPAC/InChI Trust InChI
* Licence No. 1.0 with this library; if not, please e-mail:
*
* info@inchi-trust.org
*
*/
#ifndef __IXA_H__
#define __IXA_H__
/****************************************************************************/
/* InChI Extensible API Object Handles */
/*
* These are the "handles" which can be used to refer to "Objects"
* used in the InChI Extensible API.
*
* To ensure that each handle has a different formal type, each is declared
* as a pointer to a C struct containing a dummy integer field (which is in
* fact not used). In practice, the values are simply cast from int, but
* this approach provides greater type security, and prevents handles for
* different objects getting mixed up.
*/
typedef struct { int dummy; } IXA_STATUS_HANDLE_STRUCT;
typedef IXA_STATUS_HANDLE_STRUCT *IXA_STATUS_HANDLE;
typedef struct { int dummy; } IXA_MOL_HANDLE_STRUCT;
typedef IXA_MOL_HANDLE_STRUCT *IXA_MOL_HANDLE;
typedef struct { int dummy; } IXA_INCHIBUILDER_HANDLE_STRUCT;
typedef IXA_INCHIBUILDER_HANDLE_STRUCT *IXA_INCHIBUILDER_HANDLE;
typedef struct { int dummy; } IXA_INCHIKEYBUILDER_HANDLE_STRUCT;
typedef IXA_INCHIKEYBUILDER_HANDLE_STRUCT *IXA_INCHIKEYBUILDER_HANDLE;
/****************************************************************************/
/* Types for Atom, Bond and Stereo Descriptor Identifiers */
/*
* These types are for the identifiers for individual atoms, bonds and
* stereodescriptors in an IXA Molecule Object.
*
* To ensure that each identifier has a different formal type, each is
* declared as a pointer to a C struct containing a dummy integer field
* (which is in fact not used). In practice, the values are simply cast
* from int, but this approach provides greater type security, and
* prevents different sorts of identifier from getting mixed up.
*/
typedef struct { int dummy; } IXA_ATOMID_STRUCT;
typedef IXA_ATOMID_STRUCT *IXA_ATOMID;
typedef struct { int dummy; } IXA_BONDID_STRUCT;
typedef IXA_BONDID_STRUCT *IXA_BONDID;
typedef struct { int dummy; } IXA_STEREOID_STRUCT;
typedef IXA_STEREOID_STRUCT *IXA_STEREOID;
/* Extended mol data */
typedef struct { int dummy; } IXA_POLYMERUNITID_STRUCT;
typedef IXA_POLYMERUNITID_STRUCT *IXA_POLYMERUNITID; /* polymer unit */
/****************************************************************************/
/* Constants and enumerated types */
#define IXA_ATOMID_INVALID ((IXA_ATOMID)0)
#define IXA_ATOMID_IMPLICIT_H ((IXA_ATOMID)-1)
#define IXA_BONDID_INVALID ((IXA_BONDID)0)
#define IXA_STEREOID_INVALID ((IXA_STEREOID)0)
#define IXA_ATOM_NATURAL_MASS 0
#define IXA_POLYMERUNITID_INVALID ((IXA_POLYMERUNITID)0)
#define IXA_EXT_MOLDATA_INVALID (-1)
#define IXA_EXT_POLYMER_INVALID (-1)
#define IXA_EXT_V3000_INVALID (-1)
typedef enum
{
IXA_STATUS_SUCCESS,
IXA_STATUS_WARNING,
IXA_STATUS_ERROR
} IXA_STATUS;
typedef enum
{
IXA_FALSE = 0,
IXA_TRUE = 1
} IXA_BOOL;
typedef enum
{
IXA_ATOM_RADICAL_NONE = 0,
IXA_ATOM_RADICAL_SINGLET = 1,
IXA_ATOM_RADICAL_DOUBLET = 2,
IXA_ATOM_RADICAL_TRIPLET = 3
} IXA_ATOM_RADICAL;
typedef enum
{
IXA_BOND_TYPE_SINGLE = 1,
IXA_BOND_TYPE_DOUBLE = 2,
IXA_BOND_TYPE_TRIPLE = 3,
IXA_BOND_TYPE_AROMATIC = 4
} IXA_BOND_TYPE;
typedef enum
{
IXA_BOND_WEDGE_NONE = 0,
IXA_BOND_WEDGE_UP = 1,
IXA_BOND_WEDGE_DOWN = 2,
IXA_BOND_WEDGE_EITHER = 3
} IXA_BOND_WEDGE;
typedef enum
{
IXA_DBLBOND_CONFIG_PERCEIVE = 0,
IXA_DBLBOND_CONFIG_EITHER = 1
} IXA_DBLBOND_CONFIG;
typedef enum
{
IXA_STEREO_TOPOLOGY_INVALID = 0,
IXA_STEREO_TOPOLOGY_TETRAHEDRON = 2,
IXA_STEREO_TOPOLOGY_RECTANGLE = 3,
IXA_STEREO_TOPOLOGY_ANTIRECTANGLE = 4
} IXA_STEREO_TOPOLOGY;
typedef enum
{
IXA_STEREO_PARITY_NONE = 0,
IXA_STEREO_PARITY_ODD = 1,
IXA_STEREO_PARITY_EVEN = 2,
IXA_STEREO_PARITY_UNKNOWN = 3
} IXA_STEREO_PARITY;
typedef enum
{
IXA_INCHIBUILDER_OPTION_NewPsOff,
IXA_INCHIBUILDER_OPTION_DoNotAddH,
IXA_INCHIBUILDER_OPTION_SUU,
IXA_INCHIBUILDER_OPTION_SLUUD,
IXA_INCHIBUILDER_OPTION_FixedH,
IXA_INCHIBUILDER_OPTION_RecMet,
IXA_INCHIBUILDER_OPTION_KET,
IXA_INCHIBUILDER_OPTION_15T,
IXA_INCHIBUILDER_OPTION_SaveOpt,
IXA_INCHIBUILDER_OPTION_AuxNone,
IXA_INCHIBUILDER_OPTION_WarnOnEmptyStructure,
IXA_INCHIBUILDER_OPTION_LargeMolecules,
IXA_INCHIBUILDER_OPTION_Polymers,
IXA_INCHIBUILDER_OPTION_Polymers105,
IXA_INCHIBUILDER_OPTION_Polymers105Plus,
IXA_INCHIBUILDER_OPTION_FilterSS,
IXA_INCHIBUILDER_OPTION_InvFilterSS,
IXA_INCHIBUILDER_OPTION_NPZZ,
IXA_INCHIBUILDER_OPTION_SATZZ,
IXA_INCHIBUILDER_OPTION_NoFrameShift,
IXA_INCHIBUILDER_OPTION_FoldCRU,
IXA_INCHIBUILDER_OPTION_NoEdits,
IXA_INCHIBUILDER_OPTION_LooseTSACheck,
IXA_INCHIBUILDER_OPTION_OutErrInChI,
IXA_INCHIBUILDER_OPTION_NoWarnings
#if BUILD_WITH_ENG_OPTIONS==1
,IXA_INCHIBUILDER_OPTION_DoDrv,
IXA_INCHIBUILDER_OPTION_DoDrvReport,
IXA_INCHIBUILDER_OPTION_DoR2C,
IXA_INCHIBUILDER_OPTION_DoneOnly,
IXA_INCHIBUILDER_OPTION_OnlyRecSalt,
IXA_INCHIBUILDER_OPTION_OnlyExact,
IXA_INCHIBUILDER_OPTION_OnlyRecMet
#endif
} IXA_INCHIBUILDER_OPTION;
typedef enum
{
IXA_INCHIBUILDER_STEREOOPTION_SAbs,
IXA_INCHIBUILDER_STEREOOPTION_SNon,
IXA_INCHIBUILDER_STEREOOPTION_SRel,
IXA_INCHIBUILDER_STEREOOPTION_SRac,
IXA_INCHIBUILDER_STEREOOPTION_SUCF
} IXA_INCHIBUILDER_STEREOOPTION;
/* Uncomment the next line if old-API coverage is intended - instead of GetINCHIEx(), GetStructFromINCHIEx() */
/*#define IXA_USES_NON_EX_CORE_API 1*/
/* Comment the next line to disable improved storage mechanism (pre-allocated growing arrays) for IXA_MOL */
/* data structs (added after report by Daniel Lowe on performance deterioraion due to numerous alloc calls) */
#define IXA_USES_SMART_ALLOCS 1
/****************************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************/
/* Functions handling IXA Status Objects */
EXPIMP_TEMPLATE INCHI_API IXA_STATUS_HANDLE INCHI_DECL IXA_STATUS_Create( );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_STATUS_Clear( IXA_STATUS_HANDLE hStatus );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_STATUS_Destroy( IXA_STATUS_HANDLE hStatus );
EXPIMP_TEMPLATE INCHI_API IXA_BOOL INCHI_DECL IXA_STATUS_HasError( IXA_STATUS_HANDLE hStatus );
EXPIMP_TEMPLATE INCHI_API IXA_BOOL INCHI_DECL IXA_STATUS_HasWarning( IXA_STATUS_HANDLE hStatus );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_STATUS_GetCount( IXA_STATUS_HANDLE hStatus );
EXPIMP_TEMPLATE INCHI_API IXA_STATUS INCHI_DECL IXA_STATUS_GetSeverity( IXA_STATUS_HANDLE hStatus,
int vIndex );
EXPIMP_TEMPLATE INCHI_API const char* INCHI_DECL IXA_STATUS_GetMessage( IXA_STATUS_HANDLE hStatus,
int vIndex );
/****************************************************************************/
/* Functions to Create, Clear and Destroy Molecule Objects */
EXPIMP_TEMPLATE INCHI_API IXA_MOL_HANDLE INCHI_DECL IXA_MOL_Create( IXA_STATUS_HANDLE hStatus );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_Clear( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_Destroy( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule );
/****************************************************************************/
/* Functions Operating on Complete Molecules */
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_ReadMolfile( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
const char* pBytes );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_ReadInChI( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
const char* pInChI );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetChiral( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BOOL vChiral );
EXPIMP_TEMPLATE INCHI_API IXA_BOOL INCHI_DECL IXA_MOL_GetChiral( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule );
/****************************************************************************/
/* Functions to Add and Define Atoms */
EXPIMP_TEMPLATE INCHI_API IXA_ATOMID INCHI_DECL IXA_MOL_CreateAtom( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetAtomElement( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
const char* pElement );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetAtomAtomicNumber( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
int vAtomicNumber );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetAtomMass( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
int vMassNumber );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetAtomCharge( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
int vCharge );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetAtomRadical( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
IXA_ATOM_RADICAL vRadical );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetAtomHydrogens( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
int vHydrogenMassNumber,
int vHydrogenCount );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetAtomX( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
double vX );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetAtomY( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
double vY );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetAtomZ( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
double vZ );
/****************************************************************************/
/* Functions to Add and Define Bonds */
EXPIMP_TEMPLATE INCHI_API IXA_BONDID INCHI_DECL IXA_MOL_CreateBond( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom1,
IXA_ATOMID vAtom2 );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetBondType( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vBond,
IXA_BOND_TYPE vType );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetBondWedge( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vBond,
IXA_ATOMID vRefAtom,
IXA_BOND_WEDGE vDirection );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetDblBondConfig( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vBond,
IXA_DBLBOND_CONFIG vConfig );
/*****************************************************************************/
/* Functions to Add and Define Stereodescriptors */
EXPIMP_TEMPLATE INCHI_API IXA_STEREOID INCHI_DECL IXA_MOL_CreateStereoTetrahedron( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vCentralAtom,
IXA_ATOMID vVertex1,
IXA_ATOMID vVertex2,
IXA_ATOMID vVertex3,
IXA_ATOMID vVertex4 );
EXPIMP_TEMPLATE INCHI_API IXA_STEREOID INCHI_DECL IXA_MOL_CreateStereoRectangle( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vCentralBond,
IXA_ATOMID vVertex1,
IXA_ATOMID vVertex2,
IXA_ATOMID vVertex3,
IXA_ATOMID vVertex4 );
EXPIMP_TEMPLATE INCHI_API IXA_STEREOID INCHI_DECL IXA_MOL_CreateStereoAntiRectangle( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vCentralAtom,
IXA_ATOMID vVertex1,
IXA_ATOMID vVertex2,
IXA_ATOMID vVertex3,
IXA_ATOMID vVertex4 );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetStereoParity( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_STEREOID vStereo,
IXA_STEREO_PARITY vParity );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_ReserveSpace( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
int num_atoms,
int num_bonds,
int num_stereos );
/****************************************************************************/
/* Functions to to Treat Extended molecular data */
EXPIMP_TEMPLATE INCHI_API IXA_POLYMERUNITID INCHI_DECL IXA_MOL_CreatePolymerUnit( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_MOL_SetPolymerUnit( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_POLYMERUNITID vPunit,
int vid,
int vtype,
int vsubtype,
int vconn,
int vlabel,
int vna,
int vnb,
double vxbr1[4],
double vxbr2[4],
char vsmt[80],
int *valist,
int *vblist );
/****************************************************************************/
/* Functions to Navigate Within a Molecule */
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetNumAtoms( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetNumBonds( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule );
EXPIMP_TEMPLATE INCHI_API IXA_ATOMID INCHI_DECL IXA_MOL_GetAtomId( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
int vAtomIndex );
EXPIMP_TEMPLATE INCHI_API IXA_BONDID INCHI_DECL IXA_MOL_GetBondId( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
int vBondIndex );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetAtomIndex( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetBondIndex( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vBond );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetAtomNumBonds( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom );
EXPIMP_TEMPLATE INCHI_API IXA_POLYMERUNITID INCHI_DECL IXA_MOL_GetPolymerUnitId( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
int vPolymerUnitIndex );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetPolymerUnitIndex( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_POLYMERUNITID vPolymerUnit );
EXPIMP_TEMPLATE INCHI_API IXA_BONDID INCHI_DECL IXA_MOL_GetAtomBond( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
int vBondIndex );
EXPIMP_TEMPLATE INCHI_API IXA_BONDID INCHI_DECL IXA_MOL_GetCommonBond( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom1,
IXA_ATOMID vAtom2 );
EXPIMP_TEMPLATE INCHI_API IXA_ATOMID INCHI_DECL IXA_MOL_GetBondAtom1( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vBond );
EXPIMP_TEMPLATE INCHI_API IXA_ATOMID INCHI_DECL IXA_MOL_GetBondAtom2( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vBond );
EXPIMP_TEMPLATE INCHI_API IXA_ATOMID INCHI_DECL IXA_MOL_GetBondOtherAtom( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vBond,
IXA_ATOMID vAtom );
/*****************************************************************************/
/* Functions to Return Information About Atoms */
EXPIMP_TEMPLATE INCHI_API const char* INCHI_DECL IXA_MOL_GetAtomElement( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetAtomAtomicNumber( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetAtomMass( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetAtomCharge( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom );
EXPIMP_TEMPLATE INCHI_API IXA_ATOM_RADICAL INCHI_DECL IXA_MOL_GetAtomRadical( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetAtomHydrogens( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom,
int vHydrogenMassNumber );
EXPIMP_TEMPLATE INCHI_API double INCHI_DECL IXA_MOL_GetAtomX( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom );
EXPIMP_TEMPLATE INCHI_API double INCHI_DECL IXA_MOL_GetAtomY( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom );
EXPIMP_TEMPLATE INCHI_API double INCHI_DECL IXA_MOL_GetAtomZ( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_ATOMID vAtom );
/*****************************************************************************/
/* Functions to Return Information About Bonds */
EXPIMP_TEMPLATE INCHI_API IXA_BOND_TYPE INCHI_DECL IXA_MOL_GetBondType( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vBond );
EXPIMP_TEMPLATE INCHI_API IXA_BOND_WEDGE INCHI_DECL IXA_MOL_GetBondWedge( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vBond,
IXA_ATOMID vRefAtom );
EXPIMP_TEMPLATE INCHI_API IXA_DBLBOND_CONFIG INCHI_DECL IXA_MOL_GetDblBondConfig( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_BONDID vBond );
/*****************************************************************************/
/* Functions to return Information About Stereodescriptors */
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetNumStereos( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule );
EXPIMP_TEMPLATE INCHI_API IXA_STEREOID INCHI_DECL IXA_MOL_GetStereoId( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
int vStereoIndex );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetStereoIndex( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_STEREOID vStereo );
EXPIMP_TEMPLATE INCHI_API IXA_STEREO_TOPOLOGY INCHI_DECL IXA_MOL_GetStereoTopology( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_STEREOID vStereo );
EXPIMP_TEMPLATE INCHI_API IXA_ATOMID INCHI_DECL IXA_MOL_GetStereoCentralAtom( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_STEREOID vStereo );
EXPIMP_TEMPLATE INCHI_API IXA_BONDID INCHI_DECL IXA_MOL_GetStereoCentralBond( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_STEREOID vStereo );
EXPIMP_TEMPLATE INCHI_API int INCHI_DECL IXA_MOL_GetStereoNumVertices( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_STEREOID vStereo );
EXPIMP_TEMPLATE INCHI_API IXA_ATOMID INCHI_DECL IXA_MOL_GetStereoVertex( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_STEREOID vStereo,
int vVertexIndex );
EXPIMP_TEMPLATE INCHI_API IXA_STEREO_PARITY INCHI_DECL IXA_MOL_GetStereoParity( IXA_STATUS_HANDLE hStatus,
IXA_MOL_HANDLE hMolecule,
IXA_STEREOID vStereo );
/****************************************************************************/
/* Functions for Generating InChIs */
EXPIMP_TEMPLATE INCHI_API IXA_INCHIBUILDER_HANDLE INCHI_DECL IXA_INCHIBUILDER_Create( IXA_STATUS_HANDLE hStatus );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_INCHIBUILDER_SetMolecule( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder,
IXA_MOL_HANDLE hMolecule );
EXPIMP_TEMPLATE INCHI_API const char* INCHI_DECL IXA_INCHIBUILDER_GetInChI( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder );
EXPIMP_TEMPLATE INCHI_API const char* INCHI_DECL IXA_INCHIBUILDER_GetInChIEx( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hBuilder );
EXPIMP_TEMPLATE INCHI_API const char* INCHI_DECL IXA_INCHIBUILDER_GetAuxInfo( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder );
EXPIMP_TEMPLATE INCHI_API const char* INCHI_DECL IXA_INCHIBUILDER_GetLog( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_INCHIBUILDER_Destroy( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder );
/****************************************************************************/
/* Functions for Specifying/checking InChI Generation Options */
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_INCHIBUILDER_SetOption( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder,
IXA_INCHIBUILDER_OPTION vOption,
IXA_BOOL vValue );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_INCHIBUILDER_SetOption_Stereo( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder,
IXA_INCHIBUILDER_STEREOOPTION vValue );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_INCHIBUILDER_SetOption_Timeout( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder,
int vValue );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_INCHIBUILDER_SetOption_Timeout_MilliSeconds( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder,
long vValue );
EXPIMP_TEMPLATE INCHI_API IXA_BOOL INCHI_DECL IXA_INCHIBUILDER_CheckOption( IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder,
IXA_INCHIBUILDER_OPTION vOption);
EXPIMP_TEMPLATE INCHI_API IXA_BOOL INCHI_DECL IXA_INCHIBUILDER_CheckOption_Stereo(IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder,
IXA_INCHIBUILDER_STEREOOPTION vValue);
EXPIMP_TEMPLATE INCHI_API long INCHI_DECL IXA_INCHIBUILDER_GetOption_Timeout_MilliSeconds(IXA_STATUS_HANDLE hStatus,
IXA_INCHIBUILDER_HANDLE hInChIBuilder);
/****************************************************************************/
/* Functions for Generating InChI Keys */
EXPIMP_TEMPLATE INCHI_API IXA_INCHIKEYBUILDER_HANDLE INCHI_DECL IXA_INCHIKEYBUILDER_Create( IXA_STATUS_HANDLE hStatus );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_INCHIKEYBUILDER_SetInChI( IXA_STATUS_HANDLE hStatus,
IXA_INCHIKEYBUILDER_HANDLE hInChIKeyBuilder,
const char* pInChI );
EXPIMP_TEMPLATE INCHI_API const char* INCHI_DECL IXA_INCHIKEYBUILDER_GetInChIKey( IXA_STATUS_HANDLE hStatus,
IXA_INCHIKEYBUILDER_HANDLE hInChIKeyBuilder );
EXPIMP_TEMPLATE INCHI_API void INCHI_DECL IXA_INCHIKEYBUILDER_Destroy( IXA_STATUS_HANDLE hStatus,
IXA_INCHIKEYBUILDER_HANDLE hInChIKeyBuilder );
#ifdef __cplusplus
}
#endif
#endif
|
8d6d1030516879fafd8ed0b5267734d899d1295d
|
bb7a80648bf830c2fb813cdb335032142cbee06d
|
/mod/app/src/nvr_live.c
|
93c18f8e0cf091a38457a11169c4f24d616c5e03
|
[] |
no_license
|
openhisilicon/HIVIEW
|
44574a29da60e3bb400c7ce97c722dfc9f2959e6
|
60bbfa5cb66cc82f0cdc0bba1242dbc9491b0f37
|
refs/heads/master
| 2023-09-01T01:42:19.069724
| 2023-08-31T09:57:39
| 2023-08-31T09:57:39
| 189,036,134
| 336
| 121
| null | 2023-01-07T14:10:22
| 2019-05-28T13:41:40
|
C
|
UTF-8
|
C
| false
| false
| 8,217
|
c
|
nvr_live.c
|
#include <pthread.h>
#include "cfg.h"
#include "nvr_live.h"
typedef struct {
pthread_t tid;
pthread_mutex_t mutex;
pthread_cond_t cond;
int onvif[GSF_CODEC_NVR_CHN];
int rtsp[GSF_CODEC_NVR_CHN][2];
gsf_chsrc_t chsrc[GSF_CODEC_NVR_CHN];
gsf_shmid_t shmid[GSF_CODEC_NVR_CHN][2];
}live_mng_t;
static live_mng_t gmng;
static void* live_task(void *parm);
int live_mon()
{
int i = 0;
memset(&gmng, 0, sizeof(gmng));
pthread_mutex_init(&gmng.mutex, NULL);
pthread_cond_init(&gmng.cond, NULL);
for(i = 0; i < GSF_CODEC_NVR_CHN; i++)
{
gmng.shmid[i][0].video_shmid = gmng.shmid[i][0].audio_shmid = -1;
gmng.shmid[i][1].video_shmid = gmng.shmid[i][1].audio_shmid = -1;
}
return pthread_create(&gmng.tid, NULL, live_task, (void*)NULL);
}
int live_chsrc_modify(int i, gsf_chsrc_t *chsrc)
{
pthread_mutex_lock(&gmng.mutex);
app_nvr.chsrc[i] = *chsrc;
pthread_cond_signal(&gmng.cond);
pthread_mutex_unlock(&gmng.mutex);
return 0;
}
int live_clear_rtsp()
{
int i = 0;
pthread_mutex_lock(&gmng.mutex);
for(i = 0; i < GSF_CODEC_NVR_CHN; i++)
{
gmng.chsrc[i].st1[0] = gmng.chsrc[i].st2[0] = '\0';
}
pthread_cond_signal(&gmng.cond);
pthread_mutex_unlock(&gmng.mutex);
}
int live_clear_onvif()
{
int i = 0;
pthread_mutex_lock(&gmng.mutex);
for(i = 0; i < GSF_CODEC_NVR_CHN; i++)
{
gmng.chsrc[i].host[0] = '\0';
}
pthread_cond_signal(&gmng.cond);
pthread_mutex_unlock(&gmng.mutex);
}
int live_get_shmid(int layout, int voch[GSF_CODEC_NVR_CHN]
, int st, gsf_shmid_t shmid[GSF_CODEC_NVR_CHN])
{
int i = 0;
for(i = 0; i < layout; i++)
{
shmid[i] = gmng.shmid[voch[i]][st];
printf("i:%d, ch:%d, st:%d, video_shmid:%d\n",
i, voch[i], st, shmid[i].video_shmid);
}
return 0;
}
// [rtsp|onvif|...]://user:pwd@ip:port/path
int url_get_user_pwd(char *url, char *user_pwd)
{
char *p1 = strstr(url, "://");
if(p1)
{
char *p2 = strstr(url, "@");
if(p2)
strncpy(user_pwd, p1+3, p2-p1-3);
}
return 0;
}
int url_add_user_pwd(char *url, char *user_pwd)
{
char *p1 = strstr(url, "@");
if(p1)
return 0;
p1 = strstr(url, "://");
if(p1)
{
char tail[256] = {0};
strncpy(tail, p1+3, sizeof(tail)-1);
strncpy(p1+3, user_pwd, 128);
strncat(url, "@", 256);
strncat(url, tail, 256);
}
return 0;
}
/**
 * live_task - background worker that reconciles per-channel live sources.
 *
 * Loops forever: every cycle it waits up to 3 seconds on gmng.cond (so a
 * configuration change can wake it early), then walks every NVR channel and
 *   1) closes the old RTSP/ONVIF sessions when the channel's source config
 *      (app_nvr.chsrc[i]) differs from the active one (gmng.chsrc[i]), and
 *   2) (re)opens ONVIF discovery and the st1/st2 RTSP streams for channels
 *      that are enabled but not yet connected.
 *
 * @param parm  unused thread argument (pthread start-routine signature).
 * @return never returns under normal operation (NULL only to satisfy the
 *         pthread signature).
 *
 * NOTE(review): gmng.chsrc[i]/shmid[i] are also read by other threads; only
 * the chsrc copy-back is done under gmng.mutex here — confirm that is the
 * intended locking granularity.
 */
static void* live_task(void *parm)
{
  int i = 0;
  while(1)
  {
    // Sleep up to 3s, or until a config change signals gmng.cond.
    struct timespec to;
    to.tv_sec = time(NULL) + 3;
    to.tv_nsec = 0;
    pthread_mutex_lock(&gmng.mutex);
    pthread_cond_timedwait(&gmng.cond, &gmng.mutex, &to);
    pthread_mutex_unlock(&gmng.mutex);

    for(i = 0; i < GSF_CODEC_NVR_CHN; i++)
    {
      // Source config changed for this channel: tear everything down first.
      if(memcmp(&gmng.chsrc[i], &app_nvr.chsrc[i], sizeof(gsf_chsrc_t)))
      {
        if(gmng.rtsp[i][0])
        {
          // Close main-stream (st1) RTSP session.
          printf("close >>>>>> i:%d, st1[%s]\n", i, gmng.chsrc[i].st1);
          GSF_MSG_DEF(gsf_rtsp_url_t, rtsp_url, 8*1024);
          rtsp_url->transp = 0;
          strcpy(rtsp_url->url, gmng.chsrc[i].st1);
          int ret = GSF_MSG_SENDTO(GSF_ID_RTSPS_C_CLOSE, 0, SET, 0
                            , sizeof(gsf_rtsp_url_t)
                            , GSF_IPC_RTSPS, 2000);
          gmng.rtsp[i][0] = 0;
        }
        if(gmng.rtsp[i][1])
        {
          // Close sub-stream (st2) RTSP session.
          printf("close >>>>>> i:%d, st2[%s]\n", i, gmng.chsrc[i].st2);
          GSF_MSG_DEF(gsf_rtsp_url_t, rtsp_url, 8*1024);
          rtsp_url->transp = 0;
          strcpy(rtsp_url->url, gmng.chsrc[i].st2);
          int ret = GSF_MSG_SENDTO(GSF_ID_RTSPS_C_CLOSE, 0, SET, 0
                            , sizeof(gsf_rtsp_url_t)
                            , GSF_IPC_RTSPS, 2000);
          gmng.rtsp[i][1] = 0;
        }
        if(gmng.onvif[i])
        {
          // Close ONVIF session for this device host.
          printf("close >>>>>> i:%d, host[%s]\n", i, gmng.chsrc[i].host);
          GSF_MSG_DEF(gsf_onvif_url_t, onvif_url, 8*1024);
          strcpy(onvif_url->url, gmng.chsrc[i].host);
          int ret = GSF_MSG_SENDTO(GSF_ID_ONVIF_C_CLOSE, 0, SET, 0
                            , sizeof(gsf_onvif_url_t)
                            , GSF_IPC_ONVIF, 2000);
          gmng.onvif[i] = 0;
        }
        // Adopt the new configuration as the active one.
        pthread_mutex_lock(&gmng.mutex);
        gmng.chsrc[i] = app_nvr.chsrc[i];
        pthread_mutex_unlock(&gmng.mutex);

        // unref shmid;
        gmng.shmid[i][0].video_shmid = gmng.shmid[i][0].audio_shmid = -1;
        gmng.shmid[i][1].video_shmid = gmng.shmid[i][1].audio_shmid = -1;
        extern int vo_ly(int num); vo_ly(0);  // refresh video-output layout
      }

      #if 0
      printf("i:%d, en:%d, host:[%s],st1:[%s],st2[%s], onvif:%d, rtsp:%d\n"
            , i, gmng.chsrc[i].en, gmng.chsrc[i].host, gmng.chsrc[i].st1, gmng.chsrc[i].st2
            , gmng.onvif[i], gmng.rtsp[i][0]);
      #endif

      if(gmng.chsrc[i].en)
      {
        // ONVIF discovery: resolve st1/st2 stream URLs from the device host.
        if(strlen(gmng.chsrc[i].host) > 0 && gmng.onvif[i] == 0)
        {
          GSF_MSG_DEF(gsf_onvif_url_t, onvif_url, 8*1024);
          strcpy(onvif_url->url, gmng.chsrc[i].host);
          int ret = GSF_MSG_SENDTO(GSF_ID_ONVIF_C_OPEN, 0, SET, 0
                            , sizeof(gsf_onvif_url_t)
                            , GSF_IPC_ONVIF, 6000);
          printf("open >>>>>> i:%d, host[%s], ret:%d, err:%d\n",
                i, gmng.chsrc[i].host, ret, __pmsg->err);
          if(ret == 0 && __pmsg->err == 0)
          {
            // Re-insert the user:password from the host URL into the
            // media URLs returned by ONVIF, then persist them.
            gsf_onvif_media_url_t *murl = (gsf_onvif_media_url_t*)__pmsg->data;
            char user_pwd[128] = {0};
            url_get_user_pwd(gmng.chsrc[i].host, user_pwd);

            url_add_user_pwd(murl->st1, user_pwd);
            strcpy(gmng.chsrc[i].st1, murl->st1);

            url_add_user_pwd(murl->st2, user_pwd);
            strcpy(gmng.chsrc[i].st2, murl->st2);

            printf("i:%d, get st1[%s], st2[%s]\n", i, gmng.chsrc[i].st1, gmng.chsrc[i].st2);
            gmng.onvif[i] = 1;
            pthread_mutex_lock(&gmng.mutex);
            app_nvr.chsrc[i] = gmng.chsrc[i];
            pthread_mutex_unlock(&gmng.mutex);
          }
        }
        // Open main-stream (st1) RTSP session.
        if(strlen(gmng.chsrc[i].st1) > 0 && gmng.rtsp[i][0] == 0)
        {
          GSF_MSG_DEF(gsf_rtsp_url_t, rtsp_url, 8*1024);
          rtsp_url->transp = 0;
          strcpy(rtsp_url->url, gmng.chsrc[i].st1);
          int ret = GSF_MSG_SENDTO(GSF_ID_RTSPS_C_OPEN, 0, SET, 0
                            , sizeof(gsf_rtsp_url_t)
                            , GSF_IPC_RTSPS, 3000);
          printf("open >>>>>> i:%d, st1[%s], ret:%d, err:%d\n",
                i, gmng.chsrc[i].st1, ret, __pmsg->err);
          if(ret == 0 && __pmsg->err == 0)
          {
            // ref shmid;
            gsf_shmid_t *shmid = (gsf_shmid_t*)__pmsg->data;
            gmng.shmid[i][0] = *shmid;
            gmng.rtsp[i][0] = 1;
            extern int vo_ly(int num); vo_ly(0);
          }
        }
        // Open sub-stream (st2) RTSP session.
        if(strlen(gmng.chsrc[i].st2) > 0 && gmng.rtsp[i][1] == 0)
        {
          GSF_MSG_DEF(gsf_rtsp_url_t, rtsp_url, 8*1024);
          rtsp_url->transp = 0;
          strcpy(rtsp_url->url, gmng.chsrc[i].st2);
          int ret = GSF_MSG_SENDTO(GSF_ID_RTSPS_C_OPEN, 0, SET, 0
                            , sizeof(gsf_rtsp_url_t)
                            , GSF_IPC_RTSPS, 3000);
          // BUGFIX: log st2 here (was mistakenly printing st1).
          printf("open >>>>>> i:%d, st2[%s], ret:%d, err:%d\n",
                i, gmng.chsrc[i].st2, ret, __pmsg->err);
          if(ret == 0 && __pmsg->err == 0)
          {
            // ref shmid;
            gsf_shmid_t *shmid = (gsf_shmid_t*)__pmsg->data;
            gmng.shmid[i][1] = *shmid;
            gmng.rtsp[i][1] = 1;
            extern int vo_ly(int num); vo_ly(0);
          }
        }
      }
    }
  }
  return NULL;
}
|
45dda0aaa3e8cb99a63d3277a55f50aff2ada7e2
|
e73547787354afd9b717ea57fe8dd0695d161821
|
/src/world/area_hos/hos_00/hos_00_6_scenes.c
|
1569df2d838a174164bb3b3afb0b53d73210d38f
|
[] |
no_license
|
pmret/papermario
|
8b514b19653cef8d6145e47499b3636b8c474a37
|
9774b26d93f1045dd2a67e502b6efc9599fb6c31
|
refs/heads/main
| 2023-08-31T07:09:48.951514
| 2023-08-21T18:07:08
| 2023-08-21T18:07:08
| 287,151,133
| 904
| 139
| null | 2023-09-14T02:44:23
| 2020-08-13T01:22:57
|
C
|
UTF-8
|
C
| false
| false
| 22,754
|
c
|
hos_00_6_scenes.c
|
#include "hos_00.h"
#include "sprite/player.h"
#include "world/common/complete/GiveReward.inc.c"
// Waypoint paths (x, y, z) consumed by LoadPath/GetNextPathPos in the cutscene
// scripts below. Arrive/Depart paths are relative offsets added to a captured
// start position; FlightPath_Magikoopa and TwinkReturnPath are absolute.

// Twink's looping flourish when he first flies up to Mario (relative offsets).
Vec3f N(TwinkArrivePath)[] = {
    {    0.0,    0.0,    2.0 },
    {   73.0,   -6.0,    2.0 },
    {  106.0,   32.0,    2.0 },
    {   70.0,   43.0,    2.0 },
    {   30.0,   12.0,    2.0 },
    {  -17.0,  -19.0,    2.0 },
    {  -35.0,   29.0,    2.0 },
    {  -20.0,   43.0,    2.0 },
    {  -11.0,   39.0,    2.0 },
    {    0.0,   -1.0,    0.0 },
};

// Magikoopa's flight path into the scene (absolute world coordinates).
Vec3f N(FlightPath_Magikoopa)[] = {
    {   15.0,  174.0,  -45.0 },
    { -157.0,   92.0,  -44.0 },
    { -371.0,  103.0,  -45.0 },
    { -350.0,   75.0,  -45.0 },
    { -250.0,   40.0,  -45.0 },
};

// Twink's first departure attempt, rising up and away (relative offsets).
Vec3f N(TwinkDepartPath1)[] = {
    {    0.0,    0.0,    0.0 },
    {  -60.0,    3.0,  -11.0 },
    {  -55.0,   13.0,    0.0 },
    {    4.0,   40.0,   -5.0 },
    {   23.0,   35.0,  -10.0 },
    {   63.0,   25.0,  -20.0 },
    {   94.0,   30.0,  -40.0 },
    {  134.0,  250.0,  -80.0 },
};

// Twink flying back down to the party (absolute world coordinates).
Vec3f N(TwinkReturnPath)[] = {
    {  -72.0,  318.0,  -80.0 },
    { -127.0,  153.0,  -60.0 },
    { -174.0,  114.0,  -44.0 },
    { -243.0,   90.0,  -45.0 },
    { -268.0,   50.0,  -46.0 },
    { -174.0,   10.0,  -43.0 },
};

// Twink's final departure, ending far off-screen (relative offsets).
Vec3f N(TwinkDepartPath2)[] = {
    {    0.0,    0.0,    0.0 },
    {  -30.0,    3.0,  -11.0 },
    {  -55.0,   13.0,    0.0 },
    {  -73.0,   20.0,   -5.0 },
    { -103.0,   40.0,    0.0 },
    { -143.0,   60.0,    0.0 },
    { -173.0,   50.0,   26.0 },
    { -126.0,   35.0,   16.0 },
    { 1111.0,  570.0, -527.0 },
};
// Unlocks battle action commands for the player (permanent save-data flag).
// Called after Twink hands over the Lucky Star.
API_CALLABLE(N(EnableActionCommands)) {
    gPlayerData.hasActionCommands = TRUE;
    return ApiStatus_DONE2;
}
// Unused helper: reads a magnitude and an angle (degrees) from the script
// arguments and stores magnitude * sin(angle) into the third (output) variable.
API_CALLABLE(N(UnusedTrigFunc)) {
    Bytecode* args = script->ptrReadPos;
    f32 magnitude = evt_get_float_variable(script, *args++);
    f32 angle = evt_get_float_variable(script, *args++);
    evt_set_float_variable(script, *args++, magnitude * sin_deg(angle));
    return ApiStatus_DONE2;
}
// Twink's position on the previous call, used to derive his facing from
// his direction of travel while he follows a path.
f32 N(LastTwinkPosX) = 0;
f32 N(LastTwinkPosZ) = 0;

// Turns the partner and player to face Twink, and turns Twink to face the
// direction he is moving (from last frame's position toward the current one).
// Updates the cached last-position statics each call.
API_CALLABLE(N(HavePartyFaceTwink)) {
    Npc* partner = get_npc_unsafe(NPC_PARTNER);
    Npc* npc = get_npc_unsafe(NPC_Twink);

    partner->yaw = atan2(partner->pos.x, partner->pos.z, npc->pos.x, npc->pos.z);
    gPlayerStatus.targetYaw = atan2(gPlayerStatus.pos.x, gPlayerStatus.pos.z, npc->pos.x, npc->pos.z);
    npc->yaw = atan2(N(LastTwinkPosX), N(LastTwinkPosZ), npc->pos.x, npc->pos.z);
    N(LastTwinkPosX) = npc->pos.x;
    N(LastTwinkPosZ) = npc->pos.z;
    return ApiStatus_DONE2;
}
// Writes a look-at point 40 units in front of the flying Magikoopa (side
// chosen by current yaw) into varTable[0..2], then turns the NPC to face its
// direction of travel using the previous-frame position the calling script
// left in varTable[6] (x) and varTable[8] (z).
API_CALLABLE(N(UpdateMagikoopaAngles)) {
    Npc* npc = get_npc_unsafe(NPC_FlyingMagikoopa);

    if (npc->yaw < 180.0f) {
        // facing roughly west: point lies to the NPC's -x side
        script->varTable[0] = npc->pos.x - 40.0f;
        script->varTable[1] = npc->pos.y + 10.0f;
        script->varTable[2] = npc->pos.z;
    } else {
        // facing roughly east: point lies to the NPC's +x side
        script->varTable[0] = npc->pos.x + 40.0f;
        script->varTable[1] = npc->pos.y + 10.0f;
        script->varTable[2] = npc->pos.z;
    }

    npc->yaw = atan2(script->varTable[6], script->varTable[8], npc->pos.x, npc->pos.z);
    return ApiStatus_DONE2;
}
#include "world/common/todo/SetCamera0Flag1000.inc.c"
// From a base point (varTable[0] = x, varTable[2] = z), computes two camera
// anchor points 100 units away at headings 310 and 130 degrees, writing
// point A (x,z) into varTable[0..1] and point B (x,z) into varTable[2..3].
// Consumed by SetCamPosA/SetCamPosB in EVS_Scene_TwinkDeparts.
API_CALLABLE(N(AddOffsetForCamPos)) {
    s32 baseX = script->varTable[0];
    s32 baseZ = script->varTable[2];

    script->varTable[0] = baseX + (sin_deg(310.0f) * 100.0f);
    script->varTable[1] = baseZ - (cos_deg(310.0f) * 100.0f);
    script->varTable[2] = baseX + (sin_deg(130.0f) * 100.0f);
    script->varTable[3] = baseZ - (cos_deg(130.0f) * 100.0f);
    return ApiStatus_DONE2;
}
// Per-frame loop (killed externally via EVT_KILL_THREAD) that tracks the
// flying Magikoopa: keeps its yaw aligned with its motion and makes the
// player, partner, and Twink all face it.
EvtScript N(EVS_UpdateFacingMagikoopa) = {
    EVT_CALL(GetNpcPos, NPC_FlyingMagikoopa, LVar6, LVar7, LVar8)
    EVT_LABEL(0)
        // LVar3..5 = frame-to-frame delta; LVar6..8 = previous position
        EVT_CALL(GetNpcPos, NPC_FlyingMagikoopa, LVar3, LVar4, LVar5)
        EVT_SET(LVar9, LVar3)
        EVT_SET(LVarA, LVar4)
        EVT_SET(LVarB, LVar5)
        EVT_SUB(LVar3, LVar6)
        EVT_SUB(LVar4, LVar7)
        EVT_SUB(LVar5, LVar8)
        EVT_CALL(N(UpdateMagikoopaAngles))
        EVT_SET(LVar6, LVar9)
        EVT_SET(LVar7, LVarA)
        EVT_SET(LVar8, LVarB)
        // everyone watches the Magikoopa fly by
        EVT_CALL(PlayerFaceNpc, NPC_FlyingMagikoopa, FALSE)
        EVT_CALL(GetAngleBetweenNPCs, NPC_Twink, NPC_FlyingMagikoopa, LVar0)
        EVT_CALL(InterpNpcYaw, NPC_Twink, LVar0, 0)
        EVT_CALL(GetAngleBetweenNPCs, NPC_PARTNER, NPC_FlyingMagikoopa, LVar0)
        EVT_CALL(InterpNpcYaw, NPC_PARTNER, LVar0, 0)
        EVT_WAIT(1)
        EVT_GOTO(0)
    EVT_RETURN
    EVT_END
};
// Twink nervously looks left and right every 10 frames until the thread is
// killed by the caller.
EvtScript N(EVS_ConfusedTwinkLookingAround) = {
    EVT_LOOP(0)
        EVT_CALL(InterpNpcYaw, NPC_Twink, 270, 0)
        EVT_WAIT(10 * DT)
        EVT_CALL(InterpNpcYaw, NPC_Twink, 90, 0)
        EVT_WAIT(10 * DT)
    EVT_END_LOOP
    EVT_RETURN
    EVT_END
};
// add an offset to Twink's position to counter the one from his sprite
// Gently bobs Twink up and down (+5 then -5 units, one unit every 3 frames)
// in an endless loop, countering the hover offset baked into his sprite.
EvtScript N(EVS_Twink_CancelHoverOffset) = {
    EVT_LOOP(0)
        // drift upward by 5 units
        EVT_LOOP(5)
            EVT_CALL(GetNpcPos, NPC_Twink, LVar0, LVar1, LVar2)
            EVT_ADD(LVar1, 1)
            EVT_CALL(SetNpcPos, NPC_Twink, LVar0, LVar1, LVar2)
            EVT_WAIT(3)
        EVT_END_LOOP
        EVT_WAIT(3)
        // drift back down by 5 units
        EVT_LOOP(5)
            EVT_CALL(GetNpcPos, NPC_Twink, LVar0, LVar1, LVar2)
            EVT_ADD(LVar1, -1)
            EVT_CALL(SetNpcPos, NPC_Twink, LVar0, LVar1, LVar2)
            EVT_WAIT(3)
        EVT_END_LOOP
        EVT_WAIT(3)
    EVT_END_LOOP
    EVT_RETURN
    EVT_END
};
// Keeps the Lucky Star item entity pinned just above Twink's head every
// frame while he carries it (runs until killed by the caller).
EvtScript N(EVS_Twink_CarryItem) = {
    EVT_LOOP(0)
        EVT_CALL(GetNpcPos, NPC_Twink, LVar0, LVar1, LVar2)
        EVT_ADD(LVar1, 15)
        EVT_ADD(LVar2, 2)
        EVT_CALL(SetItemPos, MV_LuckyStarItem, LVar0, LVar1, LVar2)
        EVT_WAIT(1)
    EVT_END_LOOP
    EVT_RETURN
    EVT_END
};
// Cutscene: Twink crashes into Mario, introduces himself, gifts the Lucky
// Star (unlocking action commands), then Magikoopa flies in and interrupts.
EvtScript N(EVS_Scene_MeetingTwink) = {
    // wait until the player walks past x = -100 to trigger the scene
    EVT_LOOP(0)
        EVT_WAIT(1)
        EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
        EVT_IF_LT(LVar0, -100)
            EVT_BREAK_LOOP
        EVT_END_IF
    EVT_END_LOOP
    EVT_CALL(DisablePlayerInput, TRUE)
    EVT_CALL(func_802CF56C, 2)
    EVT_THREAD
        EVT_ADD(LVar0, -20)
        EVT_CALL(PlayerMoveTo, LVar0, LVar2, 10 * DT)
    EVT_END_THREAD
    // Twink drops from the sky and bonks into Mario
    EVT_CALL(SetNpcPos, NPC_Twink, 50, 180, 0)
    EVT_CALL(SetNpcJumpscale, NPC_Twink, 0)
    EVT_ADD(LVar0, -10)
    EVT_CALL(NpcJump0, NPC_Twink, LVar0, 20, LVar2, 15 * DT)
    EVT_CALL(PlaySoundAtPlayer, SOUND_HIT_PLAYER_NORMAL, SOUND_SPACE_DEFAULT)
    EVT_CALL(SetPlayerAnimation, ANIM_Mario1_VacantStare)
    EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_Cringe)
    // Twink bounces off and lands dazed while the camera shakes
    EVT_THREAD
        EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_Disappointed)
        EVT_CALL(SetNpcJumpscale, NPC_Twink, EVT_FLOAT(0.6))
        EVT_CALL(GetNpcPos, NPC_Twink, LVar0, LVar1, LVar2)
        EVT_ADD(LVar0, -100)
        EVT_CALL(NpcJump0, NPC_Twink, LVar0, 10, LVar2, 20 * DT)
        EVT_CALL(SetPlayerAnimation, ANIM_Mario1_Idle)
    EVT_END_THREAD
    EVT_THREAD
        EVT_CALL(ShakeCam, CAM_DEFAULT, 0, 5, EVT_FLOAT(2.0))
    EVT_END_THREAD
    // Twink tumbles: spin three full turns (45 degrees per frame),
    // swapping front/back animations as he passes 270 and 90 degrees
    EVT_THREAD
        EVT_SET(LVar3, 360)
        EVT_LOOP(3)
            EVT_LABEL(22)
                EVT_WAIT(1)
                EVT_SUB(LVar3, 45)
                EVT_IF_GT(LVar3, 0)
                    EVT_CALL(SetNpcRotation, NPC_Twink, 0, LVar3, 0)
                    EVT_IF_EQ(LVar3, 270)
                        EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_Back)
                    EVT_END_IF
                    EVT_IF_EQ(LVar3, 90)
                        EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_Disappointed)
                    EVT_END_IF
                    EVT_GOTO(22)
                EVT_END_IF
        EVT_END_LOOP
        EVT_CALL(SetNpcRotation, NPC_Twink, 0, 0, 0)
    EVT_END_THREAD
    // introductions begin
    EVT_CALL(ShowMessageAtScreenPos, MSG_HOS_000D, 160, 40)
    EVT_CALL(SetMusicTrack, 0, SONG_TWINK_THEME, 0, 8)
    EVT_CALL(SetPlayerAnimation, ANIM_Mario1_Idle)
    EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_Idle)
    EVT_CALL(NpcFacePlayer, NPC_Twink, 0)
    EVT_CALL(SpeakToPlayer, NPC_Twink, ANIM_Twink_Shout, ANIM_Twink_Idle, 0, MSG_HOS_000E)
    EVT_CALL(N(SetCamera0Flag1000))
    EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
    EVT_ADD(LVar0, -20)
    EVT_CALL(UseSettingsFrom, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetPanTarget, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetCamDistance, CAM_DEFAULT, 275)
    EVT_CALL(SetCamPitch, CAM_DEFAULT, EVT_FLOAT(16.0), EVT_FLOAT(-8.5))
    EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(3.0 / DT))
    EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
    // Twink excitedly recognizes Mario and hops; partner hops over too
    EVT_CALL(SetNpcSpeed, NPC_Twink, EVT_FLOAT(3.0 / DT))
    EVT_CALL(GetNpcPos, NPC_Twink, LVar0, LVar1, LVar2)
    EVT_ADD(LVar0, 40)
    EVT_CALL(NpcMoveTo, NPC_Twink, LVar0, LVar2, 0)
    EVT_CALL(ShowEmote, NPC_Twink, EMOTE_SHOCK, 45, 20, EMOTER_NPC, 0, 0, 0, 0)
    EVT_WAIT(20 * DT)
    EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_ShoutJoy)
    EVT_CALL(SetNpcJumpscale, NPC_Twink, EVT_FLOAT(1.0))
    EVT_LOOP(2)
        EVT_CALL(GetNpcPos, NPC_Twink, LVar0, LVar1, LVar2)
        EVT_CALL(NpcJump0, NPC_Twink, LVar0, LVar1, LVar2, 15 * DT)
    EVT_END_LOOP
    EVT_CALL(DisablePartnerAI, 0)
    EVT_CALL(SetNpcJumpscale, NPC_PARTNER, 0)
    EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
    EVT_ADD(LVar0, 25)
    EVT_CALL(NpcJump0, NPC_PARTNER, LVar0, LVar1, LVar2, 10 * DT)
    EVT_CALL(SpeakToPlayer, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_000F)
    EVT_WAIT(5 * DT)
    // Twink loops around the party along TwinkArrivePath (offsets from LVar4..6)
    EVT_CALL(GetNpcPos, NPC_Twink, LVar4, LVar5, LVar6)
    EVT_CALL(LoadPath, 70 * DT, EVT_PTR(N(TwinkArrivePath)), ARRAY_COUNT(N(TwinkArrivePath)), EASING_LINEAR)
    EVT_LABEL(1)
        EVT_CALL(GetNextPathPos)
        EVT_ADDF(LVar1, LVar4)
        EVT_ADDF(LVar2, LVar5)
        EVT_ADDF(LVar3, LVar6)
        EVT_CALL(SetNpcPos, NPC_Twink, LVar1, LVar2, LVar3)
        EVT_CALL(N(HavePartyFaceTwink))
        EVT_WAIT(1)
        EVT_IF_EQ(LVar0, 1)
            EVT_GOTO(1)
        EVT_END_IF
    EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_Idle)
    EVT_WAIT(10 * DT)
    EVT_CALL(SpeakToPlayer, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_0010)
    EVT_CALL(SetNpcYaw, NPC_Twink, 270)
    EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_Back)
    EVT_CALL(SpeakToPlayer, NPC_Twink, -1, -1, 5, MSG_HOS_0011)
    // spawn the Lucky Star above Twink and have him carry it over to Mario
    EVT_CALL(GetNpcPos, NPC_Twink, LVar0, LVar1, LVar2)
    EVT_ADD(LVar1, 15)
    EVT_CALL(MakeItemEntity, ITEM_LUCKY_STAR, LVar0, LVar1, LVar2, ITEM_SPAWN_MODE_DECORATION, 0)
    EVT_SET(MV_LuckyStarItem, LVar0)
    EVT_EXEC_GET_TID(N(EVS_Twink_CancelHoverOffset), LVar8)
    EVT_EXEC_GET_TID(N(EVS_Twink_CarryItem), LVar9)
    EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_Wink)
    EVT_CALL(ContinueSpeech, NPC_Twink, -1, -1, 512, MSG_HOS_0012)
    EVT_CALL(GetPlayerPos, LVar5, LVar3, LVar4)
    EVT_ADD(LVar5, -30)
    EVT_CALL(NpcMoveTo, NPC_Twink, LVar5, LVar4, 15 * DT)
    EVT_KILL_THREAD(LVar9)
    EVT_KILL_THREAD(LVar8)
    // toss the star in an arc (up with ease-out, down with ease-in) to Mario
    EVT_THREAD
        EVT_CALL(GetNpcPos, NPC_Twink, LVar0, LVar1, LVar2)
        EVT_ADD(LVar1, 15)
        EVT_SET(LVar4, LVar1)
        EVT_ADD(LVar4, 30)
        EVT_SET(LVar3, LVar0)
        EVT_CALL(MakeLerp, LVar1, LVar4, 7, EASING_QUADRATIC_OUT)
        EVT_LOOP(0)
            EVT_CALL(UpdateLerp)
            EVT_CALL(SetItemPos, MV_LuckyStarItem, LVar3, LVar0, LVar2)
            EVT_WAIT(1)
            EVT_ADD(LVar3, 2)
            EVT_IF_EQ(LVar1, 0)
                EVT_BREAK_LOOP
            EVT_END_IF
        EVT_END_LOOP
        EVT_CALL(GetPlayerPos, LVar5, LVar6, LVar7)
        EVT_ADD(LVar6, 38)
        EVT_CALL(MakeLerp, LVar0, LVar6, 7, EASING_QUADRATIC_IN)
        EVT_LOOP(0)
            EVT_CALL(UpdateLerp)
            EVT_CALL(SetItemPos, MV_LuckyStarItem, LVar3, LVar0, LVar2)
            EVT_WAIT(1)
            EVT_ADD(LVar3, 2)
            EVT_IF_EQ(LVar1, 0)
                EVT_BREAK_LOOP
            EVT_END_IF
        EVT_END_LOOP
        EVT_CALL(RemoveItemEntity, MV_LuckyStarItem)
    EVT_END_THREAD
    EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_TossItem)
    EVT_WAIT(11)
    EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_Pleased)
    // award the Lucky Star key item and unlock action commands
    EVT_SET(LVar0, 7)
    EVT_SET(LVar1, 4)
    EVT_CALL(ShowGotItem, LVar0, TRUE, 0)
    EVT_CALL(AddKeyItem, LVar0)
    EVT_CALL(N(EnableActionCommands))
    EVT_CALL(SetNpcAnimation, NPC_Twink, ANIM_Twink_Idle)
    EVT_WAIT(20 * DT)
    // branching dialogue: keep asking until the player answers "yes"
    EVT_CALL(SpeakToPlayer, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_0013)
    EVT_CALL(ShowChoice, MSG_Choice_0013)
    EVT_IF_EQ(LVar0, 1)
        EVT_CALL(ContinueSpeech, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_0014)
        EVT_CALL(ShowChoice, MSG_Choice_0013)
        EVT_IF_NE(LVar0, 0)
            EVT_CALL(ContinueSpeech, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_0015)
        EVT_ELSE
            EVT_GOTO(15)
        EVT_END_IF
    EVT_ELSE
        EVT_LABEL(15)
        EVT_CALL(ContinueSpeech, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_0016)
        EVT_CALL(SpeakToPlayer, NPC_PARTNER, ANIM_WorldGoombario_Talk, ANIM_WorldGoombario_Idle, 0, MSG_HOS_0017)
        EVT_CALL(SpeakToPlayer, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_0018)
        EVT_CALL(SetPlayerAnimation, ANIM_MarioW2_SpeakUp)
        // hand control to Twink's AI (npc var 0 = 1) and wait for it to
        // signal completion (npc var 0 = 2)
        EVT_CALL(SetNpcVar, NPC_Twink, 0, 1)
        EVT_LOOP(0)
            EVT_CALL(GetNpcVar, NPC_Twink, 0, LVar0)
            EVT_IF_EQ(LVar0, 2)
                EVT_BREAK_LOOP
            EVT_END_IF
            EVT_WAIT(1)
        EVT_END_LOOP
    EVT_END_IF
    EVT_CALL(DisablePartnerAI, 0)
    EVT_CALL(SetNpcAnimation, NPC_PARTNER, PARTNER_ANIM_IDLE)
    EVT_CALL(SpeakToPlayer, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_002F)
    EVT_CALL(SpeakToPlayer, NPC_PARTNER, ANIM_WorldGoombario_Talk, ANIM_WorldGoombario_Idle, 0, MSG_HOS_0030)
    EVT_WAIT(20 * DT)
    // Magikoopa's entrance: Kammy's theme plays and the camera pans west
    EVT_EXEC(N(EVS_PlayKammyKoopaSong))
    EVT_CALL(ShowMessageAtScreenPos, MSG_HOS_0031, 320, 0)
    EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
    EVT_CALL(UseSettingsFrom, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(3.0 / DT))
    EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
    EVT_CALL(WaitForCam, CAM_DEFAULT, EVT_FLOAT(1.0))
    EVT_THREAD
        EVT_WAIT(20 * DT)
        EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
        EVT_ADD(LVar0, -250)
        EVT_DIV(LVar0, 2)
        EVT_CALL(SetPanTarget, CAM_DEFAULT, -250, 53, 0)
        EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(1.4 / DT))
        EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
        EVT_CALL(WaitForCam, CAM_DEFAULT, EVT_FLOAT(1.0))
        EVT_WAIT(10 * DT)
        EVT_CALL(SetPanTarget, CAM_DEFAULT, LVar0, 0, 0)
        EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(2.0 / DT))
        EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
        EVT_CALL(WaitForCam, CAM_DEFAULT, EVT_FLOAT(1.0))
    EVT_END_THREAD
    // fly Magikoopa in along her path while everyone tracks her
    EVT_EXEC_GET_TID(N(EVS_UpdateFacingMagikoopa), LVarA)
    EVT_CALL(LoadPath, 67 * DT, EVT_PTR(N(FlightPath_Magikoopa)), ARRAY_COUNT(N(FlightPath_Magikoopa)), EASING_LINEAR)
    EVT_LABEL(2)
        EVT_CALL(GetNextPathPos)
        EVT_CALL(SetNpcPos, NPC_FlyingMagikoopa, LVar1, LVar2, LVar3)
        EVT_WAIT(1)
        EVT_IF_EQ(LVar0, 1)
            EVT_GOTO(2)
        EVT_END_IF
    EVT_KILL_THREAD(LVarA)
    // hovering bob for the Magikoopa (runs for the rest of the scene)
    EVT_CHILD_THREAD
        EVT_LOOP(0)
            EVT_LOOP(10)
                EVT_CALL(GetNpcPos, NPC_FlyingMagikoopa, LVar0, LVar1, LVar2)
                EVT_ADD(LVar1, 1)
                EVT_CALL(SetNpcPos, NPC_FlyingMagikoopa, LVar0, LVar1, LVar2)
                EVT_WAIT(2)
            EVT_END_LOOP
            EVT_WAIT(3)
            EVT_LOOP(10)
                EVT_CALL(GetNpcPos, NPC_FlyingMagikoopa, LVar0, LVar1, LVar2)
                EVT_ADD(LVar1, -1)
                EVT_CALL(SetNpcPos, NPC_FlyingMagikoopa, LVar0, LVar1, LVar2)
                EVT_WAIT(2)
            EVT_END_LOOP
            EVT_WAIT(3)
        EVT_END_LOOP
    EVT_END_CHILD_THREAD
    EVT_CALL(SetNpcAnimation, NPC_FlyingMagikoopa, ANIM_FlyingMagikoopa_Anim00)
    // brief wobble as she comes to a stop
    EVT_LOOP(3)
        EVT_CALL(SetNpcRotation, NPC_FlyingMagikoopa, 0, 0, 10)
        EVT_WAIT(1)
        EVT_CALL(SetNpcRotation, NPC_FlyingMagikoopa, 0, 0, 0)
        EVT_WAIT(1)
    EVT_END_LOOP
    EVT_WAIT(20 * DT)
    EVT_EXEC_GET_TID(N(EVS_ConfusedTwinkLookingAround), LVarA)
    EVT_CALL(SpeakToPlayer, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Still, 5, MSG_HOS_0032)
    EVT_KILL_THREAD(LVarA)
    EVT_WAIT(10 * DT)
    // close-up on the Magikoopa for her line, then pan back toward the party
    EVT_CALL(GetNpcPos, NPC_FlyingMagikoopa, LVar0, LVar1, LVar2)
    EVT_CALL(UseSettingsFrom, CAM_DEFAULT, LVar0, 0, LVar2)
    EVT_CALL(SetPanTarget, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetCamDistance, CAM_DEFAULT, EVT_FLOAT(260.0))
    EVT_CALL(SetCamPitch, CAM_DEFAULT, EVT_FLOAT(13.0), EVT_FLOAT(-8.0))
    EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(90.0))
    EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
    EVT_WAIT(1)
    EVT_CALL(SpeakToPlayer, NPC_FlyingMagikoopa, ANIM_FlyingMagikoopa_Anim09, ANIM_FlyingMagikoopa_Anim00, 512, MSG_HOS_0033)
    EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
    EVT_ADD(LVar0, -250)
    EVT_DIV(LVar0, 2)
    EVT_CALL(UseSettingsFrom, CAM_DEFAULT, LVar0, 0, 0)
    EVT_CALL(SetPanTarget, CAM_DEFAULT, LVar0, 0, 0)
    EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(4.0 / DT))
    EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
    // hand off to the Magikoopa's AI (npc var 0 = 1 starts her next phase)
    EVT_CALL(SetNpcVar, NPC_FlyingMagikoopa, 0, 1)
    EVT_CALL(EnablePartnerAI)
    EVT_UNBIND
    EVT_RETURN
    EVT_END
};
// Cutscene: Twink says goodbye, flies off, circles back for one last word,
// then departs for good; advances the story to CH0_TWINK_GAVE_LUCKY_STAR.
EvtScript N(EVS_Scene_TwinkDeparts) = {
    EVT_CALL(SetTimeFreezeMode, TIME_FREEZE_PARTIAL)
    EVT_CALL(SetMusicTrack, 0, SONG_TWINK_THEME, 0, 8)
    EVT_WAIT(60 * DT)
    // frame the midpoint between Mario and Twink
    EVT_CALL(SetCamLeadPlayer, CAM_DEFAULT, TRUE)
    EVT_CALL(GetNpcPos, NPC_Twink, LVar3, LVar1, LVar2)
    EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
    EVT_ADD(LVar0, LVar3)
    EVT_DIV(LVar0, 2)
    EVT_CALL(SetPanTarget, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetCamDistance, CAM_DEFAULT, EVT_FLOAT(275.0))
    EVT_CALL(SetCamPitch, CAM_DEFAULT, EVT_FLOAT(16.0), EVT_FLOAT(-9.0))
    EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(90.0))
    EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
    EVT_WAIT(1)
    EVT_CALL(InterpNpcYaw, NPC_Twink, 90, 0)
    EVT_CALL(SpeakToPlayer, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_0034)
    // Mario hops in place in response
    EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
    EVT_CALL(SetPlayerJumpscale, EVT_FLOAT(0.5))
    EVT_CALL(PlayerJump, LVar0, LVar1, LVar2, 15 * DT)
    EVT_WAIT(10 * DT)
    EVT_CALL(SpeakToPlayer, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_0035)
    // pull the camera far back as Twink takes off
    EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
    EVT_CALL(UseSettingsFrom, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetPanTarget, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetCamDistance, CAM_DEFAULT, EVT_FLOAT(700.0))
    EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(1.3 / DT))
    EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
    // first departure: fly TwinkDepartPath1 as offsets from his start (LVar4..6)
    EVT_CALL(GetNpcPos, NPC_Twink, LVar4, LVar5, LVar6)
    EVT_CALL(LoadPath, 85 * DT, EVT_PTR(N(TwinkDepartPath1)), ARRAY_COUNT(N(TwinkDepartPath1)), EASING_LINEAR)
    EVT_LABEL(0)
        EVT_CALL(GetNextPathPos)
        EVT_ADDF(LVar1, LVar4)
        EVT_ADDF(LVar2, LVar5)
        EVT_ADDF(LVar3, LVar6)
        EVT_CALL(SetNpcPos, NPC_Twink, LVar1, LVar2, LVar3)
        EVT_CALL(N(HavePartyFaceTwink))
        EVT_WAIT(1)
        EVT_IF_EQ(LVar0, 1)
            EVT_GOTO(0)
        EVT_END_IF
    EVT_WAIT(45 * DT)
    // he changes his mind and flies back down (absolute path, in a thread)
    EVT_THREAD
        EVT_CALL(LoadPath, 60 * DT, EVT_PTR(N(TwinkReturnPath)), ARRAY_COUNT(N(TwinkReturnPath)), EASING_LINEAR)
        EVT_LABEL(1)
            EVT_CALL(GetNextPathPos)
            EVT_CALL(SetNpcPos, NPC_Twink, LVar1, LVar2, LVar3)
            EVT_CALL(N(HavePartyFaceTwink))
            EVT_WAIT(1)
            EVT_IF_EQ(LVar0, 1)
                EVT_GOTO(1)
            EVT_END_IF
    EVT_END_THREAD
    EVT_WAIT(30 * DT)
    // re-frame the midpoint between Mario and Twink for the last exchange
    EVT_CALL(GetNpcPos, NPC_Twink, LVar3, LVar1, LVar2)
    EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
    EVT_ADD(LVar0, LVar3)
    EVT_DIV(LVar0, 2)
    EVT_CALL(UseSettingsFrom, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetPanTarget, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetCamDistance, CAM_DEFAULT, EVT_FLOAT(275.0))
    EVT_CALL(SetCamPitch, CAM_DEFAULT, EVT_FLOAT(16.0), EVT_FLOAT(-8.5))
    EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(2.0 / DT))
    EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
    EVT_CALL(WaitForCam, CAM_DEFAULT, EVT_FLOAT(1.0))
    EVT_CALL(SpeakToPlayer, NPC_Twink, ANIM_Twink_Talk, ANIM_Twink_Idle, 0, MSG_HOS_0036)
    // final departure along TwinkDepartPath2 (offsets, eased, in a thread)
    EVT_THREAD
        EVT_CALL(GetNpcPos, NPC_Twink, LVar4, LVar5, LVar6)
        EVT_CALL(LoadPath, 120 * DT, EVT_PTR(N(TwinkDepartPath2)), ARRAY_COUNT(N(TwinkDepartPath2)), EASING_QUADRATIC_IN)
        EVT_LABEL(2)
            EVT_CALL(GetNextPathPos)
            EVT_ADDF(LVar1, LVar4)
            EVT_ADDF(LVar2, LVar5)
            EVT_ADDF(LVar3, LVar6)
            EVT_CALL(SetNpcPos, NPC_Twink, LVar1, LVar2, LVar3)
            EVT_CALL(N(HavePartyFaceTwink))
            EVT_WAIT(1)
            EVT_IF_EQ(LVar0, 1)
                EVT_GOTO(2)
            EVT_END_IF
    EVT_END_THREAD
    // slow sweeping camera move between the two anchor points computed
    // by AddOffsetForCamPos (A = LVar0/LVar1, B = LVar2/LVar3)
    EVT_CALL(SetCamDistance, CAM_DEFAULT, EVT_FLOAT(300.0))
    EVT_CALL(SetCamPitch, CAM_DEFAULT, EVT_FLOAT(12.5), EVT_FLOAT(-10.0))
    EVT_CALL(N(AddOffsetForCamPos))
    EVT_CALL(SetCamPosA, CAM_DEFAULT, LVar0, LVar1)
    EVT_CALL(SetCamPosB, CAM_DEFAULT, LVar2, LVar3)
    EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(0.5))
    EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
    EVT_WAIT(30 * DT)
    // Twink spins one full turn (36 degrees per frame) as a farewell
    EVT_CALL(SetNpcFlagBits, NPC_Twink, NPC_FLAG_IGNORE_CAMERA_FOR_YAW, TRUE)
    EVT_SETF(LVar0, EVT_FLOAT(0.0))
    EVT_LOOP(10)
        EVT_ADDF(LVar0, EVT_FLOAT(36.0))
        EVT_CALL(SetNpcRotation, NPC_Twink, 0, LVar0, 0)
        EVT_WAIT(1)
    EVT_END_LOOP
    EVT_CALL(SetNpcFlagBits, NPC_Twink, NPC_FLAG_IGNORE_CAMERA_FOR_YAW, FALSE)
    EVT_WAIT(120 * DT)
    // wrap up: partner comments, Mario nods, story flag set, camera resets
    EVT_CALL(InterpNpcYaw, NPC_PARTNER, 270, 0)
    EVT_CALL(PlayerFaceNpc, NPC_PARTNER, FALSE)
    EVT_CALL(GetPlayerPos, LVar0, LVar1, LVar2)
    EVT_CALL(UseSettingsFrom, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetPanTarget, CAM_DEFAULT, LVar0, LVar1, LVar2)
    EVT_CALL(SetCamDistance, CAM_DEFAULT, EVT_FLOAT(275.0))
    EVT_CALL(SetCamPitch, CAM_DEFAULT, EVT_FLOAT(13.0), EVT_FLOAT(-7.5))
    EVT_CALL(SetCamSpeed, CAM_DEFAULT, EVT_FLOAT(90.0))
    EVT_CALL(PanToTarget, CAM_DEFAULT, 0, 1)
    EVT_WAIT(10 * DT)
    EVT_CALL(DisablePartnerAI, 0)
    EVT_CALL(SpeakToPlayer, NPC_PARTNER, ANIM_WorldGoombario_Talk, ANIM_WorldGoombario_Idle, 0, MSG_HOS_0037)
    EVT_CALL(EnablePartnerAI)
    EVT_CALL(SetPlayerAnimation, ANIM_Mario1_NodYes)
    EVT_WAIT(30 * DT)
    EVT_CALL(SetPlayerAnimation, ANIM_Mario1_Idle)
    EVT_THREAD
        EVT_SET(GB_StoryProgress, STORY_CH0_TWINK_GAVE_LUCKY_STAR)
        EVT_CALL(SetTimeFreezeMode, TIME_FREEZE_NORMAL)
        EVT_CALL(ResetCam, CAM_DEFAULT, EVT_FLOAT(4.0 / DT))
    EVT_END_THREAD
    EVT_WAIT(10 * DT)
    EVT_CALL(DisablePlayerInput, FALSE)
    EVT_RETURN
    EVT_END
};
|
d06e42f9520f4ed2a037369c5d69f54b3c9cf21e
|
99bdb3251fecee538e0630f15f6574054dfc1468
|
/bsp/stm32/libraries/STM32U5xx_HAL/STM32U5xx_HAL_Driver/Src/stm32u5xx_hal_dma2d.c
|
b47cc6e28c42466fbc90af3987155924aa392a61
|
[
"Apache-2.0",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"MIT",
"BSD-3-Clause",
"X11",
"BSD-4-Clause-UC",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
RT-Thread/rt-thread
|
03a7c52c2aeb1b06a544143b0e803d72f47d1ece
|
3602f891211904a27dcbd51e5ba72fefce7326b2
|
refs/heads/master
| 2023-09-01T04:10:20.295801
| 2023-08-31T16:20:55
| 2023-08-31T16:20:55
| 7,408,108
| 9,599
| 5,805
|
Apache-2.0
| 2023-09-14T13:37:26
| 2013-01-02T14:49:21
|
C
|
UTF-8
|
C
| false
| false
| 75,284
|
c
|
stm32u5xx_hal_dma2d.c
|
/**
******************************************************************************
* @file stm32u5xx_hal_dma2d.c
* @author MCD Application Team
* @brief DMA2D HAL module driver.
* This file provides firmware functions to manage the following
* functionalities of the DMA2D peripheral:
* + Initialization and de-initialization functions
* + IO operation functions
* + Peripheral Control functions
* + Peripheral State and Errors functions
*
******************************************************************************
* @attention
*
* Copyright (c) 2021 STMicroelectronics.
* All rights reserved.
*
* This software is licensed under terms that can be found in the LICENSE file
* in the root directory of this software component.
* If no LICENSE file comes with this software, it is provided AS-IS.
*
******************************************************************************
@verbatim
==============================================================================
##### How to use this driver #####
==============================================================================
[..]
(#) Program the required configuration through the following parameters:
the transfer mode, the output color mode and the output offset using
HAL_DMA2D_Init() function.
(#) Program the required configuration through the following parameters:
the input color mode, the input color, the input alpha value, the alpha mode,
the red/blue swap mode, the inverted alpha mode and the input offset using
HAL_DMA2D_ConfigLayer() function for foreground or/and background layer.
*** Polling mode IO operation ***
=================================
[..]
(#) Configure pdata parameter (explained hereafter), destination and data length
and enable the transfer using HAL_DMA2D_Start().
(#) Wait for end of transfer using HAL_DMA2D_PollForTransfer(), at this stage
user can specify the value of timeout according to his end application.
*** Interrupt mode IO operation ***
===================================
[..]
(#) Configure pdata parameter, destination and data length and enable
the transfer using HAL_DMA2D_Start_IT().
(#) Use HAL_DMA2D_IRQHandler() called under DMA2D_IRQHandler() interrupt subroutine.
(#) At the end of data transfer HAL_DMA2D_IRQHandler() function is executed and user can
add his own function by customization of function pointer XferCpltCallback (member
of DMA2D handle structure).
(#) In case of error, the HAL_DMA2D_IRQHandler() function calls the callback
XferErrorCallback.
-@- In Register-to-Memory transfer mode, pdata parameter is the register
color, in Memory-to-memory or Memory-to-Memory with pixel format
conversion pdata is the source address.
-@- Configure the foreground source address, the background source address,
the destination and data length then Enable the transfer using
HAL_DMA2D_BlendingStart() in polling mode and HAL_DMA2D_BlendingStart_IT()
in interrupt mode.
-@- HAL_DMA2D_BlendingStart() and HAL_DMA2D_BlendingStart_IT() functions
are used if the memory to memory with blending transfer mode is selected.
(#) Optionally, configure and enable the CLUT using HAL_DMA2D_CLUTLoad() in polling
mode or HAL_DMA2D_CLUTLoad_IT() in interrupt mode.
(#) Optionally, configure the line watermark in using the API HAL_DMA2D_ProgramLineEvent().
(#) Optionally, configure the dead time value in the AHB clock cycle inserted between two
consecutive accesses on the AHB master port in using the API HAL_DMA2D_ConfigDeadTime()
and enable/disable the functionality with the APIs HAL_DMA2D_EnableDeadTime() or
HAL_DMA2D_DisableDeadTime().
(#) The transfer can be suspended, resumed and aborted using the following
functions: HAL_DMA2D_Suspend(), HAL_DMA2D_Resume(), HAL_DMA2D_Abort().
(#) The CLUT loading can be suspended, resumed and aborted using the following
functions: HAL_DMA2D_CLUTLoading_Suspend(), HAL_DMA2D_CLUTLoading_Resume(),
HAL_DMA2D_CLUTLoading_Abort().
(#) To control the DMA2D state, use the following function: HAL_DMA2D_GetState().
(#) To read the DMA2D error code, use the following function: HAL_DMA2D_GetError().
*** DMA2D HAL driver macros list ***
=============================================
[..]
Below the list of most used macros in DMA2D HAL driver :
(+) __HAL_DMA2D_ENABLE: Enable the DMA2D peripheral.
(+) __HAL_DMA2D_GET_FLAG: Get the DMA2D pending flags.
(+) __HAL_DMA2D_CLEAR_FLAG: Clear the DMA2D pending flags.
(+) __HAL_DMA2D_ENABLE_IT: Enable the specified DMA2D interrupts.
(+) __HAL_DMA2D_DISABLE_IT: Disable the specified DMA2D interrupts.
(+) __HAL_DMA2D_GET_IT_SOURCE: Check whether the specified DMA2D interrupt is enabled or not.
*** Callback registration ***
===================================
[..]
(#) The compilation define USE_HAL_DMA2D_REGISTER_CALLBACKS when set to 1
allows the user to configure dynamically the driver callbacks.
Use function @ref HAL_DMA2D_RegisterCallback() to register a user callback.
(#) Function @ref HAL_DMA2D_RegisterCallback() allows to register following callbacks:
(+) XferCpltCallback : callback for transfer complete.
(+) XferErrorCallback : callback for transfer error.
(+) LineEventCallback : callback for line event.
(+) CLUTLoadingCpltCallback : callback for CLUT loading completion.
(+) MspInitCallback : DMA2D MspInit.
(+) MspDeInitCallback : DMA2D MspDeInit.
This function takes as parameters the HAL peripheral handle, the Callback ID
and a pointer to the user callback function.
(#) Use function @ref HAL_DMA2D_UnRegisterCallback() to reset a callback to the default
weak (surcharged) function.
@ref HAL_DMA2D_UnRegisterCallback() takes as parameters the HAL peripheral handle,
and the Callback ID.
This function allows to reset following callbacks:
(+) XferCpltCallback : callback for transfer complete.
(+) XferErrorCallback : callback for transfer error.
(+) LineEventCallback : callback for line event.
(+) CLUTLoadingCpltCallback : callback for CLUT loading completion.
(+) MspInitCallback : DMA2D MspInit.
(+) MspDeInitCallback : DMA2D MspDeInit.
(#) By default, after the @ref HAL_DMA2D_Init and if the state is HAL_DMA2D_STATE_RESET
all callbacks are reset to the corresponding legacy weak (surcharged) functions:
examples @ref HAL_DMA2D_LineEventCallback(), @ref HAL_DMA2D_CLUTLoadingCpltCallback()
Exception done for MspInit and MspDeInit callbacks that are respectively
reset to the legacy weak (surcharged) functions in the @ref HAL_DMA2D_Init
and @ref HAL_DMA2D_DeInit only when these callbacks are null (not registered beforehand)
If not, MspInit or MspDeInit are not null, the @ref HAL_DMA2D_Init and @ref HAL_DMA2D_DeInit
keep and use the user MspInit/MspDeInit callbacks (registered beforehand).
Exception as well for Transfer Completion and Transfer Error callbacks that are not defined
as weak (surcharged) functions. They must be defined by the user to be resorted to.
Callbacks can be registered/unregistered in READY state only.
Exception done for MspInit/MspDeInit callbacks that can be registered/unregistered
in READY or RESET state, thus registered (user) MspInit/DeInit callbacks can be used
during the Init/DeInit.
In that case first register the MspInit/MspDeInit user callbacks
using @ref HAL_DMA2D_RegisterCallback before calling @ref HAL_DMA2D_DeInit
or @ref HAL_DMA2D_Init function.
When The compilation define USE_HAL_DMA2D_REGISTER_CALLBACKS is set to 0 or
not defined, the callback registering feature is not available
and weak (surcharged) callbacks are used.
[..]
(@) You can refer to the DMA2D HAL driver header file for more useful macros
@endverbatim
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "stm32u5xx_hal.h"
#ifdef HAL_DMA2D_MODULE_ENABLED
#if defined (DMA2D)
/** @addtogroup STM32U5xx_HAL_Driver
* @{
*/
/** @defgroup DMA2D DMA2D
* @brief DMA2D HAL module driver
* @{
*/
/* Private types -------------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/** @defgroup DMA2D_Private_Constants DMA2D Private Constants
* @{
*/
/** @defgroup DMA2D_TimeOut DMA2D Time Out
* @{
*/
#define DMA2D_TIMEOUT_ABORT (1000U) /*!< 1s */
#define DMA2D_TIMEOUT_SUSPEND (1000U) /*!< 1s */
/**
* @}
*/
/**
* @}
*/
/* Private variables ---------------------------------------------------------*/
/* Private constants ---------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private function prototypes -----------------------------------------------*/
/** @addtogroup DMA2D_Private_Functions DMA2D Private Functions
* @{
*/
static void DMA2D_SetConfig(DMA2D_HandleTypeDef *hdma2d, uint32_t pdata, uint32_t DstAddress, uint32_t Width,
uint32_t Height);
/**
* @}
*/
/* Private functions ---------------------------------------------------------*/
/* Exported functions --------------------------------------------------------*/
/** @defgroup DMA2D_Exported_Functions DMA2D Exported Functions
* @{
*/
/** @defgroup DMA2D_Exported_Functions_Group1 Initialization and de-initialization functions
* @brief Initialization and Configuration functions
*
@verbatim
===============================================================================
##### Initialization and Configuration functions #####
===============================================================================
[..] This section provides functions allowing to:
(+) Initialize and configure the DMA2D
(+) De-initialize the DMA2D
@endverbatim
* @{
*/
/**
* @brief Initialize the DMA2D according to the specified
* parameters in the DMA2D_InitTypeDef and create the associated handle.
* @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_Init(DMA2D_HandleTypeDef *hdma2d)
{
  /* Check the DMA2D peripheral state */
  if (hdma2d == NULL)
  {
    return HAL_ERROR;
  }
  /* Check the parameters */
  assert_param(IS_DMA2D_ALL_INSTANCE(hdma2d->Instance));
  assert_param(IS_DMA2D_MODE(hdma2d->Init.Mode));
  assert_param(IS_DMA2D_CMODE(hdma2d->Init.ColorMode));
  assert_param(IS_DMA2D_OFFSET(hdma2d->Init.OutputOffset));
  assert_param(IS_DMA2D_ALPHA_INVERTED(hdma2d->Init.AlphaInverted));
  assert_param(IS_DMA2D_RB_SWAP(hdma2d->Init.RedBlueSwap));
  assert_param(IS_DMA2D_LOM_MODE(hdma2d->Init.LineOffsetMode));
  assert_param(IS_DMA2D_BYTES_SWAP(hdma2d->Init.BytesSwap));
#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1)
  if (hdma2d->State == HAL_DMA2D_STATE_RESET)
  {
    /* Reset Callback pointers in HAL_DMA2D_STATE_RESET only */
    /* Line-event and CLUT-complete callbacks are seeded with the legacy weak
       implementations; transfer complete/error callbacks have no weak default
       and are left as-is (set later via HAL_DMA2D_RegisterCallback). */
    hdma2d->LineEventCallback       = HAL_DMA2D_LineEventCallback;
    hdma2d->CLUTLoadingCpltCallback = HAL_DMA2D_CLUTLoadingCpltCallback;
    if (hdma2d->MspInitCallback == NULL)
    {
      hdma2d->MspInitCallback = HAL_DMA2D_MspInit;
    }
    /* Init the low level hardware */
    hdma2d->MspInitCallback(hdma2d);
  }
#else
  if (hdma2d->State == HAL_DMA2D_STATE_RESET)
  {
    /* Allocate lock resource and initialize it */
    hdma2d->Lock = HAL_UNLOCKED;
    /* Init the low level hardware */
    HAL_DMA2D_MspInit(hdma2d);
  }
#endif /* (USE_HAL_DMA2D_REGISTER_CALLBACKS) */
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* DMA2D CR register configuration -------------------------------------------*/
  /* Only the mode and line-offset-mode fields are touched; other CR bits keep
     their current value. */
  MODIFY_REG(hdma2d->Instance->CR, DMA2D_CR_MODE | DMA2D_CR_LOM, hdma2d->Init.Mode | hdma2d->Init.LineOffsetMode);
  /* DMA2D OPFCCR register configuration ---------------------------------------*/
  MODIFY_REG(hdma2d->Instance->OPFCCR, DMA2D_OPFCCR_CM | DMA2D_OPFCCR_SB,
             hdma2d->Init.ColorMode | hdma2d->Init.BytesSwap);
  /* DMA2D OOR register configuration ------------------------------------------*/
  MODIFY_REG(hdma2d->Instance->OOR, DMA2D_OOR_LO, hdma2d->Init.OutputOffset);
  /* DMA2D OPFCCR AI and RBS fields setting (Output Alpha Inversion)*/
  MODIFY_REG(hdma2d->Instance->OPFCCR, (DMA2D_OPFCCR_AI | DMA2D_OPFCCR_RBS),
             ((hdma2d->Init.AlphaInverted << DMA2D_OPFCCR_AI_Pos) | \
              (hdma2d->Init.RedBlueSwap << DMA2D_OPFCCR_RBS_Pos)));
  /* Update error code */
  hdma2d->ErrorCode = HAL_DMA2D_ERROR_NONE;
  /* Initialize the DMA2D state*/
  hdma2d->State = HAL_DMA2D_STATE_READY;
  return HAL_OK;
}
/**
  * @brief  Deinitializes the DMA2D peripheral registers to their default reset
  *         values.
  * @param  hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
  *                the configuration information for the DMA2D.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_DMA2D_DeInit(DMA2D_HandleTypeDef *hdma2d)
{
  /* Check the DMA2D peripheral state */
  if (hdma2d == NULL)
  {
    return HAL_ERROR;
  }
  /* Before aborting any DMA2D transfer or CLUT loading, check
     first whether or not DMA2D clock is enabled */
  /* If the peripheral clock is off, register accesses would be meaningless,
     so the abort sequence is skipped entirely. */
  if (__HAL_RCC_DMA2D_IS_CLK_ENABLED())
  {
    /* Abort DMA2D transfer if any */
    if ((hdma2d->Instance->CR & DMA2D_CR_START) == DMA2D_CR_START)
    {
      if (HAL_DMA2D_Abort(hdma2d) != HAL_OK)
      {
        /* Issue when aborting DMA2D transfer */
        return HAL_ERROR;
      }
    }
    else
    {
      /* Abort background CLUT loading if any */
      if ((hdma2d->Instance->BGPFCCR & DMA2D_BGPFCCR_START) == DMA2D_BGPFCCR_START)
      {
        if (HAL_DMA2D_CLUTLoading_Abort(hdma2d, 0U) != HAL_OK)
        {
          /* Issue when aborting background CLUT loading */
          return HAL_ERROR;
        }
      }
      else
      {
        /* Abort foreground CLUT loading if any */
        if ((hdma2d->Instance->FGPFCCR & DMA2D_FGPFCCR_START) == DMA2D_FGPFCCR_START)
        {
          if (HAL_DMA2D_CLUTLoading_Abort(hdma2d, 1U) != HAL_OK)
          {
            /* Issue when aborting foreground CLUT loading */
            return HAL_ERROR;
          }
        }
      }
    }
  }
  /* Reset DMA2D control registers*/
  hdma2d->Instance->CR = 0U;
  /* 0x3F sets the six flag-clear bits of IFCR (interrupt flag clear register)
     — presumably clears all pending DMA2D flags; confirm against the RM. */
  hdma2d->Instance->IFCR = 0x3FU;
  hdma2d->Instance->FGOR = 0U;
  hdma2d->Instance->BGOR = 0U;
  hdma2d->Instance->FGPFCCR = 0U;
  hdma2d->Instance->BGPFCCR = 0U;
  hdma2d->Instance->OPFCCR = 0U;
#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1)
  if (hdma2d->MspDeInitCallback == NULL)
  {
    hdma2d->MspDeInitCallback = HAL_DMA2D_MspDeInit;
  }
  /* DeInit the low level hardware */
  hdma2d->MspDeInitCallback(hdma2d);
#else
  /* Carry on with de-initialization of low level hardware */
  HAL_DMA2D_MspDeInit(hdma2d);
#endif /* (USE_HAL_DMA2D_REGISTER_CALLBACKS) */
  /* Update error code */
  hdma2d->ErrorCode = HAL_DMA2D_ERROR_NONE;
  /* Initialize the DMA2D state*/
  hdma2d->State = HAL_DMA2D_STATE_RESET;
  /* Release Lock */
  __HAL_UNLOCK(hdma2d);
  return HAL_OK;
}
/**
* @brief Initializes the DMA2D MSP.
* @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @retval None
*/
__weak void HAL_DMA2D_MspInit(DMA2D_HandleTypeDef *hdma2d)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hdma2d);
  /* NOTE : This function should not be modified; when the callback is needed,
            the HAL_DMA2D_MspInit can be implemented in the user file.
            Weak empty default: user code typically enables the DMA2D clock
            and NVIC lines in its override. */
}
/**
* @brief DeInitializes the DMA2D MSP.
* @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @retval None
*/
__weak void HAL_DMA2D_MspDeInit(DMA2D_HandleTypeDef *hdma2d)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hdma2d);
  /* NOTE : This function should not be modified; when the callback is needed,
            the HAL_DMA2D_MspDeInit can be implemented in the user file.
            Weak empty default: counterpart of HAL_DMA2D_MspInit. */
}
#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1)
/**
* @brief Register a User DMA2D Callback
* To be used instead of the weak (surcharged) predefined callback
* @param hdma2d DMA2D handle
* @param CallbackID ID of the callback to be registered
* This parameter can be one of the following values:
* @arg @ref HAL_DMA2D_TRANSFERCOMPLETE_CB_ID DMA2D transfer complete Callback ID
* @arg @ref HAL_DMA2D_TRANSFERERROR_CB_ID DMA2D transfer error Callback ID
* @arg @ref HAL_DMA2D_LINEEVENT_CB_ID DMA2D line event Callback ID
* @arg @ref HAL_DMA2D_CLUTLOADINGCPLT_CB_ID DMA2D CLUT loading completion Callback ID
* @arg @ref HAL_DMA2D_MSPINIT_CB_ID DMA2D MspInit callback ID
* @arg @ref HAL_DMA2D_MSPDEINIT_CB_ID DMA2D MspDeInit callback ID
* @param pCallback pointer to the Callback function
* @note No weak predefined callbacks are defined for HAL_DMA2D_TRANSFERCOMPLETE_CB_ID or HAL_DMA2D_TRANSFERERROR_CB_ID
* @retval status
*/
HAL_StatusTypeDef HAL_DMA2D_RegisterCallback(DMA2D_HandleTypeDef *hdma2d, HAL_DMA2D_CallbackIDTypeDef CallbackID,
                                             pDMA2D_CallbackTypeDef pCallback)
{
  HAL_StatusTypeDef ret = HAL_OK;

  /* A NULL function pointer can never be registered */
  if (pCallback == NULL)
  {
    hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK;
    return HAL_ERROR;
  }

  /* Take the handle lock for the duration of the table update */
  __HAL_LOCK(hdma2d);

  if (hdma2d->State == HAL_DMA2D_STATE_READY)
  {
    /* READY state: every callback slot may be redirected */
    switch (CallbackID)
    {
      case HAL_DMA2D_TRANSFERCOMPLETE_CB_ID :
        hdma2d->XferCpltCallback = pCallback;
        break;
      case HAL_DMA2D_TRANSFERERROR_CB_ID :
        hdma2d->XferErrorCallback = pCallback;
        break;
      case HAL_DMA2D_LINEEVENT_CB_ID :
        hdma2d->LineEventCallback = pCallback;
        break;
      case HAL_DMA2D_CLUTLOADINGCPLT_CB_ID :
        hdma2d->CLUTLoadingCpltCallback = pCallback;
        break;
      case HAL_DMA2D_MSPINIT_CB_ID :
        hdma2d->MspInitCallback = pCallback;
        break;
      case HAL_DMA2D_MSPDEINIT_CB_ID :
        hdma2d->MspDeInitCallback = pCallback;
        break;
      default :
        /* Unknown callback identifier */
        hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK;
        ret = HAL_ERROR;
        break;
    }
  }
  else if (hdma2d->State == HAL_DMA2D_STATE_RESET)
  {
    /* RESET state (before Init): only the Msp hooks may be redirected */
    if (CallbackID == HAL_DMA2D_MSPINIT_CB_ID)
    {
      hdma2d->MspInitCallback = pCallback;
    }
    else if (CallbackID == HAL_DMA2D_MSPDEINIT_CB_ID)
    {
      hdma2d->MspDeInitCallback = pCallback;
    }
    else
    {
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK;
      ret = HAL_ERROR;
    }
  }
  else
  {
    /* Registration is rejected in any other state */
    hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK;
    ret = HAL_ERROR;
  }

  __HAL_UNLOCK(hdma2d);
  return ret;
}
/**
* @brief Unregister a DMA2D Callback
* DMA2D Callback is redirected to the weak (surcharged) predefined callback
* @param hdma2d DMA2D handle
* @param CallbackID ID of the callback to be unregistered
* This parameter can be one of the following values:
* @arg @ref HAL_DMA2D_TRANSFERCOMPLETE_CB_ID DMA2D transfer complete Callback ID
* @arg @ref HAL_DMA2D_TRANSFERERROR_CB_ID DMA2D transfer error Callback ID
* @arg @ref HAL_DMA2D_LINEEVENT_CB_ID DMA2D line event Callback ID
* @arg @ref HAL_DMA2D_CLUTLOADINGCPLT_CB_ID DMA2D CLUT loading completion Callback ID
* @arg @ref HAL_DMA2D_MSPINIT_CB_ID DMA2D MspInit callback ID
* @arg @ref HAL_DMA2D_MSPDEINIT_CB_ID DMA2D MspDeInit callback ID
* @note No weak predefined callbacks are defined for HAL_DMA2D_TRANSFERCOMPLETE_CB_ID or HAL_DMA2D_TRANSFERERROR_CB_ID
* @retval status
*/
HAL_StatusTypeDef HAL_DMA2D_UnRegisterCallback(DMA2D_HandleTypeDef *hdma2d, HAL_DMA2D_CallbackIDTypeDef CallbackID)
{
  HAL_StatusTypeDef ret = HAL_OK;

  /* Take the handle lock for the duration of the table update */
  __HAL_LOCK(hdma2d);

  if (hdma2d->State == HAL_DMA2D_STATE_READY)
  {
    /* READY state: restore the requested slot to its default.
       Transfer complete/error have no weak default and revert to NULL;
       the others revert to the legacy weak implementations. */
    switch (CallbackID)
    {
      case HAL_DMA2D_TRANSFERCOMPLETE_CB_ID :
        hdma2d->XferCpltCallback = NULL;
        break;
      case HAL_DMA2D_TRANSFERERROR_CB_ID :
        hdma2d->XferErrorCallback = NULL;
        break;
      case HAL_DMA2D_LINEEVENT_CB_ID :
        hdma2d->LineEventCallback = HAL_DMA2D_LineEventCallback;
        break;
      case HAL_DMA2D_CLUTLOADINGCPLT_CB_ID :
        hdma2d->CLUTLoadingCpltCallback = HAL_DMA2D_CLUTLoadingCpltCallback;
        break;
      case HAL_DMA2D_MSPINIT_CB_ID :
        hdma2d->MspInitCallback = HAL_DMA2D_MspInit;     /* Legacy weak Msp Init */
        break;
      case HAL_DMA2D_MSPDEINIT_CB_ID :
        hdma2d->MspDeInitCallback = HAL_DMA2D_MspDeInit; /* Legacy weak Msp DeInit */
        break;
      default :
        /* Unknown callback identifier */
        hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK;
        ret = HAL_ERROR;
        break;
    }
  }
  else if (hdma2d->State == HAL_DMA2D_STATE_RESET)
  {
    /* RESET state (before Init): only the Msp hooks may be restored */
    if (CallbackID == HAL_DMA2D_MSPINIT_CB_ID)
    {
      hdma2d->MspInitCallback = HAL_DMA2D_MspInit;       /* Legacy weak Msp Init */
    }
    else if (CallbackID == HAL_DMA2D_MSPDEINIT_CB_ID)
    {
      hdma2d->MspDeInitCallback = HAL_DMA2D_MspDeInit;   /* Legacy weak Msp DeInit */
    }
    else
    {
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK;
      ret = HAL_ERROR;
    }
  }
  else
  {
    /* Unregistration is rejected in any other state */
    hdma2d->ErrorCode |= HAL_DMA2D_ERROR_INVALID_CALLBACK;
    ret = HAL_ERROR;
  }

  __HAL_UNLOCK(hdma2d);
  return ret;
}
#endif /* USE_HAL_DMA2D_REGISTER_CALLBACKS */
/**
* @}
*/
/** @defgroup DMA2D_Exported_Functions_Group2 IO operation functions
* @brief IO operation functions
*
@verbatim
===============================================================================
##### IO operation functions #####
===============================================================================
[..] This section provides functions allowing to:
(+) Configure the pdata, destination address and data size then
start the DMA2D transfer.
(+) Configure the source for foreground and background, destination address
and data size then start a MultiBuffer DMA2D transfer.
(+) Configure the pdata, destination address and data size then
start the DMA2D transfer with interrupt.
(+) Configure the source for foreground and background, destination address
and data size then start a MultiBuffer DMA2D transfer with interrupt.
(+) Abort DMA2D transfer.
(+) Suspend DMA2D transfer.
(+) Resume DMA2D transfer.
(+) Enable CLUT transfer.
(+) Configure CLUT loading then start transfer in polling mode.
(+) Configure CLUT loading then start transfer in interrupt mode.
(+) Abort DMA2D CLUT loading.
(+) Suspend DMA2D CLUT loading.
(+) Resume DMA2D CLUT loading.
(+) Poll for transfer complete.
(+) handle DMA2D interrupt request.
(+) Transfer watermark callback.
(+) CLUT Transfer Complete callback.
@endverbatim
* @{
*/
/**
* @brief Start the DMA2D Transfer.
* @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @param pdata Configure the source memory Buffer address if
* Memory-to-Memory or Memory-to-Memory with pixel format
* conversion mode is selected, or configure
* the color value if Register-to-Memory mode is selected.
* @param DstAddress The destination memory Buffer address.
* @param Width The width of data to be transferred from source
* to destination (expressed in number of pixels per line).
* @param Height The height of data to be transferred from source to destination (expressed in number of lines).
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_Start(DMA2D_HandleTypeDef *hdma2d, uint32_t pdata, uint32_t DstAddress, uint32_t Width,
                                  uint32_t Height)
{
  /* Check the parameters */
  assert_param(IS_DMA2D_LINE(Height));
  assert_param(IS_DMA2D_PIXEL(Width));
  /* Process locked */
  /* NOTE(review): the lock is intentionally NOT released here — polling mode
     apparently leaves it held until the transfer is polled/aborted elsewhere
     (e.g. by the PollForTransfer/Abort APIs); confirm against the HAL model. */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Configure the source, destination address and the data size */
  DMA2D_SetConfig(hdma2d, pdata, DstAddress, Width, Height);
  /* Enable the Peripheral */
  /* Setting CR.START kicks off the transfer; no interrupts are enabled in
     this polling-mode variant. */
  __HAL_DMA2D_ENABLE(hdma2d);
  return HAL_OK;
}
/**
* @brief Start the DMA2D Transfer with interrupt enabled.
* @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @param pdata Configure the source memory Buffer address if
* the Memory-to-Memory or Memory-to-Memory with pixel format
* conversion mode is selected, or configure
* the color value if Register-to-Memory mode is selected.
* @param DstAddress The destination memory Buffer address.
* @param Width The width of data to be transferred from source
* to destination (expressed in number of pixels per line).
* @param Height The height of data to be transferred from source to destination (expressed in number of lines).
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_Start_IT(DMA2D_HandleTypeDef *hdma2d, uint32_t pdata, uint32_t DstAddress, uint32_t Width,
                                     uint32_t Height)
{
  /* Check the parameters */
  assert_param(IS_DMA2D_LINE(Height));
  assert_param(IS_DMA2D_PIXEL(Width));
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Configure the source, destination address and the data size */
  DMA2D_SetConfig(hdma2d, pdata, DstAddress, Width, Height);
  /* Enable the transfer complete, transfer error and configuration error interrupts */
  /* Interrupts must be armed BEFORE the peripheral is enabled so no event is
     missed between the two register writes. */
  __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_TC | DMA2D_IT_TE | DMA2D_IT_CE);
  /* Enable the Peripheral */
  __HAL_DMA2D_ENABLE(hdma2d);
  return HAL_OK;
}
/**
* @brief Start the multi-source DMA2D Transfer.
* @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @param SrcAddress1 The source memory Buffer address for the foreground layer.
* @param SrcAddress2 The source memory Buffer address for the background layer.
* @param DstAddress The destination memory Buffer address.
* @param Width The width of data to be transferred from source
* to destination (expressed in number of pixels per line).
* @param Height The height of data to be transferred from source to destination (expressed in number of lines).
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_BlendingStart(DMA2D_HandleTypeDef *hdma2d, uint32_t SrcAddress1, uint32_t SrcAddress2,
                                          uint32_t DstAddress, uint32_t Width, uint32_t Height)
{
  /* Check the parameters */
  assert_param(IS_DMA2D_LINE(Height));
  assert_param(IS_DMA2D_PIXEL(Width));
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* The meaning of SrcAddress1/SrcAddress2 depends on the blending mode
     selected at Init time: in the fixed-color modes one of them is a raw
     color value written to FG/BG COLR instead of a buffer address. */
  if (hdma2d->Init.Mode == DMA2D_M2M_BLEND_FG)
  {
    /*blending & fixed FG*/
    WRITE_REG(hdma2d->Instance->FGCOLR, SrcAddress1);
    /* Configure the source, destination address and the data size */
    DMA2D_SetConfig(hdma2d, SrcAddress2, DstAddress, Width, Height);
  }
  else if (hdma2d->Init.Mode == DMA2D_M2M_BLEND_BG)
  {
    /*blending & fixed BG*/
    WRITE_REG(hdma2d->Instance->BGCOLR, SrcAddress2);
    /* Configure the source, destination address and the data size */
    DMA2D_SetConfig(hdma2d, SrcAddress1, DstAddress, Width, Height);
  }
  else
  {
    /* Configure DMA2D Stream source2 address */
    WRITE_REG(hdma2d->Instance->BGMAR, SrcAddress2);
    /* Configure the source, destination address and the data size */
    DMA2D_SetConfig(hdma2d, SrcAddress1, DstAddress, Width, Height);
  }
  /* Enable the Peripheral */
  __HAL_DMA2D_ENABLE(hdma2d);
  return HAL_OK;
}
/**
* @brief Start the multi-source DMA2D Transfer with interrupt enabled.
* @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @param SrcAddress1 The source memory Buffer address for the foreground layer.
* @param SrcAddress2 The source memory Buffer address for the background layer.
* @param DstAddress The destination memory Buffer address.
* @param Width The width of data to be transferred from source
* to destination (expressed in number of pixels per line).
* @param Height The height of data to be transferred from source to destination (expressed in number of lines).
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_BlendingStart_IT(DMA2D_HandleTypeDef *hdma2d, uint32_t SrcAddress1, uint32_t SrcAddress2,
                                             uint32_t DstAddress, uint32_t Width, uint32_t Height)
{
  /* Check the parameters */
  assert_param(IS_DMA2D_LINE(Height));
  assert_param(IS_DMA2D_PIXEL(Width));
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Same source-routing logic as HAL_DMA2D_BlendingStart: in the fixed-color
     blend modes one "address" argument is actually a color value. */
  if (hdma2d->Init.Mode == DMA2D_M2M_BLEND_FG)
  {
    /*blending & fixed FG*/
    WRITE_REG(hdma2d->Instance->FGCOLR, SrcAddress1);
    /* Configure the source, destination address and the data size */
    DMA2D_SetConfig(hdma2d, SrcAddress2, DstAddress, Width, Height);
  }
  else if (hdma2d->Init.Mode == DMA2D_M2M_BLEND_BG)
  {
    /*blending & fixed BG*/
    WRITE_REG(hdma2d->Instance->BGCOLR, SrcAddress2);
    /* Configure the source, destination address and the data size */
    DMA2D_SetConfig(hdma2d, SrcAddress1, DstAddress, Width, Height);
  }
  else
  {
    WRITE_REG(hdma2d->Instance->BGMAR, SrcAddress2);
    /* Configure the source, destination address and the data size */
    DMA2D_SetConfig(hdma2d, SrcAddress1, DstAddress, Width, Height);
  }
  /* Enable the transfer complete, transfer error and configuration error interrupts */
  /* Interrupts are armed before the peripheral enable so no event is missed. */
  __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_TC | DMA2D_IT_TE | DMA2D_IT_CE);
  /* Enable the Peripheral */
  __HAL_DMA2D_ENABLE(hdma2d);
  return HAL_OK;
}
/**
* @brief Abort the DMA2D Transfer.
* @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_Abort(DMA2D_HandleTypeDef *hdma2d)
{
  uint32_t tickstart;  /* start-of-wait timestamp for the abort timeout */
  /* Abort the DMA2D transfer */
  /* START bit is reset to make sure not to set it again, in the event the HW clears it
     between the register read and the register write by the CPU (writing 0 has no
     effect on START bitvalue) */
  MODIFY_REG(hdma2d->Instance->CR, DMA2D_CR_ABORT | DMA2D_CR_START, DMA2D_CR_ABORT);
  /* Get tick */
  tickstart = HAL_GetTick();
  /* Check if the DMA2D is effectively disabled */
  /* Busy-wait until hardware clears START, bounded by DMA2D_TIMEOUT_ABORT (1 s). */
  while ((hdma2d->Instance->CR & DMA2D_CR_START) != 0U)
  {
    if ((HAL_GetTick() - tickstart) > DMA2D_TIMEOUT_ABORT)
    {
      /* Update error code */
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT;
      /* Change the DMA2D state */
      hdma2d->State = HAL_DMA2D_STATE_TIMEOUT;
      /* Process Unlocked */
      __HAL_UNLOCK(hdma2d);
      return HAL_TIMEOUT;
    }
  }
  /* Disable the Transfer Complete, Transfer Error and Configuration Error interrupts */
  __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_TC | DMA2D_IT_TE | DMA2D_IT_CE);
  /* Change the DMA2D state*/
  hdma2d->State = HAL_DMA2D_STATE_READY;
  /* Process Unlocked */
  /* Releases the lock taken by the Start API that launched the transfer. */
  __HAL_UNLOCK(hdma2d);
  return HAL_OK;
}
/**
* @brief Suspend the DMA2D Transfer.
* @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_Suspend(DMA2D_HandleTypeDef *hdma2d)
{
  uint32_t tickstart;  /* start-of-wait timestamp for the suspend timeout */
  /* Suspend the DMA2D transfer */
  /* START bit is reset to make sure not to set it again, in the event the HW clears it
     between the register read and the register write by the CPU (writing 0 has no
     effect on START bitvalue). */
  MODIFY_REG(hdma2d->Instance->CR, DMA2D_CR_SUSP | DMA2D_CR_START, DMA2D_CR_SUSP);
  /* Get tick */
  tickstart = HAL_GetTick();
  /* Check if the DMA2D is effectively suspended */
  /* Loop condition reads "a transfer is running (START set) but SUSP has not
     taken effect yet"; exits when SUSP is acknowledged or START clears. */
  while ((hdma2d->Instance->CR & (DMA2D_CR_SUSP | DMA2D_CR_START)) == DMA2D_CR_START)
  {
    if ((HAL_GetTick() - tickstart) > DMA2D_TIMEOUT_SUSPEND)
    {
      /* Update error code */
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT;
      /* Change the DMA2D state */
      hdma2d->State = HAL_DMA2D_STATE_TIMEOUT;
      return HAL_TIMEOUT;
    }
  }
  /* Check whether or not a transfer is actually suspended and change the DMA2D state accordingly */
  if ((hdma2d->Instance->CR & DMA2D_CR_START) != 0U)
  {
    hdma2d->State = HAL_DMA2D_STATE_SUSPEND;
  }
  else
  {
    /* Make sure SUSP bit is cleared since it is meaningless
       when no transfer is on-going */
    /* Transfer finished before the suspend could take effect. */
    CLEAR_BIT(hdma2d->Instance->CR, DMA2D_CR_SUSP);
  }
  return HAL_OK;
}
/**
* @brief Resume the DMA2D Transfer.
* @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_Resume(DMA2D_HandleTypeDef *hdma2d)
{
  /* Check the SUSP and START bits */
  /* Both set means an on-going transfer is currently held in suspension. */
  if ((hdma2d->Instance->CR & (DMA2D_CR_SUSP | DMA2D_CR_START)) == (DMA2D_CR_SUSP | DMA2D_CR_START))
  {
    /* Ongoing transfer is suspended: change the DMA2D state before resuming */
    hdma2d->State = HAL_DMA2D_STATE_BUSY;
  }
  /* Resume the DMA2D transfer */
  /* START bit is reset to make sure not to set it again, in the event the HW clears it
     between the register read and the register write by the CPU (writing 0 has no
     effect on START bitvalue). */
  CLEAR_BIT(hdma2d->Instance->CR, (DMA2D_CR_SUSP | DMA2D_CR_START));
  return HAL_OK;
}
/**
* @brief Enable the DMA2D CLUT Transfer.
* @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @param LayerIdx DMA2D Layer index.
* This parameter can be one of the following values:
* DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1)
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_EnableCLUT(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx)
{
  /* Check the parameters */
  assert_param(IS_DMA2D_LAYER(LayerIdx));
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Assumes the CLUT address/size/color-mode were configured beforehand
     (e.g. via HAL_DMA2D_CLUTStartLoad); this only triggers the load. */
  if (LayerIdx == DMA2D_BACKGROUND_LAYER)
  {
    /* Enable the background CLUT loading */
    SET_BIT(hdma2d->Instance->BGPFCCR, DMA2D_BGPFCCR_START);
  }
  else
  {
    /* Enable the foreground CLUT loading */
    SET_BIT(hdma2d->Instance->FGPFCCR, DMA2D_FGPFCCR_START);
  }
  return HAL_OK;
}
/**
* @brief Start DMA2D CLUT Loading.
* @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @param CLUTCfg Pointer to a DMA2D_CLUTCfgTypeDef structure that contains
* the configuration information for the color look up table.
* @param LayerIdx DMA2D Layer index.
* This parameter can be one of the following values:
* DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1)
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_CLUTStartLoad(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef *CLUTCfg, uint32_t LayerIdx)
{
  /* Check the parameters */
  assert_param(IS_DMA2D_LAYER(LayerIdx));
  assert_param(IS_DMA2D_CLUT_CM(CLUTCfg->CLUTColorMode));
  assert_param(IS_DMA2D_CLUT_SIZE(CLUTCfg->Size));
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Configure the CLUT of the background DMA2D layer */
  if (LayerIdx == DMA2D_BACKGROUND_LAYER)
  {
    /* Write background CLUT memory address */
    WRITE_REG(hdma2d->Instance->BGCMAR, (uint32_t)CLUTCfg->pCLUT);
    /* Write background CLUT size and CLUT color mode */
    MODIFY_REG(hdma2d->Instance->BGPFCCR, (DMA2D_BGPFCCR_CS | DMA2D_BGPFCCR_CCM),
               ((CLUTCfg->Size << DMA2D_BGPFCCR_CS_Pos) | (CLUTCfg->CLUTColorMode << DMA2D_BGPFCCR_CCM_Pos)));
    /* Enable the CLUT loading for the background */
    /* Setting the layer START bit kicks off the automatic CLUT DMA load. */
    SET_BIT(hdma2d->Instance->BGPFCCR, DMA2D_BGPFCCR_START);
  }
  /* Configure the CLUT of the foreground DMA2D layer */
  else
  {
    /* Write foreground CLUT memory address */
    WRITE_REG(hdma2d->Instance->FGCMAR, (uint32_t)CLUTCfg->pCLUT);
    /* Write foreground CLUT size and CLUT color mode */
    MODIFY_REG(hdma2d->Instance->FGPFCCR, (DMA2D_FGPFCCR_CS | DMA2D_FGPFCCR_CCM),
               ((CLUTCfg->Size << DMA2D_FGPFCCR_CS_Pos) | (CLUTCfg->CLUTColorMode << DMA2D_FGPFCCR_CCM_Pos)));
    /* Enable the CLUT loading for the foreground */
    SET_BIT(hdma2d->Instance->FGPFCCR, DMA2D_FGPFCCR_START);
  }
  return HAL_OK;
}
/**
* @brief Start DMA2D CLUT Loading with interrupt enabled.
* @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @param CLUTCfg Pointer to a DMA2D_CLUTCfgTypeDef structure that contains
* the configuration information for the color look up table.
* @param LayerIdx DMA2D Layer index.
* This parameter can be one of the following values:
* DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1)
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_CLUTStartLoad_IT(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef *CLUTCfg,
                                             uint32_t LayerIdx)
{
  /* Check the parameters */
  assert_param(IS_DMA2D_LAYER(LayerIdx));
  assert_param(IS_DMA2D_CLUT_CM(CLUTCfg->CLUTColorMode));
  assert_param(IS_DMA2D_CLUT_SIZE(CLUTCfg->Size));
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Configure the CLUT of the background DMA2D layer */
  if (LayerIdx == DMA2D_BACKGROUND_LAYER)
  {
    /* Write background CLUT memory address */
    WRITE_REG(hdma2d->Instance->BGCMAR, (uint32_t)CLUTCfg->pCLUT);
    /* Write background CLUT size and CLUT color mode */
    MODIFY_REG(hdma2d->Instance->BGPFCCR, (DMA2D_BGPFCCR_CS | DMA2D_BGPFCCR_CCM),
               ((CLUTCfg->Size << DMA2D_BGPFCCR_CS_Pos) | (CLUTCfg->CLUTColorMode << DMA2D_BGPFCCR_CCM_Pos)));
    /* Enable the CLUT Transfer Complete, transfer Error, configuration Error and CLUT Access Error interrupts */
    /* Interrupts are armed before the layer START bit so no event is missed. */
    __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_CTC | DMA2D_IT_TE | DMA2D_IT_CE | DMA2D_IT_CAE);
    /* Enable the CLUT loading for the background */
    SET_BIT(hdma2d->Instance->BGPFCCR, DMA2D_BGPFCCR_START);
  }
  /* Configure the CLUT of the foreground DMA2D layer */
  else
  {
    /* Write foreground CLUT memory address */
    WRITE_REG(hdma2d->Instance->FGCMAR, (uint32_t)CLUTCfg->pCLUT);
    /* Write foreground CLUT size and CLUT color mode */
    MODIFY_REG(hdma2d->Instance->FGPFCCR, (DMA2D_FGPFCCR_CS | DMA2D_FGPFCCR_CCM),
               ((CLUTCfg->Size << DMA2D_FGPFCCR_CS_Pos) | (CLUTCfg->CLUTColorMode << DMA2D_FGPFCCR_CCM_Pos)));
    /* Enable the CLUT Transfer Complete, transfer Error, configuration Error and CLUT Access Error interrupts */
    __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_CTC | DMA2D_IT_TE | DMA2D_IT_CE | DMA2D_IT_CAE);
    /* Enable the CLUT loading for the foreground */
    SET_BIT(hdma2d->Instance->FGPFCCR, DMA2D_FGPFCCR_START);
  }
  return HAL_OK;
}
/**
* @brief Start DMA2D CLUT Loading.
* @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
  * @param  CLUTCfg DMA2D_CLUTCfgTypeDef structure (passed by value) that contains
  *                 the configuration information for the color look up table.
* @param LayerIdx DMA2D Layer index.
* This parameter can be one of the following values:
* DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1)
* @note API obsolete and maintained for compatibility with legacy. User is
* invited to resort to HAL_DMA2D_CLUTStartLoad() instead to benefit from
* code compactness, code size and improved heap usage.
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_CLUTLoad(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef CLUTCfg, uint32_t LayerIdx)
{
  /* Check the parameters */
  assert_param(IS_DMA2D_LAYER(LayerIdx));
  assert_param(IS_DMA2D_CLUT_CM(CLUTCfg.CLUTColorMode));
  assert_param(IS_DMA2D_CLUT_SIZE(CLUTCfg.Size));
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Legacy variant: identical to HAL_DMA2D_CLUTStartLoad except that the
     configuration struct is passed by value instead of by pointer. */
  /* Configure the CLUT of the background DMA2D layer */
  if (LayerIdx == DMA2D_BACKGROUND_LAYER)
  {
    /* Write background CLUT memory address */
    WRITE_REG(hdma2d->Instance->BGCMAR, (uint32_t)CLUTCfg.pCLUT);
    /* Write background CLUT size and CLUT color mode */
    MODIFY_REG(hdma2d->Instance->BGPFCCR, (DMA2D_BGPFCCR_CS | DMA2D_BGPFCCR_CCM),
               ((CLUTCfg.Size << DMA2D_BGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_BGPFCCR_CCM_Pos)));
    /* Enable the CLUT loading for the background */
    SET_BIT(hdma2d->Instance->BGPFCCR, DMA2D_BGPFCCR_START);
  }
  /* Configure the CLUT of the foreground DMA2D layer */
  else
  {
    /* Write foreground CLUT memory address */
    WRITE_REG(hdma2d->Instance->FGCMAR, (uint32_t)CLUTCfg.pCLUT);
    /* Write foreground CLUT size and CLUT color mode */
    MODIFY_REG(hdma2d->Instance->FGPFCCR, (DMA2D_FGPFCCR_CS | DMA2D_FGPFCCR_CCM),
               ((CLUTCfg.Size << DMA2D_FGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_FGPFCCR_CCM_Pos)));
    /* Enable the CLUT loading for the foreground */
    SET_BIT(hdma2d->Instance->FGPFCCR, DMA2D_FGPFCCR_START);
  }
  return HAL_OK;
}
/**
* @brief Start DMA2D CLUT Loading with interrupt enabled.
* @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
  * @param  CLUTCfg DMA2D_CLUTCfgTypeDef structure (passed by value) that contains
  *                 the configuration information for the color look up table.
* @param LayerIdx DMA2D Layer index.
* This parameter can be one of the following values:
* DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1)
* @note API obsolete and maintained for compatibility with legacy. User is
* invited to resort to HAL_DMA2D_CLUTStartLoad_IT() instead to benefit
* from code compactness, code size and improved heap usage.
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_CLUTLoad_IT(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef CLUTCfg, uint32_t LayerIdx)
{
  /* Check the parameters */
  assert_param(IS_DMA2D_LAYER(LayerIdx));
  assert_param(IS_DMA2D_CLUT_CM(CLUTCfg.CLUTColorMode));
  assert_param(IS_DMA2D_CLUT_SIZE(CLUTCfg.Size));
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Legacy variant: identical to HAL_DMA2D_CLUTStartLoad_IT except that the
     configuration struct is passed by value instead of by pointer. */
  /* Configure the CLUT of the background DMA2D layer */
  if (LayerIdx == DMA2D_BACKGROUND_LAYER)
  {
    /* Write background CLUT memory address */
    WRITE_REG(hdma2d->Instance->BGCMAR, (uint32_t)CLUTCfg.pCLUT);
    /* Write background CLUT size and CLUT color mode */
    MODIFY_REG(hdma2d->Instance->BGPFCCR, (DMA2D_BGPFCCR_CS | DMA2D_BGPFCCR_CCM),
               ((CLUTCfg.Size << DMA2D_BGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_BGPFCCR_CCM_Pos)));
    /* Enable the CLUT Transfer Complete, transfer Error, configuration Error and CLUT Access Error interrupts */
    /* Interrupts are armed before the layer START bit so no event is missed. */
    __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_CTC | DMA2D_IT_TE | DMA2D_IT_CE | DMA2D_IT_CAE);
    /* Enable the CLUT loading for the background */
    SET_BIT(hdma2d->Instance->BGPFCCR, DMA2D_BGPFCCR_START);
  }
  /* Configure the CLUT of the foreground DMA2D layer */
  else
  {
    /* Write foreground CLUT memory address */
    WRITE_REG(hdma2d->Instance->FGCMAR, (uint32_t)CLUTCfg.pCLUT);
    /* Write foreground CLUT size and CLUT color mode */
    MODIFY_REG(hdma2d->Instance->FGPFCCR, (DMA2D_FGPFCCR_CS | DMA2D_FGPFCCR_CCM),
               ((CLUTCfg.Size << DMA2D_FGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_FGPFCCR_CCM_Pos)));
    /* Enable the CLUT Transfer Complete, transfer Error, configuration Error and CLUT Access Error interrupts */
    __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_CTC | DMA2D_IT_TE | DMA2D_IT_CE | DMA2D_IT_CAE);
    /* Enable the CLUT loading for the foreground */
    SET_BIT(hdma2d->Instance->FGPFCCR, DMA2D_FGPFCCR_START);
  }
  return HAL_OK;
}
/**
* @brief Abort the DMA2D CLUT loading.
* @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
* the configuration information for the DMA2D.
* @param LayerIdx DMA2D Layer index.
* This parameter can be one of the following values:
* DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1)
* @retval HAL status
*/
HAL_StatusTypeDef HAL_DMA2D_CLUTLoading_Abort(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx)
{
  uint32_t tickstart;  /* start-of-wait timestamp for the abort timeout */
  const __IO uint32_t *reg = &(hdma2d->Instance->BGPFCCR); /* by default, point at background register */
  /* Abort the CLUT loading */
  SET_BIT(hdma2d->Instance->CR, DMA2D_CR_ABORT);
  /* If foreground CLUT loading is considered, update local variables */
  if (LayerIdx == DMA2D_FOREGROUND_LAYER)
  {
    reg = &(hdma2d->Instance->FGPFCCR);
  }
  /* Get tick */
  tickstart = HAL_GetTick();
  /* Check if the CLUT loading is aborted */
  /* The BGPFCCR_START mask is applied to either layer register — presumably
     the START bit sits at the same position in BG and FG PFCCR; confirm
     against the register map. */
  while ((*reg & DMA2D_BGPFCCR_START) != 0U)
  {
    if ((HAL_GetTick() - tickstart) > DMA2D_TIMEOUT_ABORT)
    {
      /* Update error code */
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT;
      /* Change the DMA2D state */
      hdma2d->State = HAL_DMA2D_STATE_TIMEOUT;
      /* Process Unlocked */
      __HAL_UNLOCK(hdma2d);
      return HAL_TIMEOUT;
    }
  }
  /* Disable the CLUT Transfer Complete, Transfer Error, Configuration Error and CLUT Access Error interrupts */
  __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_CTC | DMA2D_IT_TE | DMA2D_IT_CE | DMA2D_IT_CAE);
  /* Change the DMA2D state*/
  hdma2d->State = HAL_DMA2D_STATE_READY;
  /* Process Unlocked */
  __HAL_UNLOCK(hdma2d);
  return HAL_OK;
}
/**
 * @brief Suspend the DMA2D CLUT loading.
 * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the DMA2D.
 * @param LayerIdx DMA2D Layer index.
 * This parameter can be one of the following values:
 * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1)
 * @retval HAL status
 */
HAL_StatusTypeDef HAL_DMA2D_CLUTLoading_Suspend(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx)
{
  uint32_t tickstart;
  uint32_t loadsuspended;
  const __IO uint32_t *reg = &(hdma2d->Instance->BGPFCCR); /* by default, point at background register */
  /* Suspend the CLUT loading */
  SET_BIT(hdma2d->Instance->CR, DMA2D_CR_SUSP);
  /* If foreground CLUT loading is considered, update local variables */
  if (LayerIdx == DMA2D_FOREGROUND_LAYER)
  {
    reg = &(hdma2d->Instance->FGPFCCR);
  }
  /* Get tick */
  tickstart = HAL_GetTick();
  /* Check if the CLUT loading is suspended */
  /* NOTE(review): DMA2D_BGPFCCR_START is used as the START mask for both
     layers; assumes the bit has the same position in BGPFCCR and FGPFCCR --
     TODO confirm against the reference manual. */
  /* 1st condition: Suspend Check */
  loadsuspended = ((hdma2d->Instance->CR & DMA2D_CR_SUSP) == DMA2D_CR_SUSP) ? 1UL : 0UL;
  /* 2nd condition: Not Start Check */
  loadsuspended |= ((*reg & DMA2D_BGPFCCR_START) != DMA2D_BGPFCCR_START) ? 1UL : 0UL;
  /* Busy-wait until the loading is either suspended (SUSP set) or already
     finished (START cleared), whichever happens first */
  while (loadsuspended == 0UL)
  {
    if ((HAL_GetTick() - tickstart) > DMA2D_TIMEOUT_SUSPEND)
    {
      /* Update error code */
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT;
      /* Change the DMA2D state */
      hdma2d->State = HAL_DMA2D_STATE_TIMEOUT;
      return HAL_TIMEOUT;
    }
    /* 1st condition: Suspend Check */
    loadsuspended = ((hdma2d->Instance->CR & DMA2D_CR_SUSP) == DMA2D_CR_SUSP) ? 1UL : 0UL;
    /* 2nd condition: Not Start Check */
    loadsuspended |= ((*reg & DMA2D_BGPFCCR_START) != DMA2D_BGPFCCR_START) ? 1UL : 0UL;
  }
  /* Check whether or not a transfer is actually suspended and change the DMA2D state accordingly */
  if ((*reg & DMA2D_BGPFCCR_START) != 0U)
  {
    hdma2d->State = HAL_DMA2D_STATE_SUSPEND;
  }
  else
  {
    /* Make sure SUSP bit is cleared since it is meaningless
       when no transfer is on-going */
    CLEAR_BIT(hdma2d->Instance->CR, DMA2D_CR_SUSP);
  }
  return HAL_OK;
}
/**
 * @brief Resume the DMA2D CLUT loading.
 * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the DMA2D.
 * @param LayerIdx DMA2D Layer index.
 * This parameter can be one of the following values:
 * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1)
 * @retval HAL status
 */
HAL_StatusTypeDef HAL_DMA2D_CLUTLoading_Resume(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx)
{
  /* Check the SUSP and START bits for background or foreground CLUT loading */
  if (LayerIdx == DMA2D_BACKGROUND_LAYER)
  {
    /* Background CLUT loading suspension check */
    if ((hdma2d->Instance->CR & DMA2D_CR_SUSP) == DMA2D_CR_SUSP)
    {
      if ((hdma2d->Instance->BGPFCCR & DMA2D_BGPFCCR_START) == DMA2D_BGPFCCR_START)
      {
        /* Ongoing CLUT loading is suspended: change the DMA2D state before resuming */
        hdma2d->State = HAL_DMA2D_STATE_BUSY;
      }
    }
  }
  else
  {
    /* Foreground CLUT loading suspension check */
    if ((hdma2d->Instance->CR & DMA2D_CR_SUSP) == DMA2D_CR_SUSP)
    {
      if ((hdma2d->Instance->FGPFCCR & DMA2D_FGPFCCR_START) == DMA2D_FGPFCCR_START)
      {
        /* Ongoing CLUT loading is suspended: change the DMA2D state before resuming */
        hdma2d->State = HAL_DMA2D_STATE_BUSY;
      }
    }
  }
  /* Resume the CLUT loading */
  /* Note: SUSP is cleared unconditionally; if nothing was suspended this is
     harmless since the bit is meaningless without an on-going transfer */
  CLEAR_BIT(hdma2d->Instance->CR, DMA2D_CR_SUSP);
  return HAL_OK;
}
/**
 * @brief Polling for transfer complete or CLUT loading.
 * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the DMA2D.
 * @param Timeout Timeout duration (in ticks); HAL_MAX_DELAY waits forever,
 * 0 returns HAL_TIMEOUT on the first unfinished poll iteration.
 * @retval HAL status
 */
HAL_StatusTypeDef HAL_DMA2D_PollForTransfer(DMA2D_HandleTypeDef *hdma2d, uint32_t Timeout)
{
  uint32_t tickstart;
  uint32_t layer_start;
  __IO uint32_t isrflags = 0x0U;
  /* Polling for DMA2D transfer: only entered while a transfer is on-going */
  if ((hdma2d->Instance->CR & DMA2D_CR_START) != 0U)
  {
    /* Get tick */
    tickstart = HAL_GetTick();
    while (__HAL_DMA2D_GET_FLAG(hdma2d, DMA2D_FLAG_TC) == 0U)
    {
      isrflags = READ_REG(hdma2d->Instance->ISR);
      if ((isrflags & (DMA2D_FLAG_CE | DMA2D_FLAG_TE)) != 0U)
      {
        if ((isrflags & DMA2D_FLAG_CE) != 0U)
        {
          hdma2d->ErrorCode |= HAL_DMA2D_ERROR_CE;
        }
        if ((isrflags & DMA2D_FLAG_TE) != 0U)
        {
          hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TE;
        }
        /* Clear the transfer and configuration error flags */
        __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_CE | DMA2D_FLAG_TE);
        /* Change DMA2D state */
        hdma2d->State = HAL_DMA2D_STATE_ERROR;
        /* Process unlocked */
        __HAL_UNLOCK(hdma2d);
        return HAL_ERROR;
      }
      /* Check for the Timeout */
      if (Timeout != HAL_MAX_DELAY)
      {
        if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0U))
        {
          /* Update error code */
          hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT;
          /* Change the DMA2D state */
          hdma2d->State = HAL_DMA2D_STATE_TIMEOUT;
          /* Process unlocked */
          __HAL_UNLOCK(hdma2d);
          return HAL_TIMEOUT;
        }
      }
    }
  }
  /* Polling for CLUT loading (foreground or background): also reached after a
     successful transfer poll above, so both waits can happen in one call */
  /* NOTE(review): both PFCCR START bits are checked; assumes they can be
     OR-ed into one flag -- valid if the bit positions match, TODO confirm */
  layer_start = hdma2d->Instance->FGPFCCR & DMA2D_FGPFCCR_START;
  layer_start |= hdma2d->Instance->BGPFCCR & DMA2D_BGPFCCR_START;
  if (layer_start != 0U)
  {
    /* Get tick */
    tickstart = HAL_GetTick();
    while (__HAL_DMA2D_GET_FLAG(hdma2d, DMA2D_FLAG_CTC) == 0U)
    {
      isrflags = READ_REG(hdma2d->Instance->ISR);
      if ((isrflags & (DMA2D_FLAG_CAE | DMA2D_FLAG_CE | DMA2D_FLAG_TE)) != 0U)
      {
        if ((isrflags & DMA2D_FLAG_CAE) != 0U)
        {
          hdma2d->ErrorCode |= HAL_DMA2D_ERROR_CAE;
        }
        if ((isrflags & DMA2D_FLAG_CE) != 0U)
        {
          hdma2d->ErrorCode |= HAL_DMA2D_ERROR_CE;
        }
        if ((isrflags & DMA2D_FLAG_TE) != 0U)
        {
          hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TE;
        }
        /* Clear the CLUT Access Error, Configuration Error and Transfer Error flags */
        __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_CAE | DMA2D_FLAG_CE | DMA2D_FLAG_TE);
        /* Change DMA2D state */
        hdma2d->State = HAL_DMA2D_STATE_ERROR;
        /* Process unlocked */
        __HAL_UNLOCK(hdma2d);
        return HAL_ERROR;
      }
      /* Check for the Timeout */
      if (Timeout != HAL_MAX_DELAY)
      {
        if (((HAL_GetTick() - tickstart) > Timeout) || (Timeout == 0U))
        {
          /* Update error code */
          hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TIMEOUT;
          /* Change the DMA2D state */
          hdma2d->State = HAL_DMA2D_STATE_TIMEOUT;
          /* Process unlocked */
          __HAL_UNLOCK(hdma2d);
          return HAL_TIMEOUT;
        }
      }
    }
  }
  /* Clear the transfer complete and CLUT loading flags */
  __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_TC | DMA2D_FLAG_CTC);
  /* Change DMA2D state */
  hdma2d->State = HAL_DMA2D_STATE_READY;
  /* Process unlocked */
  __HAL_UNLOCK(hdma2d);
  return HAL_OK;
}
/**
 * @brief Handle DMA2D interrupt request.
 * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the DMA2D.
 * @retval None
 */
void HAL_DMA2D_IRQHandler(DMA2D_HandleTypeDef *hdma2d)
{
  /* Snapshot the status and control registers once; every flag test below
     works on this snapshot so a flag raised mid-handler is deferred to the
     next interrupt */
  uint32_t isrflags = READ_REG(hdma2d->Instance->ISR);
  uint32_t crflags = READ_REG(hdma2d->Instance->CR);
  /* Transfer Error Interrupt management ***************************************/
  if ((isrflags & DMA2D_FLAG_TE) != 0U)
  {
    if ((crflags & DMA2D_IT_TE) != 0U)
    {
      /* Disable the transfer Error interrupt */
      __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_TE);
      /* Update error code */
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_TE;
      /* Clear the transfer error flag */
      __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_TE);
      /* Change DMA2D state */
      hdma2d->State = HAL_DMA2D_STATE_ERROR;
      /* Process Unlocked */
      __HAL_UNLOCK(hdma2d);
      if (hdma2d->XferErrorCallback != NULL)
      {
        /* Transfer error Callback */
        hdma2d->XferErrorCallback(hdma2d);
      }
    }
  }
  /* Configuration Error Interrupt management **********************************/
  if ((isrflags & DMA2D_FLAG_CE) != 0U)
  {
    if ((crflags & DMA2D_IT_CE) != 0U)
    {
      /* Disable the Configuration Error interrupt */
      __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_CE);
      /* Clear the Configuration error flag */
      __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_CE);
      /* Update error code */
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_CE;
      /* Change DMA2D state */
      hdma2d->State = HAL_DMA2D_STATE_ERROR;
      /* Process Unlocked */
      __HAL_UNLOCK(hdma2d);
      if (hdma2d->XferErrorCallback != NULL)
      {
        /* Transfer error Callback */
        hdma2d->XferErrorCallback(hdma2d);
      }
    }
  }
  /* CLUT access Error Interrupt management ***********************************/
  if ((isrflags & DMA2D_FLAG_CAE) != 0U)
  {
    if ((crflags & DMA2D_IT_CAE) != 0U)
    {
      /* Disable the CLUT access error interrupt */
      __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_CAE);
      /* Clear the CLUT access error flag */
      __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_CAE);
      /* Update error code */
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_CAE;
      /* Change DMA2D state */
      hdma2d->State = HAL_DMA2D_STATE_ERROR;
      /* Process Unlocked */
      __HAL_UNLOCK(hdma2d);
      if (hdma2d->XferErrorCallback != NULL)
      {
        /* Transfer error Callback */
        hdma2d->XferErrorCallback(hdma2d);
      }
    }
  }
  /* Transfer watermark Interrupt management **********************************/
  if ((isrflags & DMA2D_FLAG_TW) != 0U)
  {
    if ((crflags & DMA2D_IT_TW) != 0U)
    {
      /* Disable the transfer watermark interrupt (one-shot: re-armed by
         HAL_DMA2D_ProgramLineEvent) */
      __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_TW);
      /* Clear the transfer watermark flag */
      __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_TW);
      /* Transfer watermark Callback */
#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1)
      hdma2d->LineEventCallback(hdma2d);
#else
      HAL_DMA2D_LineEventCallback(hdma2d);
#endif /* USE_HAL_DMA2D_REGISTER_CALLBACKS */
    }
  }
  /* Transfer Complete Interrupt management ************************************/
  if ((isrflags & DMA2D_FLAG_TC) != 0U)
  {
    if ((crflags & DMA2D_IT_TC) != 0U)
    {
      /* Disable the transfer complete interrupt */
      __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_TC);
      /* Clear the transfer complete flag */
      __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_TC);
      /* Update error code (HAL_DMA2D_ERROR_NONE is presumably zero, so this
         OR keeps ErrorCode unchanged; kept for symmetry with error paths) */
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_NONE;
      /* Change DMA2D state */
      hdma2d->State = HAL_DMA2D_STATE_READY;
      /* Process Unlocked */
      __HAL_UNLOCK(hdma2d);
      if (hdma2d->XferCpltCallback != NULL)
      {
        /* Transfer complete Callback */
        hdma2d->XferCpltCallback(hdma2d);
      }
    }
  }
  /* CLUT Transfer Complete Interrupt management ******************************/
  if ((isrflags & DMA2D_FLAG_CTC) != 0U)
  {
    if ((crflags & DMA2D_IT_CTC) != 0U)
    {
      /* Disable the CLUT transfer complete interrupt */
      __HAL_DMA2D_DISABLE_IT(hdma2d, DMA2D_IT_CTC);
      /* Clear the CLUT transfer complete flag */
      __HAL_DMA2D_CLEAR_FLAG(hdma2d, DMA2D_FLAG_CTC);
      /* Update error code */
      hdma2d->ErrorCode |= HAL_DMA2D_ERROR_NONE;
      /* Change DMA2D state */
      hdma2d->State = HAL_DMA2D_STATE_READY;
      /* Process Unlocked */
      __HAL_UNLOCK(hdma2d);
      /* CLUT Transfer complete Callback */
#if (USE_HAL_DMA2D_REGISTER_CALLBACKS == 1)
      hdma2d->CLUTLoadingCpltCallback(hdma2d);
#else
      HAL_DMA2D_CLUTLoadingCpltCallback(hdma2d);
#endif /* USE_HAL_DMA2D_REGISTER_CALLBACKS */
    }
  }
}
/**
 * @brief Transfer watermark callback (weak default: does nothing).
 * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the DMA2D.
 * @retval None
 */
__weak void HAL_DMA2D_LineEventCallback(DMA2D_HandleTypeDef *hdma2d)
{
  /* Argument intentionally unused in this default implementation */
  (void)hdma2d;
  /* NOTE: do not edit this weak stub; override HAL_DMA2D_LineEventCallback
     in the user application to react to the transfer-watermark event. */
}
/**
 * @brief CLUT Transfer Complete callback (weak default: does nothing).
 * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the DMA2D.
 * @retval None
 */
__weak void HAL_DMA2D_CLUTLoadingCpltCallback(DMA2D_HandleTypeDef *hdma2d)
{
  /* Argument intentionally unused in this default implementation */
  (void)hdma2d;
  /* NOTE: do not edit this weak stub; override
     HAL_DMA2D_CLUTLoadingCpltCallback in the user application instead. */
}
/**
* @}
*/
/** @defgroup DMA2D_Exported_Functions_Group3 Peripheral Control functions
* @brief Peripheral Control functions
*
@verbatim
===============================================================================
##### Peripheral Control functions #####
===============================================================================
[..] This section provides functions allowing to:
(+) Configure the DMA2D foreground or background layer parameters.
(+) Configure the DMA2D CLUT transfer.
(+) Configure the line watermark
(+) Configure the dead time value.
(+) Enable or disable the dead time value functionality.
@endverbatim
* @{
*/
/**
 * @brief Configure the DMA2D Layer according to the specified
 * parameters in the DMA2D_HandleTypeDef.
 * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the DMA2D.
 * @param LayerIdx DMA2D Layer index.
 * This parameter can be one of the following values:
 * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1)
 * @retval HAL status
 */
HAL_StatusTypeDef HAL_DMA2D_ConfigLayer(DMA2D_HandleTypeDef *hdma2d, uint32_t LayerIdx)
{
  DMA2D_LayerCfgTypeDef *pLayerCfg;
  uint32_t regMask;
  uint32_t regValue;
  /* Check the parameters */
  assert_param(IS_DMA2D_LAYER(LayerIdx));
  assert_param(IS_DMA2D_OFFSET(hdma2d->LayerCfg[LayerIdx].InputOffset));
  if (hdma2d->Init.Mode != DMA2D_R2M)
  {
    assert_param(IS_DMA2D_INPUT_COLOR_MODE(hdma2d->LayerCfg[LayerIdx].InputColorMode));
    if (hdma2d->Init.Mode != DMA2D_M2M)
    {
      assert_param(IS_DMA2D_ALPHA_MODE(hdma2d->LayerCfg[LayerIdx].AlphaMode));
    }
  }
  assert_param(IS_DMA2D_ALPHA_INVERTED(hdma2d->LayerCfg[LayerIdx].AlphaInverted));
  assert_param(IS_DMA2D_RB_SWAP(hdma2d->LayerCfg[LayerIdx].RedBlueSwap));
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  pLayerCfg = &hdma2d->LayerCfg[LayerIdx];
  /* Prepare the value to be written to the BGPFCCR or FGPFCCR register */
  /* NOTE(review): the value/mask are built with the BGPFCCR_* bit definitions
     for both layers; assumes the FG and BG field layouts match -- TODO
     confirm against the reference manual */
  regValue = pLayerCfg->InputColorMode | (pLayerCfg->AlphaMode << DMA2D_BGPFCCR_AM_Pos) | \
             (pLayerCfg->AlphaInverted << DMA2D_BGPFCCR_AI_Pos) | (pLayerCfg->RedBlueSwap << DMA2D_BGPFCCR_RBS_Pos);
  regMask = (DMA2D_BGPFCCR_CM | DMA2D_BGPFCCR_AM | DMA2D_BGPFCCR_ALPHA | DMA2D_BGPFCCR_AI | DMA2D_BGPFCCR_RBS);
  /* For alpha-only input formats (A4/A8), InputAlpha already carries a
     pre-positioned value; otherwise it is a plain alpha byte to shift */
  if ((pLayerCfg->InputColorMode == DMA2D_INPUT_A4) || (pLayerCfg->InputColorMode == DMA2D_INPUT_A8))
  {
    regValue |= (pLayerCfg->InputAlpha & DMA2D_BGPFCCR_ALPHA);
  }
  else
  {
    regValue |= (pLayerCfg->InputAlpha << DMA2D_BGPFCCR_ALPHA_Pos);
  }
  /* Configure the background DMA2D layer */
  if (LayerIdx == DMA2D_BACKGROUND_LAYER)
  {
    /* Write DMA2D BGPFCCR register */
    MODIFY_REG(hdma2d->Instance->BGPFCCR, regMask, regValue);
    /* DMA2D BGOR register configuration -------------------------------------*/
    WRITE_REG(hdma2d->Instance->BGOR, pLayerCfg->InputOffset);
    /* DMA2D BGCOLR register configuration -------------------------------------*/
    if ((pLayerCfg->InputColorMode == DMA2D_INPUT_A4) || (pLayerCfg->InputColorMode == DMA2D_INPUT_A8))
    {
      WRITE_REG(hdma2d->Instance->BGCOLR, pLayerCfg->InputAlpha & (DMA2D_BGCOLR_BLUE | DMA2D_BGCOLR_GREEN | \
                                                                   DMA2D_BGCOLR_RED));
    }
  }
  /* Configure the foreground DMA2D layer */
  else
  {
    /* Write DMA2D FGPFCCR register */
    MODIFY_REG(hdma2d->Instance->FGPFCCR, regMask, regValue);
    /* DMA2D FGOR register configuration -------------------------------------*/
    WRITE_REG(hdma2d->Instance->FGOR, pLayerCfg->InputOffset);
    /* DMA2D FGCOLR register configuration -------------------------------------*/
    if ((pLayerCfg->InputColorMode == DMA2D_INPUT_A4) || (pLayerCfg->InputColorMode == DMA2D_INPUT_A8))
    {
      WRITE_REG(hdma2d->Instance->FGCOLR, pLayerCfg->InputAlpha & (DMA2D_FGCOLR_BLUE | DMA2D_FGCOLR_GREEN | \
                                                                   DMA2D_FGCOLR_RED));
    }
  }
  /* Initialize the DMA2D state*/
  hdma2d->State = HAL_DMA2D_STATE_READY;
  /* Process unlocked */
  __HAL_UNLOCK(hdma2d);
  return HAL_OK;
}
/**
 * @brief Configure the DMA2D CLUT Transfer.
 * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the DMA2D.
 * @param CLUTCfg Pointer to a DMA2D_CLUTCfgTypeDef structure that contains
 * the configuration information for the color look up table.
 * @param LayerIdx DMA2D Layer index.
 * This parameter can be one of the following values:
 * DMA2D_BACKGROUND_LAYER(0) / DMA2D_FOREGROUND_LAYER(1)
 * @note API obsolete and maintained for compatibility with legacy. User is invited
 * to resort to HAL_DMA2D_CLUTStartLoad() instead to benefit from code compactness,
 * code size and improved heap usage.
 * @note Unlike HAL_DMA2D_CLUTStartLoad(), this only programs the CLUT address,
 * size and color mode; it does not start the CLUT loading itself.
 * @retval HAL status
 */
HAL_StatusTypeDef HAL_DMA2D_ConfigCLUT(DMA2D_HandleTypeDef *hdma2d, DMA2D_CLUTCfgTypeDef CLUTCfg, uint32_t LayerIdx)
{
  /* Check the parameters */
  assert_param(IS_DMA2D_LAYER(LayerIdx));
  assert_param(IS_DMA2D_CLUT_CM(CLUTCfg.CLUTColorMode));
  assert_param(IS_DMA2D_CLUT_SIZE(CLUTCfg.Size));
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Configure the CLUT of the background DMA2D layer */
  if (LayerIdx == DMA2D_BACKGROUND_LAYER)
  {
    /* Write background CLUT memory address */
    WRITE_REG(hdma2d->Instance->BGCMAR, (uint32_t)CLUTCfg.pCLUT);
    /* Write background CLUT size and CLUT color mode */
    MODIFY_REG(hdma2d->Instance->BGPFCCR, (DMA2D_BGPFCCR_CS | DMA2D_BGPFCCR_CCM),
               ((CLUTCfg.Size << DMA2D_BGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_BGPFCCR_CCM_Pos)));
  }
  /* Configure the CLUT of the foreground DMA2D layer */
  else
  {
    /* Write foreground CLUT memory address */
    WRITE_REG(hdma2d->Instance->FGCMAR, (uint32_t)CLUTCfg.pCLUT);
    /* Write foreground CLUT size and CLUT color mode */
    MODIFY_REG(hdma2d->Instance->FGPFCCR, (DMA2D_FGPFCCR_CS | DMA2D_FGPFCCR_CCM),
               ((CLUTCfg.Size << DMA2D_FGPFCCR_CS_Pos) | (CLUTCfg.CLUTColorMode << DMA2D_FGPFCCR_CCM_Pos)));
  }
  /* Set the DMA2D state to Ready*/
  hdma2d->State = HAL_DMA2D_STATE_READY;
  /* Process unlocked */
  __HAL_UNLOCK(hdma2d);
  return HAL_OK;
}
/**
 * @brief Configure the line watermark.
 * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the DMA2D.
 * @param Line Line Watermark configuration (maximum 16-bit long value expected).
 * @note HAL_DMA2D_ProgramLineEvent() API enables the transfer watermark interrupt.
 * @note The transfer watermark interrupt is disabled once it has occurred.
 * @retval HAL status (HAL_ERROR when Line exceeds the LWR line field)
 */
HAL_StatusTypeDef HAL_DMA2D_ProgramLineEvent(DMA2D_HandleTypeDef *hdma2d, uint32_t Line)
{
  /* Guard clause: reject watermark values that do not fit the LWR field */
  if (Line > DMA2D_LWR_LW)
  {
    return HAL_ERROR;
  }
  /* Process locked */
  __HAL_LOCK(hdma2d);
  /* Change DMA2D peripheral state */
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Program the line watermark, then arm the watermark interrupt */
  WRITE_REG(hdma2d->Instance->LWR, Line);
  __HAL_DMA2D_ENABLE_IT(hdma2d, DMA2D_IT_TW);
  /* Back to ready state */
  hdma2d->State = HAL_DMA2D_STATE_READY;
  /* Process unlocked */
  __HAL_UNLOCK(hdma2d);
  return HAL_OK;
}
/**
 * @brief Enable DMA2D dead time feature.
 * @param hdma2d DMA2D handle.
 * @retval HAL status (always HAL_OK on this path; __HAL_LOCK may return busy)
 */
HAL_StatusTypeDef HAL_DMA2D_EnableDeadTime(DMA2D_HandleTypeDef *hdma2d)
{
  /* Process Locked */
  __HAL_LOCK(hdma2d);
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Set DMA2D_AMTCR EN bit: activates the dead time configured via
     HAL_DMA2D_ConfigDeadTime() */
  SET_BIT(hdma2d->Instance->AMTCR, DMA2D_AMTCR_EN);
  hdma2d->State = HAL_DMA2D_STATE_READY;
  /* Process Unlocked */
  __HAL_UNLOCK(hdma2d);
  return HAL_OK;
}
/**
 * @brief Disable DMA2D dead time feature.
 * @param hdma2d DMA2D handle.
 * @retval HAL status (always HAL_OK on this path; __HAL_LOCK may return busy)
 */
HAL_StatusTypeDef HAL_DMA2D_DisableDeadTime(DMA2D_HandleTypeDef *hdma2d)
{
  /* Process Locked */
  __HAL_LOCK(hdma2d);
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Clear DMA2D_AMTCR EN bit: the DT field is preserved so the feature can
     be re-enabled later without reconfiguration */
  CLEAR_BIT(hdma2d->Instance->AMTCR, DMA2D_AMTCR_EN);
  hdma2d->State = HAL_DMA2D_STATE_READY;
  /* Process Unlocked */
  __HAL_UNLOCK(hdma2d);
  return HAL_OK;
}
/**
 * @brief Configure dead time.
 * @note The dead time value represents the guaranteed minimum number of cycles between
 * two consecutive transactions on the AHB bus.
 * @param hdma2d DMA2D handle.
 * @param DeadTime dead time value (8-bit, written to the AMTCR DT field).
 * @retval HAL status
 */
HAL_StatusTypeDef HAL_DMA2D_ConfigDeadTime(DMA2D_HandleTypeDef *hdma2d, uint8_t DeadTime)
{
  /* Process Locked */
  __HAL_LOCK(hdma2d);
  hdma2d->State = HAL_DMA2D_STATE_BUSY;
  /* Set DMA2D_AMTCR DT field; the EN bit is untouched, so dead time must
     still be enabled separately via HAL_DMA2D_EnableDeadTime() */
  MODIFY_REG(hdma2d->Instance->AMTCR, DMA2D_AMTCR_DT, (((uint32_t) DeadTime) << DMA2D_AMTCR_DT_Pos));
  hdma2d->State = HAL_DMA2D_STATE_READY;
  /* Process Unlocked */
  __HAL_UNLOCK(hdma2d);
  return HAL_OK;
}
/**
* @}
*/
/** @defgroup DMA2D_Exported_Functions_Group4 Peripheral State and Error functions
* @brief Peripheral State functions
*
@verbatim
===============================================================================
##### Peripheral State and Errors functions #####
===============================================================================
[..]
This subsection provides functions allowing to:
(+) Get the DMA2D state
(+) Get the DMA2D error code
@endverbatim
* @{
*/
/**
 * @brief Return the DMA2D state
 * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the DMA2D.
 * @retval HAL state currently recorded in the handle
 */
HAL_DMA2D_StateTypeDef HAL_DMA2D_GetState(DMA2D_HandleTypeDef *hdma2d)
{
  const HAL_DMA2D_StateTypeDef current_state = hdma2d->State;
  return current_state;
}
/**
 * @brief Return the DMA2D error code
 * @param hdma2d pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for DMA2D.
 * @retval DMA2D Error Code accumulated in the handle
 */
uint32_t HAL_DMA2D_GetError(DMA2D_HandleTypeDef *hdma2d)
{
  const uint32_t error_code = hdma2d->ErrorCode;
  return error_code;
}
/**
* @}
*/
/**
* @}
*/
/** @defgroup DMA2D_Private_Functions DMA2D Private Functions
* @{
*/
/**
 * @brief Set the DMA2D transfer parameters.
 * @param hdma2d Pointer to a DMA2D_HandleTypeDef structure that contains
 * the configuration information for the specified DMA2D.
 * @param pdata The source memory Buffer address, or in R2M mode the output
 * color given as an ARGB8888 value.
 * @param DstAddress The destination memory Buffer address
 * @param Width The width of data to be transferred from source to destination.
 * @param Height The height of data to be transferred from source to destination.
 * @retval None
 */
static void DMA2D_SetConfig(DMA2D_HandleTypeDef *hdma2d, uint32_t pdata, uint32_t DstAddress, uint32_t Width,
                            uint32_t Height)
{
  uint32_t tmp;
  uint32_t tmp1;
  uint32_t tmp2;
  uint32_t tmp3;
  uint32_t tmp4;
  /* Configure DMA2D data size */
  MODIFY_REG(hdma2d->Instance->NLR, (DMA2D_NLR_NL | DMA2D_NLR_PL), (Height | (Width << DMA2D_NLR_PL_Pos)));
  /* Configure DMA2D destination address */
  WRITE_REG(hdma2d->Instance->OMAR, DstAddress);
  /* Register to memory DMA2D mode selected */
  if (hdma2d->Init.Mode == DMA2D_R2M)
  {
    /* Extract the A/R/G/B channels of the ARGB8888 color still in their
       original bit positions */
    tmp1 = pdata & DMA2D_OCOLR_ALPHA_1;
    tmp2 = pdata & DMA2D_OCOLR_RED_1;
    tmp3 = pdata & DMA2D_OCOLR_GREEN_1;
    tmp4 = pdata & DMA2D_OCOLR_BLUE_1;
    /* Prepare the value to be written to the OCOLR register according to the color mode:
       each channel is truncated to the target bit depth, then repacked */
    if (hdma2d->Init.ColorMode == DMA2D_OUTPUT_ARGB8888)
    {
      tmp = (tmp3 | tmp2 | tmp1 | tmp4);
    }
    else if (hdma2d->Init.ColorMode == DMA2D_OUTPUT_RGB888)
    {
      tmp = (tmp3 | tmp2 | tmp4);
    }
    else if (hdma2d->Init.ColorMode == DMA2D_OUTPUT_RGB565)
    {
      /* Keep R5 G6 B5 most significant bits, pack as [15:11]=R [10:5]=G [4:0]=B */
      tmp2 = (tmp2 >> 19U);
      tmp3 = (tmp3 >> 10U);
      tmp4 = (tmp4 >> 3U);
      tmp = ((tmp3 << 5U) | (tmp2 << 11U) | tmp4);
    }
    else if (hdma2d->Init.ColorMode == DMA2D_OUTPUT_ARGB1555)
    {
      /* Keep A1 R5 G5 B5, pack as [15]=A [14:10]=R [9:5]=G [4:0]=B */
      tmp1 = (tmp1 >> 31U);
      tmp2 = (tmp2 >> 19U);
      tmp3 = (tmp3 >> 11U);
      tmp4 = (tmp4 >> 3U);
      tmp = ((tmp3 << 5U) | (tmp2 << 10U) | (tmp1 << 15U) | tmp4);
    }
    else /* Dhdma2d->Init.ColorMode = DMA2D_OUTPUT_ARGB4444 */
    {
      /* Keep A4 R4 G4 B4, pack as [15:12]=A [11:8]=R [7:4]=G [3:0]=B */
      tmp1 = (tmp1 >> 28U);
      tmp2 = (tmp2 >> 20U);
      tmp3 = (tmp3 >> 12U);
      tmp4 = (tmp4 >> 4U);
      tmp = ((tmp3 << 4U) | (tmp2 << 8U) | (tmp1 << 12U) | tmp4);
    }
    /* Write to DMA2D OCOLR register */
    WRITE_REG(hdma2d->Instance->OCOLR, tmp);
  }
  else if (hdma2d->Init.Mode == DMA2D_M2M_BLEND_FG) /*M2M_blending with fixed color FG DMA2D Mode selected*/
  {
    WRITE_REG(hdma2d->Instance->BGMAR, pdata);
  }
  else /* M2M, M2M_PFC,M2M_Blending or M2M_blending with fixed color BG DMA2D Mode */
  {
    /* Configure DMA2D source address */
    WRITE_REG(hdma2d->Instance->FGMAR, pdata);
  }
}
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
#endif /* DMA2D */
#endif /* HAL_DMA2D_MODULE_ENABLED */
|
480431c041e8fe472c2daacd5e13228c86082c51
|
0744dcc5394cebf57ebcba343747af6871b67017
|
/external/iotjs/config/tizenrt/artik05x/app/jerry_port.c
|
886f749bbd6e43e3883d16881f27275c029dc5f2
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Samsung/TizenRT
|
96abf62f1853f61fcf91ff14671a5e0c6ca48fdb
|
1a5c2e00a4b1bbf4c505bbf5cc6a8259e926f686
|
refs/heads/master
| 2023-08-31T08:59:33.327998
| 2023-08-08T06:09:20
| 2023-08-31T04:38:20
| 82,517,252
| 590
| 719
|
Apache-2.0
| 2023-09-14T06:54:49
| 2017-02-20T04:38:30
|
C
|
UTF-8
|
C
| false
| false
| 1,947
|
c
|
jerry_port.c
|
/* Copyright 2017-present Samsung Electronics Co., Ltd. and other contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "jerryscript-ext/handler.h"
#include "jerryscript-port.h"
#include "jerryscript.h"
/**
 * Aborts the program.
 *
 * @param code fatal error code reported by the engine; the process always
 *             terminates with exit status 1 regardless of its value.
 */
void jerry_port_fatal(jerry_fatal_code_t code) {
  (void)code; /* intentionally unused: suppresses -Wunused-parameter */
  exit(1);
} /* jerry_port_fatal */
/**
 * Provide log message implementation for the engine.
 *
 * All messages are discarded: IoT.js does not support log levels yet, so
 * this port intentionally drains the engine's log output.
 */
void jerry_port_log(jerry_log_level_t level, /**< log level */
                    const char *format,      /**< format string */
                    ...) {                   /**< parameters */
  /* Intentionally unused: suppresses -Wunused-parameter */
  (void)level;
  (void)format;
} /* jerry_port_log */
/**
 * Dummy function to get the time zone.
 *
 * Always reports UTC with no daylight saving adjustment.
 *
 * @return true
 */
bool jerry_port_get_time_zone(jerry_time_zone_t *tz_p) {
  /* This port always lives in UTC. */
  tz_p->daylight_saving_time = 0;
  tz_p->offset = 0;
  return true;
} /* jerry_port_get_time_zone */
/**
 * Dummy function to get the current time.
 *
 * This port has no wall clock; the engine always sees time zero.
 *
 * @return 0
 */
double jerry_port_get_current_time(void) {
  return 0.0;
} /* jerry_port_get_current_time */
/**
 * Provide the implementation of jerryx_port_handler_print_char.
 * Emits a single character on standard output.
 */
void jerryx_port_handler_print_char(char c) { /**< the character to print */
  putchar(c);
} /* jerryx_port_handler_print_char */
|
ef03a46ef289f812f24c3065195d0702a301ec75
|
f7dc806f341ef5dbb0e11252a4693003a66853d5
|
/thirdparty/thorvg/src/loaders/svg/tvgXmlParser.h
|
7333bb09fb3f3fe130e0af89f932f162fd9c9194
|
[
"LicenseRef-scancode-free-unknown",
"MIT",
"CC-BY-4.0",
"OFL-1.1",
"Bison-exception-2.2",
"CC0-1.0",
"LicenseRef-scancode-nvidia-2002",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"BSL-1.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unicode",
"BSD-2-Clause",
"FTL",
"GPL-3.0-or-later",
"Bitstream-Vera",
"Zlib",
"MPL-2.0",
"MIT-Modern-Variant",
"JSON",
"Libpng"
] |
permissive
|
godotengine/godot
|
8a2419750f4851d1426a8f3bcb52cac5c86f23c2
|
970be7afdc111ccc7459d7ef3560de70e6d08c80
|
refs/heads/master
| 2023-08-21T14:37:00.262883
| 2023-08-21T06:26:15
| 2023-08-21T06:26:15
| 15,634,981
| 68,852
| 18,388
|
MIT
| 2023-09-14T21:42:16
| 2014-01-04T16:05:36
|
C++
|
UTF-8
|
C
| false
| false
| 2,875
|
h
|
tvgXmlParser.h
|
/*
* Copyright (c) 2020 - 2023 the ThorVG project. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _TVG_SIMPLE_XML_PARSER_H_
#define _TVG_SIMPLE_XML_PARSER_H_
#include "tvgSvgLoaderCommon.h"
#define NUMBER_OF_XML_ENTITIES 8

/* XML escape sequences and the characters they decode to. The two tables are
   parallel: xmlEntityLength[i] == strlen(xmlEntity[i]). The escaped forms had
   been HTML-decoded by a text-extraction pipeline (e.g. the quot entity had
   collapsed to a bare `"`), which silently broke the parser's entity table;
   restored here from the recorded lengths 6,6,6,5,4,4,6,6. */
const char* const xmlEntity[] = {"&quot;", "&nbsp;", "&apos;", "&amp;", "&lt;", "&gt;", "&#035;", "&#039;"};
const int xmlEntityLength[] = {6, 6, 6, 5, 4, 4, 6, 6};
/* Token categories reported by the simple XML tokenizer. */
enum class SimpleXMLType
{
    Open = 0,    //!< \<tag attribute="value"\>
    OpenEmpty,   //!< \<tag attribute="value" /\>
    Close,       //!< \</tag\>
    Data,        //!< tag text data
    CData,       //!< \<![cdata[something]]\>
    Error,       //!< error contents
    Processing,  //!< \<?xml ... ?\> \<?php .. ?\>
    Doctype,     //!< \<!doctype html
    Comment,     //!< \<!-- something --\>
    Ignored,     //!< whatever is ignored by parser, like whitespace
    DoctypeChild //!< \<!doctype_child
};

/* Per-token callback; presumably returning false aborts the parse (based on
   the bool return type) -- verify against the parser implementation. */
typedef bool (*simpleXMLCb)(void* data, SimpleXMLType type, const char* content, unsigned int length);
/* Per-attribute callback invoked with each key/value pair. */
typedef bool (*simpleXMLAttributeCb)(void* data, const char* key, const char* value);

/* Tokenizer/parsing entry points; `data` is forwarded opaquely to the
   callbacks. Buffer arguments are (pointer, length) pairs, not
   NUL-terminated strings. */
bool simpleXmlParseAttributes(const char* buf, unsigned bufLength, simpleXMLAttributeCb func, const void* data);
bool simpleXmlParse(const char* buf, unsigned bufLength, bool strip, simpleXMLCb func, const void* data);
bool simpleXmlParseW3CAttribute(const char* buf, unsigned bufLength, simpleXMLAttributeCb func, const void* data);
const char* simpleXmlParseCSSAttribute(const char* buf, unsigned bufLength, char** tag, char** name, const char** attrs, unsigned* attrsLength);
const char* simpleXmlFindAttributesTag(const char* buf, unsigned bufLength);
bool isIgnoreUnsupportedLogElements(const char* tagName);
const char* simpleXmlNodeTypeToString(SvgNodeType type);
#endif //_TVG_SIMPLE_XML_PARSER_H_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.