blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 4
721
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 5
91
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 321
values | visit_date
timestamp[ns]date 2016-08-12 09:31:09
2023-09-06 10:45:07
| revision_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| committer_date
timestamp[ns]date 2010-09-28 14:01:40
2023-09-06 06:22:19
| github_id
int64 426
681M
| star_events_count
int64 101
243k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[ns]date 2012-06-28 18:51:49
2023-09-14 21:59:16
⌀ | gha_created_at
timestamp[ns]date 2008-02-11 22:55:26
2023-08-10 11:14:58
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 26
values | language
stringclasses 2
values | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 6
10.2M
| extension
stringclasses 115
values | filename
stringlengths 3
113
| content
stringlengths 6
10.2M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0771c3db51ef258d59270452da5f9df0cfef2ec3
|
61da6274995cf914291af51bd02e60f408fdfedd
|
/src/num/multind.c
|
193e6afe2d88e632382b3ed2bca6c563dd4e8b30
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
mrirecon/bart
|
360d518b4c79836d506803aa4a77e8e252ab820b
|
a3c9dc313f79c4c52f1ba3e617d5831ef088ddf7
|
refs/heads/master
| 2023-08-31T11:01:08.932824
| 2023-08-30T12:15:35
| 2023-08-30T13:51:18
| 23,212,230
| 264
| 185
|
BSD-3-Clause
| 2023-08-03T18:43:36
| 2014-08-22T03:57:09
|
C
|
UTF-8
|
C
| false
| false
| 46,806
|
c
|
multind.c
|
/* Copyright 2013-2015 The Regents of the University of California.
* Copyright 2016-2020. Uecker Lab. University Medical Center Göttingen.
* Copyright 2022. TU Graz. Institute of Biomedical Imaging.
* Copyright 2017. Intel Corporation.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2020 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2019-2020 Sebastian Rosenzweig
* 2013 Frank Ong <frankong@berkeley.edu>
* 2017 Michael J. Anderson <michael.j.anderson@intel.com>
*
* Generic operations on multi-dimensional arrays. Most functions
* come in two flavours:
*
 * 1. A basic version which takes the number of dimensions, an array
 * of long integers specifying the size of each dimension, the pointers
 * to the data, and the size of each element and other required parameters.
* The data is assumed to be stored in column-major format.
*
* 2. An extended version which takes an array of long integers which
* specifies the strides for each argument.
*
* All functions should work on CPU and GPU and md_copy can be used
* to copy between CPU and GPU.
*
*/
#define _GNU_SOURCE
#include <string.h>
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _WIN32
#include <malloc.h>
#else
#include <alloca.h>
#endif
#include <strings.h>
#include "misc/misc.h"
#include "misc/types.h"
#include "misc/debug.h"
#include "misc/nested.h"
#include "num/optimize.h"
#ifdef USE_CUDA
#include "num/gpuops.h"
#include "num/gpukrnls.h"
#include "num/gpukrnls_copy.h"
#endif
#include "multind.h"
/**
 * Create a GPU thread context for the given reference pointer.
 *
 * In CUDA builds, a context is created when ref is NULL or resides on
 * the GPU; otherwise (and in CPU-only builds) NULL is returned.
 */
extern struct cuda_threads_s* gpu_threads_create(const void* ref)
{
#ifdef USE_CUDA
	if ((NULL == ref) || cuda_ondevice(ref))
		return cuda_threads_create();
#else
	UNUSED(ref);
#endif
	return NULL;
}
/**
 * Enter a GPU thread context (no-op in CPU-only builds).
 */
extern void gpu_threads_enter(struct cuda_threads_s* x)
{
#ifdef USE_CUDA
	cuda_threads_enter(x);
#else
	UNUSED(x);
#endif
}
/**
 * Leave a GPU thread context (no-op in CPU-only builds).
 */
extern void gpu_threads_leave(struct cuda_threads_s* x)
{
#ifdef USE_CUDA
	cuda_threads_leave(x);
#else
	UNUSED(x);
#endif
}
/**
 * Free a GPU thread context created by gpu_threads_create
 * (no-op in CPU-only builds).
 */
extern void gpu_threads_free(struct cuda_threads_s* x)
{
#ifdef USE_CUDA
	cuda_threads_free(x);
#else
	UNUSED(x);
#endif
}
/**
 * Generic function which loops over all dimensions of a set of
 * multi-dimensional arrays and calls a given function for each position.
 *
 * Recurses from the outermost dimension (D - 1) inwards; at D == 0 the
 * callback is invoked once with the fully offset pointers.
 */
void md_nary(int C, int D, const long dim[D], const long* str[C], void* ptr[C], md_nary_fun_t fun)
{
	if (0 == D) {

		NESTED_CALL(fun, (ptr));
		return;
	}

	int d = D - 1;

	for (long p = 0; p < dim[d]; p++) {

		// shift each array's pointer to position p along dimension d
		void* shifted[C];

		for (int c = 0; c < C; c++)
			shifted[c] = ptr[c] + p * str[c][d];

		md_nary(C, d, dim, str, shifted, fun);
	}
}
/**
 * Generic function which loops over all dimensions of a set of
 * multi-dimensional arrays and calls a given function for each position.
 * This function tries to parallelize over the dimensions indicated
 * with flags.
 */
void md_parallel_nary(int C, int D, const long dim[D], unsigned long flags, const long* str[C], void* ptr[C], md_nary_fun_t fun)
{
	// only parallelize over dimensions which are actually > 1
	flags = flags & md_nontriv_dims(D, dim);

	if (0 == flags) {

		md_nary(C, D, dim, str, ptr, fun);
		return;
	}

	// dimensions left for the sequential inner loops
	long dimc[D];
	md_select_dims(D, ~flags, dimc, dim);

	// Collect all parallel dimensions
	int nparallel = 0;
	int parallel_b[D];
	long parallel_dim[D];

	long total_iterations = 1L;

	while (0 != flags) {

		// extract the lowest set bit
		int b = ffsl(flags & -flags) - 1;
		assert(MD_IS_SET(flags, b));

		flags = MD_CLEAR(flags, b);

		// fix: dim[b] is long, so use %ld (was %d)
		debug_printf(DP_DEBUG4, "Parallelize: %ld\n", dim[b]);

		parallel_b[nparallel] = b;
		parallel_dim[nparallel] = dim[b];

		total_iterations *= parallel_dim[nparallel];
		nparallel++;
	}

	// GPU context shared by all worker threads (NULL on CPU)
	struct cuda_threads_s* gpu_stat = gpu_threads_create(ptr[0]);

#ifdef _OPENMP
	// split the available threads between this (outer) loop
	// and any nested (inner) parallel regions
	int old_threads = omp_get_max_threads();
	int outer_threads = MAX(1, MIN(old_threads, total_iterations));
	int inner_threads = MAX(1, old_threads / outer_threads);

	omp_set_num_threads(outer_threads);
#endif
#pragma omp parallel for
	for (long i = 0; i < total_iterations; i++) {
#ifdef _OPENMP
		omp_set_num_threads(inner_threads);
#endif
		gpu_threads_enter(gpu_stat);

		// Recover place in parallel iteration space
		long iter_i[D];
		long ii = i;

		for (int p = nparallel - 1; p >= 0; p--) {

			iter_i[p] = ii % parallel_dim[p];
			ii /= parallel_dim[p];
		}

		// offset each array's pointer to this thread's slice
		void* moving_ptr[C];

		for (int j = 0; j < C; j++) {

			moving_ptr[j] = ptr[j];

			for (int p = 0; p < nparallel; p++)
				moving_ptr[j] += iter_i[p] * str[j][parallel_b[p]];
		}

		md_nary(C, D, dimc, str, moving_ptr, fun);

		gpu_threads_leave(gpu_stat);
	}
#ifdef _OPENMP
	omp_set_num_threads(old_threads);
#endif
	gpu_threads_free(gpu_stat);
}
/*
 * Recursive worker for md_loop / md_parallel_loop: iterates over every
 * dimension NOT set in flags (flagged positions are taken as fixed in pos)
 * and calls fun with each complete position vector.
 */
static void md_loop_r(int D, const long dim[D], unsigned long flags, long pos[D], md_loop_fun_t fun)
{
	if (0 == D) {

		NESTED_CALL(fun, (pos));
		return;
	}

	int d = D - 1;

	if (MD_IS_SET(flags, d)) {

		// position along d is fixed by the caller
		md_loop_r(d, dim, flags, pos, fun);

	} else {

		for (pos[d] = 0; pos[d] < dim[d]; pos[d]++)
			md_loop_r(d, dim, flags, pos, fun);
	}
}
/**
 * Generic function which loops over all dimensions and calls a given
 * function passing the current indices as argument.
 *
 * Runs fun(data, position) for all position in dim
 *
 * The dimensions selected by flags are distributed over OpenMP threads;
 * the remaining dimensions are iterated sequentially by md_loop_r.
 */
void md_parallel_loop(int D, const long dim[static D], unsigned long flags, md_loop_fun_t fun)
{
	// only parallelize over non-trivial dimensions
	flags &= md_nontriv_dims(D, dim);

	long pdims[D];
	md_select_dims(D, flags, pdims, dim);

	// size of the parallel iteration space
	long iter = md_calc_size(D, pdims);

#pragma omp parallel for
	for (long i = 0; i < iter; i++) {

		// Recover place in parallel iteration space
		long pos[D];
		long ii = i;

		for (int j = 0; j < D; j++) {

			// pdims[j] is 1 for unflagged dims, so those stay at 0
			pos[j] = ii % pdims[j];
			ii /= pdims[j];
		}

		// iterate sequentially over the unflagged dimensions
		md_loop_r(D, dim, flags, pos, fun);
	}
}
/**
 * Generic function which loops over all dimensions and calls a given
 * function passing the current indices as argument.
 *
 * Runs fun(position) for all position in dim
 */
void md_loop(int D, const long dim[D], md_loop_fun_t fun)
{
	long position[D];

	// no flags set: every dimension is iterated
	md_loop_r(D, dim, 0, position, fun);
}
/**
 * Computes the next position (odometer-style, lowest dimension fastest,
 * restricted to dimensions set in flags). Returns true until the last
 * index has been reached, then resets pos and returns false.
 */
bool md_next(int D, const long dims[D], unsigned long flags, long pos[D])
{
	if (0 == D)
		return false;

	int d = D - 1;

	// try to advance the lower dimensions first
	if (md_next(d, dims, flags, pos))
		return true;

	if (!MD_IS_SET(flags, d))
		return false;

	assert((0 <= pos[d]) && (pos[d] < dims[d]));

	pos[d]++;

	if (pos[d] < dims[d])
		return true;

	// wrapped around: carry into the next dimension (handled by caller)
	pos[d] = 0;
	return false;
}
/**
 * Returns offset for position in a multidimensional array
 *
 * return position[0]*strides[0] + ... + position[D-1]*strides[D-1]
 *
 * @param D number of dimensions
 * @param strides strides array
 * @param position position array
 */
long md_calc_offset(int D, const long strides[D], const long position[D])
{
	long off = 0;

	for (int d = D; d-- > 0; )
		off += position[d] * strides[d];

	return off;
}
/*
 * Worker for md_calc_size: accumulates the product of all dimensions
 * into size (using size_t arithmetic, as the recursive original did)
 * and returns it as long.
 */
static long md_calc_size_r(int D, const long dim[D], size_t size)
{
	size_t total = size;

	for (int d = D - 1; 0 <= d; d--)
		total *= dim[d];

	return total;
}
/**
 * Returns the number of elements
 *
 * return dim[0]*dim[1]*...*dim[D-1]
 *
 * @param D number of dimensions
 * @param dim dimensions array
 */
long md_calc_size(int D, const long dim[D])
{
	// product accumulated in size_t (matching md_calc_size_r), returned as long
	size_t total = 1;

	for (int d = 0; d < D; d++)
		total *= dim[d];

	return total;
}
/**
 * Computes the number of smallest dimensions which are stored
 * contiguously, i.e. which can be accessed as a single block of memory.
 *
 * A dimension counts as contiguous if its stride equals the running
 * element distance, or if it is a singleton (dim == 1).
 */
int md_calc_blockdim(int D, const long dim[D], const long str[D], size_t size)
{
	long expected = size;	// stride a contiguous dimension would have
	int nd = 0;

	while (nd < D) {

		if ((str[nd] != expected) && (1 != dim[nd]))
			break;

		expected *= dim[nd];
		nd++;
	}

	return nd;
}
/**
 * Copy dimensions specified by flags and set remaining dimensions to 1
 *
 * odims = [ 1 idims[1] idims[2] 1 1 idims[5] ]
 *
 * @param D number of dimensions
 * @param flags bitmask specifying which dimensions to copy
 * @param odims output dimensions
 * @param idims input dimensions
 */
void md_select_dims(int D, unsigned long flags, long odims[D], const long idims[D])
{
	for (int d = 0; d < D; d++)
		odims[d] = MD_IS_SET(flags, d) ? idims[d] : 1;
}
/**
 * Copy dimensions
 *
 * odims[i] = idims[i]
 */
void md_copy_dims(int D, long odims[D], const long idims[D])
{
	for (int d = 0; d < D; d++)
		odims[d] = idims[d];
}
/**
 * Copy strides
 *
 * ostrs[i] = istrs[i]
 */
void md_copy_strides(int D, long ostrs[D], const long istrs[D])
{
	for (int d = 0; d < D; d++)
		ostrs[d] = istrs[d];
}
/**
 * Set all dimensions to value
 *
 * dims[i] = val
 */
void md_set_dims(int D, long dims[D], long val)
{
	while (D-- > 0)
		dims[D] = val;
}
/**
 * Returns whether pos is a valid index of an array of dimension dims,
 * i.e. 0 <= pos[i] < dims[i] for all i.
 */
bool md_is_index(int D, const long pos[D], const long dims[D])
{
	for (int d = 0; d < D; d++)
		if ((pos[d] < 0) || (pos[d] >= dims[d]))
			return false;

	return true;
}
/**
 * Return whether any dimension NOT selected by flags is > 1.
 */
bool md_check_dimensions(int N, const long dims[N], unsigned long flags)
{
	// keep only the unflagged dimensions; if their product is > 1,
	// some other dimension is non-trivial
	long rest[N];
	md_select_dims(N, ~flags, rest, dims);

	return (1 != md_calc_size(N, rest));
}
/**
 * Check if dimensions at 'flags' position are equal
 * (mutual boundedness implies equality).
 */
bool md_check_equal_dims(int N, const long dims1[N], const long dims2[N], unsigned long flags)
{
	bool le = md_check_bounds(N, flags, dims1, dims2);
	bool ge = md_check_bounds(N, flags, dims2, dims1);

	return (le && ge);
}
/*
 * compute bitmask of non-trivial (> 1) dims
 */
unsigned long md_nontriv_dims(int D, const long dims[D])
{
	unsigned long flags = 0;

	for (int d = D - 1; d >= 0; d--)
		if (1 < dims[d])
			flags = MD_SET(flags, d);

	return flags;
}
/*
 * compute bitmask of non-trivial (!= 0) strides
 */
unsigned long md_nontriv_strides(int D, const long strs[D])
{
	unsigned long flags = 0;

	for (int d = D - 1; d >= 0; d--)
		if (0 != strs[d])
			flags = MD_SET(flags, d);

	return flags;
}
/**
 * Set all dimensions to one
 *
 * dims[i] = 1
 */
void md_singleton_dims(int D, long dims[D])
{
	for (int d = D; d-- > 0; )
		dims[d] = 1;
}
/**
 * Set all strides to zero
 *
 * strs[i] = 0
 */
void md_singleton_strides(int D, long strs[D])
{
	for (int d = D; d-- > 0; )
		strs[d] = 0;
}
/**
 * Check dimensions for compatibility. Dimensions must be equal or,
 * where indicated by a set bit in flags, one must be equal to one
 * in at least one of the arguments.
 */
bool md_check_compat(int D, unsigned long flags, const long dim1[D], const long dim2[D])
{
	for (int d = D - 1; d >= 0; d--) {

		bool equal = (dim1[d] == dim2[d]);
		bool bcast = MD_IS_SET(flags, d) && ((1 == dim1[d]) || (1 == dim2[d]));

		if (!(equal || bcast))
			return false;
	}

	return true;
}
/*
 * Merge two (compatible) dimension vectors: a singleton in dims1
 * is replaced by the corresponding entry of dims2.
 */
void md_merge_dims(int N, long out_dims[N], const long dims1[N], const long dims2[N])
{
	assert(md_check_compat(N, ~0UL, dims1, dims2));

	for (int d = 0; d < N; d++)
		out_dims[d] = (1 != dims1[d]) ? dims1[d] : dims2[d];
}
/**
 * dim1 must be bounded by dim2 where a bit is set
 */
bool md_check_bounds(int D, unsigned long flags, const long dim1[D], const long dim2[D])
{
	for (int d = D - 1; 0 <= d; d--)
		if (MD_IS_SET(flags, d) && (dim1[d] > dim2[d]))
			return false;

	return true;
}
/**
 * Set the output's flagged dimensions to the minimum of the two input dimensions.
 * Unflagged entries of odims are left untouched.
 *
 * @param D number of dimensions
 * @param flags bitmask specifying which dimensions to minimize
 * @param odims output dimensions
 * @param idims1 input 1 dimensions
 * @param idims2 input 2 dimensions
 */
void md_min_dims(int D, unsigned long flags, long odims[D], const long idims1[D], const long idims2[D])
{
	for (int d = 0; d < D; d++) {

		if (!MD_IS_SET(flags, d))
			continue;

		odims[d] = MIN(idims1[d], idims2[d]);
	}
}
/**
 * Set the output's flagged dimensions to the maximum of the two input dimensions.
 * Unflagged entries of odims are left untouched.
 *
 * @param D number of dimensions
 * @param flags bitmask specifying which dimensions to maximize
 * @param odims output dimensions
 * @param idims1 input 1 dimensions
 * @param idims2 input 2 dimensions
 */
void md_max_dims(int D, unsigned long flags, long odims[D], const long idims1[D], const long idims2[D])
{
	for (int d = 0; d < D; d++) {

		if (!MD_IS_SET(flags, d))
			continue;

		odims[d] = MAX(idims1[d], idims2[d]);
	}
}
/**
 * Zero out array (with strides)
 *
 * ptr[i] = 0
 */
void md_clear2(int D, const long dim[D], const long str[D], void* ptr, size_t size)
{
	const long (*nstr[1])[D] = { (const long (*)[D])str };
#ifdef USE_CUDA
	bool use_gpu = cuda_ondevice(ptr);
#endif
	// dimensions with stride 0 address the same memory repeatedly;
	// collapse them so each byte is cleared only once
	unsigned long flags = 0;

	for (int i = 0; i < D; i++)
		if (0 == str[i])
			flags |= MD_BIT(i);

	long dim2[D];
	md_select_dims(D, ~flags, dim2, dim);

	// callback invoked by the optimizer on each contiguous chunk
	NESTED(void, nary_clear, (struct nary_opt_data_s* opt_data, void* ptr[]))
	{
		size_t size2 = size * opt_data->size;
#ifdef USE_CUDA
		if (use_gpu) {

			cuda_clear(size2, ptr[0]);
			return;
		}
#endif
		memset(ptr[0], 0, size2);
	};

	optimized_nop(1, MD_BIT(0), D, dim2, nstr, (void*[1]){ ptr }, (size_t[1]){ size }, nary_clear);
}
/**
 * Calculate strides in column-major format
 * (smallest index is sequential); singleton dimensions get stride 0.
 *
 * @param D number of dimensions
 * @param str output array of calculated strides
 * @param dim array of dimensions
 * @param size size of a single element
 * @return str (for convenient use as a function argument)
 */
long* md_calc_strides(int D, long str[D], const long dim[D], size_t size)
{
	long next = size;	// distance covered by the dimensions so far

	for (int d = 0; d < D; d++) {

		str[d] = (1 == dim[d]) ? 0 : next;
		next *= dim[d];
	}

	return str;
}
/**
 * Zero out array (without strides)
 *
 * ptr[i] = 0
 *
 * @param D number of dimensions
 * @param dim dimensions array
 * @param ptr pointer to data to clear
 * @param size sizeof()
 */
void md_clear(int D, const long dim[D], void* ptr, size_t size)
{
	long str[D];
	md_calc_strides(D, str, dim, size);

	md_clear2(D, dim, str, ptr, size);
}
/**
 * Copy array (with strides)
 *
 * optr[i] = iptr[i]
 *
 * On CPU this dispatches to the generic optimizer/memcpy path at the
 * bottom. In CUDA builds several specialized GPU paths are tried first.
 */
void md_copy2(int D, const long dim[D], const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
#if 0
	// this is for a fun comparison between our copy engine and FFTW
	extern void fft2(unsigned int D, const long dim[D], unsigned int flags,
			const long ostr[D], void* optr, const long istr[D], const void* iptr);

	if (sizeof(complex float) == size)
		fft2(D, dim, 0, ostr, optr, istr, iptr);
#endif
#ifdef USE_CUDA
	bool use_gpu = cuda_ondevice(optr) || cuda_ondevice(iptr);
#if 1
	// work on copies of dims/strides which the optimizer may collapse
	long tostr[D];
	long tistr[D];
	long tdims[D];

	md_copy_strides(D, tostr, ostr);
	md_copy_strides(D, tistr, istr);
	md_copy_dims(D, tdims, dim);

	long (*nstr2[2])[D] = { &tostr, &tistr };
	int ND = optimize_dims_gpu(2, D, tdims, nstr2);

	assert(ND <= D);
#if 1
	// permute dims with 0 input strides or negative in/output strides to the end
	// these might be permuted to the inner dimensions by optimize_dims and break the strided copy
	int perm[ND];

	for (int i = 0, j = 0; i < ND; i++) {

		if ( (0 >= (*nstr2[1])[i])
		|| (0 >= (*nstr2[0])[i])) {

			perm[ND - 1 -j] = i;
			j += 1;

		} else {

			perm[i - j] = i;
		}
	}

	long tmp[ND];

	md_permute_dims(ND, perm, tmp, tdims);
	md_copy_dims(ND, tdims, tmp);

	md_permute_dims(ND, perm, tmp, tostr);
	md_copy_dims(ND, tostr, tmp);

	md_permute_dims(ND, perm, tmp, tistr);
	md_copy_dims(ND, tistr, tmp);
#endif
#ifdef USE_CUDA
	// direct ND copy kernel (same device, limited rank)
	if (use_gpu && (cuda_get_device_num(optr) == cuda_get_device_num(iptr)) && ND <= 7) {

		cuda_copy_ND(ND, tdims, tostr, optr, tistr, iptr, size);
		return;
	}
#endif
#if 1
	//fill like copies
	// dims that are non-trivial but have zero input stride broadcast one
	// input element; realize these by one copy plus doubling self-copies
	unsigned long fill_flags = md_nontriv_dims(D, tdims)
				& ~md_nontriv_strides(D, tistr)
				& md_nontriv_strides(D, tostr);

	if (use_gpu && (0 != fill_flags)) {

		int idx = md_min_idx(fill_flags);

		long tdims2[ND];
		long pos[ND];

		md_select_dims(ND, ~MD_BIT(idx), tdims2, tdims);
		md_singleton_strides(ND, pos);

		// copy the first slice from the input ...
		md_copy2(ND, tdims2, tostr, optr, tistr, iptr, size);

		pos[idx] = 1;

		// ... then replicate it within the output, doubling each round
		while (pos[idx] < tdims[idx]) {

			tdims2[idx] = MIN(pos[idx], tdims[idx] - pos[idx]);

			md_copy2(ND, tdims2, tostr, optr + md_calc_offset(ND, tostr, pos), tostr, optr, size);

			pos[idx] += tdims2[idx];
		}

		return;
	}
#endif
	size_t sizes[2] = { size, size };
	int skip = min_blockdim(2, ND, tdims, nstr2, sizes);

	debug_printf(DP_DEBUG4, "md_copy_2 skip=%d\n", skip);
	debug_print_dims(DP_DEBUG4, ND, tdims);
	debug_print_dims(DP_DEBUG4, ND, (*nstr2[0]));
	debug_print_dims(DP_DEBUG4, ND, (*nstr2[1]));

	// 2D strided-copy kernel over the first non-contiguous dimension,
	// looped (md_nary) over the remaining outer dimensions
	if ( use_gpu
	&& (ND - skip > 0)) {

		assert(skip < ND);

		long ostr2 = (*nstr2[0])[skip];
		long istr2 = (*nstr2[1])[skip];

		if (!( (ostr2 > 0)
		&& (istr2 > 0)))
			goto out;	// fall through to the generic path

		void* nptr[2] = { optr, (void*)iptr };

		long sizes[2] = { md_calc_size(skip, tdims) * size, tdims[skip] };

		skip++;

		const long* nstr[2] = { *nstr2[0] + skip, *nstr2[1] + skip };

		long* sizesp = sizes; // because of clang
		void** nptrp = nptr;

		NESTED(void, nary_strided_copy, (void* ptr[]))
		{
			debug_printf(DP_DEBUG4, "CUDA 2D copy %ld %ld %ld %ld %ld %ld\n",
					sizesp[0], sizesp[1], ostr2, istr2, nptrp[0], nptrp[1]);

			cuda_memcpy_strided(sizesp, ostr2, ptr[0], istr2, ptr[1]);
		};

		md_nary(2, ND - skip, tdims + skip, nstr, nptr, nary_strided_copy);
		return;
	}
out:	;
#endif
#endif
	// generic path: let the optimizer find contiguous chunks,
	// then memcpy (CPU) or cuda_memcpy (GPU) each chunk
	const long (*nstr[2])[D] = { (const long (*)[D])ostr, (const long (*)[D])istr };

	NESTED(void, nary_copy, (struct nary_opt_data_s* opt_data, void* ptr[]))
	{
		size_t size2 = size * opt_data->size;
#ifdef USE_CUDA
		if (use_gpu) {

			cuda_memcpy(size2, ptr[0], ptr[1]);
			return;
		}
#endif
		memcpy(ptr[0], ptr[1], size2);
	};

	optimized_nop(2, MD_BIT(0), D, dim, nstr, (void*[2]){ optr, (void*)iptr }, (size_t[2]){ size, size }, nary_copy);
}
/**
 * Copy array (without strides)
 *
 * optr[i] = iptr[i]
 */
void md_copy(int D, const long dim[D], void* optr, const void* iptr, size_t size)
{
	// identical (column-major) strides on both sides
	long strides[D];
	md_copy2(D, dim, md_calc_strides(D, strides, dim, size), optr, strides, iptr, size);
}
#ifdef USE_CUDA
// copied from flpmath.c
// Move a single element of the given size to GPU memory and return
// the (md_alloc'd) device pointer; caller frees with md_free.
static void* gpu_constant(const void* vp, size_t size)
{
	return md_gpu_move(1, (long[1]){ 1 }, vp, size);
}
#endif
/**
 * Fill array with value pointed by pointer (with strides)
 *
 * ptr[i] = iptr[0]
 */
void md_fill2(int D, const long dim[D], const long str[D], void* ptr, const void* iptr, size_t size)
{
#ifdef USE_CUDA
	// destination on GPU but constant on host: move the constant
	// to the GPU first, then retry
	if (cuda_ondevice(ptr) && (!cuda_ondevice(iptr))) {

		void* giptr = gpu_constant(iptr, size);

		md_fill2(D, dim, str, ptr, giptr, size);

		md_free(giptr);
		return;
	}
#endif
	// a copy with all-zero input strides broadcasts iptr[0] everywhere
	long istr[D];
	md_singleton_strides(D, istr);

	md_copy2(D, dim, str, ptr, istr, iptr, size);
}
/**
 * Fill array with value pointed by pointer (without strides)
 *
 * ptr[i] = iptr[0]
 */
void md_fill(int D, const long dim[D], void* ptr, const void* iptr, size_t size)
{
	long strs[D];
	md_calc_strides(D, strs, dim, size);

	md_fill2(D, dim, strs, ptr, iptr, size);
}
/**
 * Swap values between a number of arrays (with strides)
 *
 * Rotates the contents of the M arrays element-wise via the
 * memcpy chain in the callback below.
 */
void md_circular_swap2(int M, int D, const long dims[D], const long* strs[M], void* ptr[M], size_t size)
{
	size_t sizes[M];

	for (int i = 0; i < M; i++)
		sizes[i] = size;

	const long (*nstrs[M])[D];

	for (int i = 0; i < M; i++)
		nstrs[i] = (const long (*)[D])strs[i];

	// callback invoked by the optimizer on each contiguous chunk
	NESTED(void, nary_swap, (struct nary_opt_data_s* opt_data, void* ptr[]))
	{
		size_t size2 = size * opt_data->size;

		// small temporaries on the stack, larger ones on the heap
		char* tmp = (size2 < 32) ? alloca(size2) : xmalloc(size2);
#ifdef USE_CUDA
		// CPU-only path (plain memcpy below)
		assert(!cuda_ondevice(ptr[0]));
		assert(!cuda_ondevice(ptr[1]));
#endif
		memcpy(tmp, ptr[0], size2);

		for (int i = 0; i < M - 1; i++)
			memcpy(ptr[i], ptr[i + 1], size2);

		memcpy(ptr[M - 1], tmp, size2);

		if (size2 >= 32)
			xfree(tmp);
	};

	optimized_nop(M, (1 << M) - 1, D, dims, nstrs, ptr, sizes, nary_swap);
}
/**
 * Swap values between a number of arrays (without strides);
 * all arrays share the same (column-major) strides.
 */
void md_circular_swap(int M, int D, const long dims[D], void* ptr[M], size_t size)
{
	long strs[M][D];
	const long* strp[M];

	md_calc_strides(D, strs[0], dims, size);
	strp[0] = strs[0];

	// every array uses the same stride vector
	for (int m = 1; m < M; m++) {

		md_copy_strides(D, strs[m], strs[0]);
		strp[m] = strs[m];
	}

	md_circular_swap2(M, D, dims, strp, ptr, size);
}
/**
 * Swap values between two arrays (with strides)
 *
 * iptr[i] = optr[i] and optr[i] = iptr[i]
 */
void md_swap2(int D, const long dim[D], const long ostr[D], void* optr, const long istr[D], void* iptr, size_t size)
{
	// a two-element circular swap is an exchange
	const long* strs[2] = { ostr, istr };
	void* ptrs[2] = { optr, iptr };

	md_circular_swap2(2, D, dim, strs, ptrs, size);
}
/**
 * Swap values between two arrays (without strides)
 *
 * iptr[i] = optr[i] and optr[i] = iptr[i]
 */
void md_swap(int D, const long dim[D], void* optr, void* iptr, size_t size)
{
	long strides[D];
	md_swap2(D, dim, md_calc_strides(D, strides, dim, size), optr, strides, iptr, size);
}
/**
 * Move a block from an array to another array (with strides)
 *
 * Copies a dim-sized block located at ipos in the input to
 * opos in the output; the block must fit within both arrays.
 */
void md_move_block2(int D, const long dim[D], const long opos[D], const long odim[D], const long ostr[D], void* optr, const long ipos[D], const long idim[D], const long istr[D], const void* iptr, size_t size)
{
	for (int d = 0; d < D; d++) {

		// block must fit inside both arrays at the given positions
		assert(dim[d] <= odim[d]);
		assert(dim[d] <= idim[d]);
		assert((0 <= opos[d]) && (opos[d] <= odim[d] - dim[d]));
		assert((0 <= ipos[d]) && (ipos[d] <= idim[d] - dim[d]));
	}

	long in_off = md_calc_offset(D, istr, ipos);
	long out_off = md_calc_offset(D, ostr, opos);

	md_copy2(D, dim, ostr, optr + out_off, istr, iptr + in_off, size);
}
/**
 * Move a block from an array to another array (without strides)
 */
void md_move_block(int D, const long dim[D], const long opos[D], const long odim[D], void* optr, const long ipos[D], const long idim[D], const void* iptr, size_t size)
{
	long ostr[D];
	long istr[D];

	md_calc_strides(D, ostr, odim, size);
	md_calc_strides(D, istr, idim, size);

	md_move_block2(D, dim, opos, odim, ostr, optr, ipos, idim, istr, iptr, size);
}
/**
 * Copy a block from an array to another array (with strides)
 *
 * Block dimensions are min(idim , odim)
 *
 * if idim[d] > odim[d], then optr[i] = iptr[pos + i] for 0 <= i < odim[d]
 *
 * if idim[d] < odim[d], then optr[pos + i] = iptr[i] for 0 <= i < idim[d]
 */
void md_copy_block2(int D, const long pos[D], const long odim[D], const long ostr[D], void* optr, const long idim[D], const long istr[D], const void* iptr, size_t size)
{
	long dim[D];
	long ipos[D];
	long opos[D];

	for (int d = 0; d < D; d++) {

		// pos is only meaningful where the dimensions differ
		assert((idim[d] != odim[d]) || (0 == pos[d]));

		dim[d] = MIN(odim[d], idim[d]);

		// offset into whichever array is larger along d
		ipos[d] = (idim[d] != dim[d]) ? pos[d] : 0;
		opos[d] = (odim[d] != dim[d]) ? pos[d] : 0;
	}

	md_move_block2(D, dim, opos, odim, ostr, optr, ipos, idim, istr, iptr, size);
}
/**
 * Copy a block from an array to another array (without strides)
 *
 * Block dimensions are min(idim , odim)
 *
 * if idim[d] > odim[d], then optr[i] = iptr[pos + i] for 0 <= i < odim[d]
 *
 * if idim[d] < odim[d], then optr[pos + i] = iptr[i] for 0 <= i < idim[d]
 */
void md_copy_block(int D, const long pos[D], const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
	long ostr[D];
	long istr[D];

	md_calc_strides(D, ostr, odim, size);
	md_calc_strides(D, istr, idim, size);

	md_copy_block2(D, pos, odim, ostr, optr, idim, istr, iptr, size);
}
/**
 * Resize an array by zero-padding or by truncation at the end.
 *
 * optr = [iptr 0 0 0 0]
 */
void md_resize(int D, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
	long pos[D];
	bool grows = false;

	for (int d = 0; d < D; d++) {

		pos[d] = 0;

		if (odim[d] > idim[d])
			grows = true;
	}

	// zero the output only if some part of it is not overwritten below
	if (grows)
		md_clear(D, odim, optr, size);

	md_copy_block(D, pos, odim, optr, idim, iptr, size);
}
/**
 * Pad an array by val at the end.
 *
 * optr = [iptr val val val val]
 */
void md_pad(int D, const void* val, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
	long pos[D];

	for (int d = 0; d < D; d++)
		pos[d] = 0;

	// fill everything with val, then overwrite the leading block
	md_fill(D, odim, optr, val, size);
	md_copy_block(D, pos, odim, optr, idim, iptr, size);
}
/**
 * Resize an array by zero-padding or by truncation at both ends symmetrically.
 *
 * optr = [0 0 iptr 0 0]
 */
void md_resize_center(int D, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
	// the definition of the center position corresponds
	// to the one used in the FFT.
	long pos[D];
	bool grows = false;

	for (int d = 0; d < D; d++) {

		pos[d] = labs((odim[d] / 2) - (idim[d] / 2));

		if (odim[d] > idim[d])
			grows = true;
	}

	// zero the output only if some part of it is not overwritten below
	if (grows)
		md_clear(D, odim, optr, size);

	md_copy_block(D, pos, odim, optr, idim, iptr, size);
}
/**
 * Pad an array on both ends by val.
 *
 * optr = [val val iptr val val]
 */
void md_pad_center(int D, const void* val, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
	// FFT-style center position
	long pos[D];

	for (int d = 0; d < D; d++)
		pos[d] = labs((odim[d] / 2) - (idim[d] / 2));

	// fill everything with val, then overwrite the centered block
	md_fill(D, odim, optr, val, size);
	md_copy_block(D, pos, odim, optr, idim, iptr, size);
}
/**
 * Resize an array by reflect-padding around the (FFT-style) center
 * (with strides).
 *
 * Each padded dimension of the output is tiled into blocks of at most
 * idim[i] elements, centered on the input block; blocks at odd distance
 * from the center read the input with negated stride (reflection).
 */
void md_reflectpad_center2(int D, const long odim[D], const long ostr[D], void* optr,
		const long idim[D], const long istr[D], const void* iptr, size_t size)
{
	long odim2[D];		// dims of the block currently being copied
	long ristr[D];		// input strides, possibly negated per dim
	long loop_idx[D];	// indices of the padded dimensions
	long blockdim[D];	// number of blocks per dimension
	long center_block[D];	// block index holding the original input
	long block0_size[D];	// size of the first (possibly partial) block

	long count = 0;

	for (int i = 0; i < D; i++) {

		assert(odim[i] >= idim[i]);

		blockdim[i] = 1;
		center_block[i] = 0;
		ristr[i] = istr[i];
		odim2[i] = idim[i];
		block0_size[i] = 0;

		if (odim[i] > idim[i]) {

			// dimension i is padded: set up its block tiling
			loop_idx[count++] = i;

			long main_start = labs((odim[i] / 2) - (idim[i] / 2));
			long main_end = main_start + idim[i];

			// number of (partial) blocks before/after the centered input
			long before = (main_start + idim[i] - 1) / idim[i];
			long after = (odim[i] - main_end + idim[i] - 1) / idim[i];

			blockdim[i] = 1 + before + after;
			center_block[i] = before;

			long x = main_start % idim[i];
			block0_size[i] = (0 == x) ? idim[i] : x;
		}
	}

	long block_pos[D];
	long in_pos[D];

	md_set_dims(D, block_pos, 0);
	md_set_dims(D, in_pos, 0);

	long opos[D];
	md_set_dims(D, opos, 0);

	do {
		// for each padded dimension: compute the output position/extent
		// of the current block and where (and in which direction) the
		// input is read from
		for (int i = 0, idx = loop_idx[0]; i < count; idx = (++i < count) ? loop_idx[i] : idx) {

			opos[idx] = (block_pos[idx] >= 1) ? (block0_size[idx] + idim[idx] * (block_pos[idx] - 1)) : 0;
			odim2[idx] = (block_pos[idx] == 0) ? block0_size[idx] : MIN(idim[idx], odim[idx] - opos[idx]);

			if (1 == labs(center_block[idx] - block_pos[idx]) % 2) {

				// odd distance from center: reflected (reverse) copy
				ristr[idx] = -istr[idx];
				in_pos[idx] = (odim2[idx] < idim[idx]) ? ((block_pos[idx] > center_block[idx]) ? (idim[idx] - 1) : odim2[idx] - 1) : (idim[idx] - 1);

			} else {

				// even distance from center: direct copy
				ristr[idx] = istr[idx];
				in_pos[idx] = (odim2[idx] < idim[idx]) ? ((block_pos[idx] > center_block[idx]) ? 0 : (idim[idx] - odim2[idx])) : 0;
			}
		}

		md_copy2(D, odim2, ostr, md_calc_offset(D, ostr, opos) + optr, ristr, md_calc_offset(D, istr, in_pos) + iptr, size);

		// NOTE(review): ~0U only covers dims 0..31 (md_next takes unsigned long);
		// fine for typical D, but confirm if D can ever exceed 32
	} while (md_next(D, blockdim, ~0U, block_pos));
}
/*
 * Resize an array by reflect-padding around the center (without strides).
 */
void md_reflectpad_center(int D, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
	long ostr[D];
	long istr[D];

	md_calc_strides(D, ostr, odim, size);
	md_calc_strides(D, istr, idim, size);

	md_reflectpad_center2(D, odim, ostr, optr, idim, istr, iptr, size);
}
/**
 * Extract slice from array specified by flags (with strides)
 *
 * optr = iptr(pos[0], :, pos[2], :, :)
 */
void md_slice2(int D, unsigned long flags, const long pos[D], const long dim[D], const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
	// flagged dimensions collapse to singletons in the output
	long sdims[D];
	md_select_dims(D, ~flags, sdims, dim);

	md_copy_block2(D, pos, sdims, ostr, optr, dim, istr, iptr, size);
}
/**
 * Extract slice from array specified by flags (without strides)
 *
 * optr = iptr(pos[0], :, pos[2], :, :)
 */
void md_slice(int D, unsigned long flags, const long pos[D], const long dim[D], void* optr, const void* iptr, size_t size)
{
	long odim[D];
	long ostr[D];
	long istr[D];

	md_select_dims(D, ~flags, odim, dim);

	md_calc_strides(D, ostr, odim, size);
	md_calc_strides(D, istr, dim, size);

	md_slice2(D, flags, pos, dim, ostr, optr, istr, iptr, size);
}
/**
 * Permute array (with strides)
 *
 * optr[order[i]] = iptr[i]
 */
void md_permute2(int D, const int order[D], const long odims[D], const long ostr[D], void* optr, const long idims[D], const long istr[D], const void* iptr, size_t size)
{
	unsigned long covered = 0;
	long pstr[D];	// output strides rearranged into input order

	for (int i = 0; i < D; i++) {

		assert(order[i] < D);
		assert(odims[i] == idims[order[i]]);

		covered = MD_SET(covered, order[i]);
		pstr[order[i]] = ostr[i];
	}

	// order must be a permutation: every dimension covered exactly once
	assert(MD_BIT(D) == covered + 1U);

	// a copy in input order with permuted output strides realizes the permutation
	md_copy2(D, idims, pstr, optr, istr, iptr, size);
}
/**
 * Permute array (without strides)
 *
 * optr[order[i]] = iptr[i]
 */
void md_permute(int D, const int order[D], const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size)
{
	long ostr[D];
	long istr[D];

	md_calc_strides(D, ostr, odims, size);
	md_calc_strides(D, istr, idims, size);

	md_permute2(D, order, odims, ostr, optr, idims, istr, iptr, size);
}
/**
 * Permute dimensions
 *
 * odims[i] = idims[order[i]]
 */
void md_permute_dims(int D, const int order[D], long odims[D], const long idims[D])
{
	for (int d = D; d-- > 0; )
		odims[d] = idims[order[d]];
}
/*
 * Build a permutation which is the identity except that
 * dim1 and dim2 are exchanged.
 */
static void md_transpose_order(int D, int order[D], int dim1, int dim2)
{
	assert((dim1 < D) && (dim2 < D));

	for (int d = 0; d < D; d++)
		order[d] = d;

	order[dim1] = dim2;
	order[dim2] = dim1;
}
/**
 * Transpose dimensions
 *
 * odims = idims with dim1 and dim2 exchanged
 */
void md_transpose_dims(int D, int dim1, int dim2, long odims[D], const long idims[D])
{
	int perm[D];

	md_transpose_order(D, perm, dim1, dim2);
	md_permute_dims(D, perm, odims, idims);
}
/**
 * Transpose array (with strides)
 *
 * optr[dim2] = iptr[dim1]
 *
 * optr[dim1] = iptr[dim2]
 */
void md_transpose2(int D, int dim1, int dim2, const long odims[D], const long ostr[D], void* optr, const long idims[D], const long istr[D], const void* iptr, size_t size)
{
	// all dimensions except the transposed pair must agree
	for (int d = 0; d < D; d++)
		if ((d != dim1) && (d != dim2))
			assert(odims[d] == idims[d]);

	assert(odims[dim1] == idims[dim2]);
	assert(odims[dim2] == idims[dim1]);

	int perm[D];
	md_transpose_order(D, perm, dim1, dim2);

	md_permute2(D, perm, odims, ostr, optr, idims, istr, iptr, size);
}
/**
 * Transpose array (without strides)
 *
 * optr[dim2] = iptr[dim1]
 *
 * optr[dim1] = iptr[dim2]
 */
void md_transpose(int D, int dim1, int dim2, const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size)
{
	long ostr[D];
	long istr[D];

	md_calc_strides(D, ostr, odims, size);
	md_calc_strides(D, istr, idims, size);

	md_transpose2(D, dim1, dim2, odims, ostr, optr, idims, istr, iptr, size);
}
static void md_flip_inpl2(int D, const long dims[D], unsigned long flags, const long str[D], void* ptr, size_t size);
/**
 * Swap input and output while flipping selected dimensions
 * at the same time.
 *
 * Implemented by recursive halving: the lower half of one array is
 * swapped against the (mirrored) upper half of the other along the
 * highest flagged non-singleton dimension.
 */
void md_swap_flip2(int D, const long dims[D], unsigned long flags, const long ostr[D], void* optr, const long istr[D], void* iptr, size_t size)
{
#if 1
	// find the highest flagged non-singleton dimension
	int i;

	for (i = D - 1; i >= 0; i--)
		if ((1 != dims[i]) && MD_IS_SET(flags, i))
			break;

	if (-1 == i) {
		// nothing left to flip: plain swap
		md_swap2(D, dims, ostr, optr, istr, iptr, size);
		return;
	}

	assert(1 < dims[i]);
	assert(ostr[i] != 0);
	assert(istr[i] != 0);

	// split dimension i in half and swap opposite halves,
	// which realizes the flip along i
	long dims2[D];
	md_copy_dims(D, dims2, dims);
	dims2[i] = dims[i] / 2;

	long off = (dims[i] + 1) / 2;
	assert(dims2[i] + off == dims[i]);

	md_swap_flip2(D, dims2, flags, ostr, optr, istr, iptr + off * istr[i], size);
	md_swap_flip2(D, dims2, flags, ostr, optr + off * ostr[i], istr, iptr, size);

	// odd, swap center plane
	// (we should split in three similar sized chunks instead)
	dims2[i] = 1;

	if (1 == dims[i] % 2)
		md_swap_flip2(D, dims2, flags, ostr, optr + (off - 1) * ostr[i], istr, iptr + (off - 1) * istr[i], size);
#else
	// simpler, but more swaps
	md_swap2(D, dims, ostr, optr, istr, iptr, size);
	md_flip_inpl2(D, dims, flags, ostr, optr, size);
	md_flip_inpl2(D, dims, flags, istr, iptr, size);
#endif
}
/**
 * Swap input and output while flipping selected dimensions
 * at the same time (without strides).
 */
void md_swap_flip(int D, const long dims[D], unsigned long flags, void* optr, void* iptr, size_t size)
{
	long strides[D];
	md_swap_flip2(D, dims, flags, md_calc_strides(D, strides, dims, size), optr, strides, iptr, size);
}
/*
 * In-place flip of the flagged dimensions (with strides).
 *
 * Works by exchanging-and-flipping the first half with the second half
 * along the last flagged (non-singleton) dimension; for odd extents the
 * center plane maps onto itself and needs no work.
 */
static void md_flip_inpl2(int D, const long dims[D], unsigned long flags, const long str[D], void* ptr, size_t size)
{
	// last flagged dimension with extent > 1
	int i;
	for (i = D - 1; i >= 0; i--)
		if ((1 != dims[i]) && MD_IS_SET(flags, i))
			break;

	// no non-trivial flagged dimension: nothing to do
	if (-1 == i)
		return;

	assert(1 < dims[i]);
	assert(str[i] != 0);

	long dims2[D];
	md_copy_dims(D, dims2, dims);
	dims2[i] = dims[i] / 2;

	// byte offset of the second half (rounded up, so the center
	// plane of an odd extent stays in place)
	long off = str[i] * (0 + (dims[i] + 1) / 2);

	md_swap_flip2(D, dims2, flags, str, ptr, str, ptr + off, size);
}
/**
 * Flip array (with strides)
 *
 * optr[dims[D] - 1 - i] = iptr[i]
 *
 */
void md_flip2(int D, const long dims[D], unsigned long flags, const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
	// in-place flip is handled by a dedicated recursive routine
	if (optr == iptr) {

		assert(ostr == istr);

		md_flip_inpl2(D, dims, flags, ostr, optr, size);
		return;
	}

	// negate the output strides of the flagged dimensions and start
	// writing at the last element along each of them
	long ostr2[D];
	long off = 0;

	for (int i = 0; i < D; i++) {

		bool flip = MD_IS_SET(flags, i);

		ostr2[i] = flip ? -ostr[i] : ostr[i];

		if (flip)
			off += (dims[i] - 1) * ostr[i];
	}

	md_copy2(D, dims, ostr2, optr + off, istr, iptr, size);
}
/**
 * Flip array (without strides)
 *
 * optr[dims[D] - 1 - i] = iptr[i]
 *
 */
void md_flip(int D, const long dims[D], unsigned long flags, void* optr, const void* iptr, size_t size)
{
	// identical contiguous layout for input and output
	long strs[D];
	md_calc_strides(D, strs, dims, size);

	md_flip2(D, dims, flags, strs, optr, strs, iptr, size);
}
/**
 * Reshape array (with strides)
 *
 * Only flagged dims may flow
 *
 * Strategy: permute all flagged (flowing) dimensions to the front,
 * gather the input into a contiguous buffer (where the reshape is then
 * a pure reinterpretation of the raw data), and permute back into the
 * requested output layout.
 */
void md_reshape2(int D, unsigned long flags, const long odims[D], const long ostrs[D], void* optr, const long idims[D], const long istrs[D], const void* iptr, size_t size)
{
	assert(md_calc_size(D, odims) == md_calc_size(D, idims));
	assert(md_check_equal_dims(D, odims, idims, ~flags));

	// permutation moving flagged dims first (relative order preserved)
	int order[D];
	int j = 0;

	for (int i = 0; i < D; i++)
		if (MD_IS_SET(flags, i))
			order[j++] = i;

	for (int i = 0; i < D; i++)
		if (!MD_IS_SET(flags, i))
			order[j++] = i;

	assert(D == j);

	// inverse permutation
	int iorder[D];

	for (int i = 0; i < D; i++)
		iorder[order[i]] = i;

	long dims2[D];
	long strs2[D];

	// FIXME: we could avoid the buffer in some cases
	void* buf = md_alloc_sameplace(D, odims, size, optr);

	// gather input into the contiguous buffer, flagged dims first
	md_permute_dims(D, order, dims2, idims);
	md_calc_strides(D, strs2, dims2, size);

	md_permute2(D, order, dims2, strs2, buf, idims, istrs, iptr, size);

	// reinterpret the buffer with the permuted output dimensions ...
	md_permute_dims(D, order, dims2, odims);
	md_calc_strides(D, strs2, dims2, size);

	// ... and scatter back into the output layout
	md_permute2(D, iorder, odims, ostrs, optr, dims2, strs2, buf, size);

	md_free(buf);
}
/**
 * Reshape array (without strides)
 *
 * Only flagged dims may flow
 */
void md_reshape(int D, unsigned long flags, const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size)
{
	assert(md_calc_size(D, odims) == md_calc_size(D, idims));
	assert(md_check_equal_dims(D, odims, idims, ~flags));

	long ostrs[D];
	long istrs[D];

	md_calc_strides(D, ostrs, odims, size);
	md_calc_strides(D, istrs, idims, size);

	// if the strides already agree on all non-flowing dims,
	// the reshape degenerates to a plain copy
	if (md_check_equal_dims(D, ostrs, istrs, ~flags)) {

		md_copy(D, odims, optr, iptr, size);
		return;
	}

	md_reshape2(D, flags, odims, ostrs, optr, idims, istrs, iptr, size);
}
/**
 * Compare two arrays element-wise (with strides)
 *
 * Returns true iff the contents are byte-wise identical.
 */
bool md_compare2(int D, const long dims[D], const long str1[D], const void* src1,
			const long str2[D], const void* src2, size_t size)
{
	// __block: updated from inside the nested comparison kernel below
	__block bool eq = true;

	const long (*nstr[2])[D] = { (const long (*)[D])str1, (const long (*)[D])str2 };

	NESTED(void, nary_cmp, (struct nary_opt_data_s* opt_data, void* ptrs[]))
	{
		// opt_data->size: number of (merged) elements handled per call
		size_t size2 = size * opt_data->size;

		bool eq2 = (0 == memcmp(ptrs[0], ptrs[1], size2));

		// kernel invocations may run in parallel; combine atomically
		#pragma omp atomic
		eq &= eq2;
	};

	optimized_nop(2, 0u, D, dims, nstr, (void*[2]){ (void*)src1, (void*)src2 }, (size_t[2]){ size, size }, nary_cmp);

	return eq;
}
/**
 * Compare two arrays element-wise (without strides)
 *
 * Returns true iff the contents are byte-wise identical.
 */
bool md_compare(int D, const long dims[D], const void* src1, const void* src2, size_t size)
{
	long strs[D];
	md_calc_strides(D, strs, dims, size);

	return md_compare2(D, dims, strs, src1, strs, src2, size);
}
/*
 * Recursive helper for md_septrafo2: peels off dimension R-1, applying
 * 'fun' along that dimension (if flagged) for every combination of the
 * remaining indices.
 */
static void md_septrafo_r(int D, unsigned int R, long dimensions[D], unsigned long flags, const long strides[D], void* ptr, md_trafo_fun_t fun)
{
	if (0 == R--)
		return;

	// handle dimensions 0 .. R-1 first
	md_septrafo_r(D, R, dimensions, flags, strides, ptr, fun);

	if (MD_IS_SET(flags, R)) {

		void* nptr[1] = { ptr };
		const long* nstrides[1] = { strides };

		long dimsR = dimensions[R];
		long strsR = strides[R];	// because of clang

		// collapse dim R so md_nary iterates over all other dims only
		dimensions[R] = 1;	// we made a copy in md_septrafo2

		NESTED(void, nary_septrafo, (void* ptr[]))
		{
			fun(dimsR, strsR, ptr[0]);
		};

		//md_nary_parallel(1, D, dimensions, nstrides, nptr, &data, nary_septrafo);
		md_nary(1, D, dimensions, nstrides, nptr, nary_septrafo);

		// restore for the caller
		dimensions[R] = dimsR;
	}
}
/**
 * Apply a separable transformation along selected dimensions.
 * (with strides)
 */
void md_septrafo2(int D, const long dimensions[D], unsigned long flags, const long strides[D], void* ptr, md_trafo_fun_t fun)
{
	// the recursion temporarily modifies the dims, so pass a copy
	long tdims[D];
	md_copy_dims(D, tdims, dimensions);

	md_septrafo_r(D, D, tdims, flags, strides, ptr, fun);
}
/**
 * Apply a separable transformation along selected dimensions.
 * (without strides)
 */
void md_septrafo(int D, const long dims[D], unsigned long flags, void* ptr, size_t size, md_trafo_fun_t fun)
{
	long strs[D];
	md_calc_strides(D, strs, dims, size);

	md_septrafo2(D, dims, flags, strs, ptr, fun);
}
/**
 * Copy diagonals from array specified by flags (with strides)
 *
 * dst(i, i, :, i, :) = src(i, i, :, i, :)
 *
 * All flagged dimensions must have the same extent.
 */
void md_copy_diag2(int D, const long dims[D], unsigned long flags, const long str1[D], void* dst, const long str2[D], const void* src, size_t size)
{
	long count = -1;	// common extent of the flagged dims
	long dstep = 0;		// combined output stride along the diagonal
	long sstep = 0;		// combined input stride along the diagonal

	for (int i = 0; i < D; i++) {

		if (!MD_IS_SET(flags, i))
			continue;

		if (count < 0)
			count = dims[i];

		assert(dims[i] == count);

		dstep += str1[i];
		sstep += str2[i];
	}

	// remaining (non-flagged) dims are copied as whole slices
	long rdims[D];
	md_select_dims(D, ~flags, rdims, dims);

	for (long i = 0; i < count; i++)
		md_copy2(D, rdims, str1, dst + i * dstep, str2, src + i * sstep, size);
}
/**
 * Copy diagonals from array specified by flags (without strides)
 *
 * dst(i ,i ,: ,i , :) = src(i ,i ,: ,i ,:)
 *
 */
void md_copy_diag(int D, const long dims[D], unsigned long flags, void* dst, const void* src, size_t size)
{
	long strs[D];
	md_calc_strides(D, strs, dims, size);

	md_copy_diag2(D, dims, flags, strs, dst, strs, src, size);
}
/**
 * Fill diagonals specified by flags with value (without strides)
 *
 * dst(i, i, :, i, :) = src[0]
 *
 */
void md_fill_diag(int D, const long dims[D], unsigned long flags, void* dst, const void* src, size_t size)
{
	long dstrs[D];
	md_calc_strides(D, dstrs, dims, size);

	// all-zero input strides broadcast the single source value
	long sstrs[D];
	md_singleton_strides(D, sstrs);

	md_copy_diag2(D, dims, flags, dstrs, dst, sstrs, src, size);
}
/*
 * In-place circular shift (with strides).
 *
 * Currently implemented by copying the data to a temporary buffer and
 * shifting out-of-place from there; the #if 0 branch sketches a
 * flip-based in-place variant.
 */
static void md_circ_shift_inpl2(int D, const long dims[D], const long center[D], const long strs[D], void* dst, size_t size)
{
#if 0
	long dims1[D];
	long dims2[D];

	md_copy_dims(D, dims1, dims);
	md_copy_dims(D, dims2, dims);

	int i;
	for (i = 0; i < D; i++) {

		if (0 != center[i]) {

			dims1[i] = center[i];
			dims2[i] = dims[i] - center[i];
			break;
		}
	}

	if (i == D)
		return;

	long off = strs[i] * center[i];

	// cool but slow, instead we want to have a chain of swaps

	md_flip2(D, dims, MD_BIT(i), strs, dst, strs, dst, size);
	md_flip2(D, dims1, MD_BIT(i), strs, dst, strs, dst, size);
	md_flip2(D, dims2, MD_BIT(i), strs, dst + off, strs, dst + off, size);

	// also not efficient, we want to merge the chain of swaps

	long center2[D];
	md_copy_dims(D, center2, center);
	center2[i] = 0;

	md_circ_shift_inpl2(D, dims, center2, strs, dst, size);
#else
	// use tmp for now

	// find the first dimension with a non-zero shift
	int i;
	for (i = 0; i < D; i++)
		if (0 != center[i])
			break;

	// no shift at all: data is already in place
	if (i == D)
		return;

	long tmp_strs[D];
	md_calc_strides(D, tmp_strs, dims, size);

	void* tmp = md_alloc_sameplace(D, dims, size, dst);

	md_copy2(D, dims, tmp_strs, tmp, strs, dst, size);
	md_circ_shift2(D, dims, center, strs, dst, tmp_strs, tmp, size);

	md_free(tmp);
#endif
}
/**
 * Circularly shift array (with strides)
 *
 * dst[mod(i + center)] = src[i]
 *
 * Negative shifts are normalized into [0, dims). Along the first
 * shifted dimension the shift is realized by two block copies;
 * the remaining dimensions are handled recursively.
 */
void md_circ_shift2(int D, const long dimensions[D], const long center[D], const long str1[D], void* dst, const long str2[D], const void* src, size_t size)
{
	// normalize shifts to be non-negative
	long pos[D];

	for (int i = 0; i < D; i++) {	// FIXME: it would be better to calc modulo

		pos[i] = center[i];

		while (pos[i] < 0)
			pos[i] += dimensions[i];
	}

	// first dimension with a non-zero shift
	int i = 0;	// FIXME :maybe we shoud search the other way?

	while ((i < D) && (0 == pos[i]))
		i++;

	// no shift at all: plain copy
	if (D == i) {

		md_copy2(D, dimensions, str1, dst, str2, src, size);
		return;
	}

	// in-place shift needs the temporary-buffer variant
	if (dst == src) {

		assert(str1 == str2);

		md_circ_shift_inpl2(D, dimensions, pos, str1, dst, size);
		return;
	}

	long shift = pos[i];
	assert(shift != 0);

	// split along dim i: the last (dims[i] - shift) source elements
	// wrap to the front of dst, the first 'shift' positions of dst
	// receive the wrapped tail
	long dim1[D];
	long dim2[D];

	md_copy_dims(D, dim1, dimensions);
	md_copy_dims(D, dim2, dimensions);

	dim1[i] = shift;
	dim2[i] = dimensions[i] - shift;

	assert((dim1[i] >= 0) && (dim2[i] >= 0));

	pos[i] = 0;

	//printf("%d: %ld %ld %d\n", i, dim1[i], dim2[i], sizeof(dimensions));
	md_circ_shift2(D, dim1, pos, str1, dst, str2, src + dim2[i] * str2[i], size);
	md_circ_shift2(D, dim2, pos, str1, dst + dim1[i] * str1[i], str2, src, size);
}
/**
 * Circularly shift array (without strides)
 *
 * dst[mod(i + center)] = src[i]
 *
 */
void md_circ_shift(int D, const long dimensions[D], const long center[D], void* dst, const void* src, size_t size)
{
	long strs[D];
	md_calc_strides(D, strs, dimensions, size);

	md_circ_shift2(D, dimensions, center, strs, dst, strs, src, size);
}
/**
 * Circularly extend array (with strides)
 *
 * The extension ext[i] = dims1[i] - dims2[i] along each dimension must
 * satisfy 0 <= ext[i] <= dims2[i]; the extra output elements are filled
 * with a wrapped-around copy of the beginning of the source.
 */
void md_circ_ext2(int D, const long dims1[D], const long strs1[D], void* dst, const long dims2[D], const long strs2[D], const void* src, size_t size)
{
	long ext[D];

	for (int i = 0; i < D; i++) {

		ext[i] = dims1[i] - dims2[i];

		assert(ext[i] >= 0);
		assert(ext[i] <= dims2[i]);
	}

	// first dimension that actually needs extension
	int i = 0;	// FIXME :maybe we shoud search the other way?

	while ((i < D) && (0 == ext[i]))
		i++;

	// no extension: plain copy
	if (D == i) {

		md_copy2(D, dims1, strs1, dst, strs2, src, size);
		return;
	}

	long dims1_crop[D];
	long dims2_crop[D];
	long ext_dims[D];

	md_copy_dims(D, dims1_crop, dims1);
	md_copy_dims(D, dims2_crop, dims2);
	md_copy_dims(D, ext_dims, dims1);

	dims1_crop[i] = dims2[i];
	dims2_crop[i] = ext[i];
	ext_dims[i] = ext[i];
	ext[i] = 0;

	//printf("%d: %ld %ld %d\n", i, dim1[i], dim2[i], sizeof(dimensions));
	// copy the source, then append the wrapped-around part; further
	// dimensions are handled by the recursion
	md_circ_ext2(D, dims1_crop, strs1, dst, dims2, strs2, src, size);
	md_circ_ext2(D, ext_dims, strs1, dst + dims2[i] * strs1[i], dims2_crop, strs2, src, size);
}
/**
 * Circularly extend array (without strides)
 *
 */
void md_circ_ext(int D, const long dims1[D], void* dst, const long dims2[D], const void* src, size_t size)
{
	long strs1[D];
	long strs2[D];

	md_calc_strides(D, strs1, dims1, size);
	md_calc_strides(D, strs2, dims2, size);

	md_circ_ext2(D, dims1, strs1, dst, dims2, strs2, src, size);
}
/**
 * Periodically extend array (with strides)
 *
 * Each dimension i of the output (extent dims1[i]) is filled with
 * dims1[i] / dims2[i] repetitions of the input (extent dims2[i]).
 * dims2[i] must divide dims1[i].
 */
void md_periodic2(int D, const long dims1[D], const long strs1[D], void* dst, const long dims2[D], const long strs2[D], const void* src, size_t size)
{
	// Each dimension is split into a pair (block, repetition):
	// even slots iterate within one copy of the input, odd slots
	// iterate over the copies (with input stride 0, so the same
	// source data is read again).
	long dims1B[2 * D];
	long strs1B[2 * D];
	long strs2B[2 * D];

	for (int i = 0; i < D; i++) {

		assert(0 == dims1[i] % dims2[i]);

		// blocks
		dims1B[2 * i + 0] = dims2[i];
		strs1B[2 * i + 0] = strs1[i];
		strs2B[2 * i + 0] = strs2[i];

		// periodic copies
		// BUG FIX: these entries previously overwrote slot 2*i+0
		// and the copy below used only D dimensions, dropping the
		// repetition loop entirely.
		dims1B[2 * i + 1] = dims1[i] / dims2[i];
		strs1B[2 * i + 1] = strs1[i] * dims2[i];
		strs2B[2 * i + 1] = 0;
	}

	md_copy2(2 * D, dims1B, strs1B, dst, strs2B, src, size);
}
/**
 * Periodically extend array (without strides)
 *
 */
void md_periodic(int D, const long dims1[D], void* dst, const long dims2[D], const void* src, size_t size)
{
	long strs1[D];
	long strs2[D];

	md_calc_strides(D, strs1, dims1, size);
	md_calc_strides(D, strs2, dims2, size);

	md_periodic2(D, dims1, strs1, dst, dims2, strs2, src, size);
}
/*
 * Compress a float array into a bitmask: one bit per element, set iff
 * the element is non-zero.
 *
 * Returns a newly allocated array of N/32 + 1 uint32_t words on the
 * same device as src; free with md_free().
 */
void* md_compress(int D, const long dims[D], const float* src)
{
	long N = md_calc_size(D, dims);

	uint32_t* dst = md_alloc_sameplace(1, MD_DIMS(N / 32 + 1), sizeof(uint32_t), src);

#ifdef USE_CUDA
	if (cuda_ondevice(src)) {

		cuda_compress(N, dst, src);
		return dst;
	}
#endif

	#pragma omp parallel for
	for (long i = 0; i < N / 32 + 1; i++) {

		uint32_t result = 0;

		// pack 32 elements into one word; tail bits beyond N stay zero
		for (long j = 0; j < 32; j++) {

			if (((32 * i + j) < N) && 0. != src[(32 * i + j)])
				result = MD_SET(result, j);
		}

		dst[i] = result;
	}

	return dst;
}
/*
 * Inverse of md_compress: expand a bitmask into a float array
 * containing 1. where the corresponding bit is set and 0. elsewhere.
 */
void md_decompress(int D, const long dims[D], float* dst, const void* _src)
{
	const uint32_t* src = _src;

	long N = md_calc_size(D, dims);

#ifdef USE_CUDA
	if (cuda_ondevice(src)) {

		cuda_decompress(N, dst, src);
		return;
	}
#endif

	#pragma omp parallel for
	for (long i = 0; i < N / 32 + ((0 == N % 32) ? 0 : 1); i++) {

		// unpack one 32-bit word into up to 32 output elements
		for (long j = 0; j < 32; j++) {

			if (((32 * i + j) < N))
				dst[32 * i + j] = MD_IS_SET(src[i], j) ? 1 : 0;
		}
	}
}
/**
 * Allocate (uninitialized) CPU memory
 *
 * return pointer to CPU memory; free with md_free()
 */
void* md_alloc(int D, const long dimensions[D], size_t size)
{
	size_t nbytes = md_calc_size(D, dimensions) * size;

	return xmalloc(nbytes);
}
/**
 * Allocate CPU memory and clear
 *
 * return pointer to zero-initialized CPU memory
 */
void* md_calloc(int D, const long dimensions[D], size_t size)
{
	void* mem = md_alloc(D, dimensions, size);

	md_clear(D, dimensions, mem, size);

	return mem;
}
#ifdef USE_CUDA
/**
 * Allocate GPU memory
 *
 * return pointer to GPU memory
 */
void* md_alloc_gpu(int D, const long dimensions[D], size_t size)
{
	size_t nbytes = md_calc_size(D, dimensions) * size;

	return cuda_malloc(nbytes);
}
/**
 * Allocate GPU memory and copy from CPU pointer
 *
 * return pointer to GPU memory (NULL if ptr is NULL)
 */
void* md_gpu_move(int D, const long dims[D], const void* ptr, size_t size)
{
	if (NULL == ptr)
		return NULL;

	void* ret = md_alloc_gpu(D, dims, size);

	md_copy(D, dims, ret, ptr, size);

	return ret;
}
#endif
/**
 * Allocate memory on the same device (CPU/GPU) place as ptr
 *
 * return pointer to CPU memory if ptr is in CPU or to GPU memory if ptr is in GPU
 */
void* md_alloc_sameplace(int D, const long dimensions[D], size_t size, const void* ptr)
{
#ifdef USE_CUDA
	return (cuda_ondevice(ptr) ? md_alloc_gpu : md_alloc)(D, dimensions, size);
#else
	// ptr is unused in CPU-only builds, but must still be non-NULL
	assert(0 != ptr);
	return md_alloc(D, dimensions, size);
#endif
}
/**
 * Check whether two (non-NULL) pointers reside in the same memory
 * space; in CPU-only builds this is always true.
 */
bool md_is_sameplace(const void* ptr1, const void* ptr2)
{
	assert(NULL != ptr1);
	assert(NULL != ptr2);

#ifdef USE_CUDA
	return cuda_ondevice(ptr1) == cuda_ondevice(ptr2);
#else
	return true;
#endif
}
/**
 * Free CPU/GPU memory allocated by md_alloc / md_calloc /
 * md_alloc_gpu / md_alloc_sameplace.
 */
void md_free(const void* ptr)
{
#ifdef USE_CUDA
	// dispatch to the matching deallocator for the pointer's device
	if (cuda_ondevice(ptr))
		cuda_free((void*)ptr);
	else
#endif
	xfree(ptr);
}
/*
 * Index of the highest set bit in flags, or -1 if no bit is set.
 */
int md_max_idx(unsigned long flags)
{
	int idx = -1;

	while (0 != flags) {

		flags >>= 1;
		idx++;
	}

	return idx;
}
/*
 * Index of the lowest set bit in flags, or -1 if no bit is set.
 * (Equivalent to ffsl(flags) - 1.)
 */
int md_min_idx(unsigned long flags)
{
	if (0 == flags)
		return -1;

	int idx = 0;

	while (0 == (flags & 1UL)) {

		flags >>= 1;
		idx++;
	}

	return idx;
}
/**
 * Convert a flat index to a multi-dimensional position.
 *
 * Only the flagged dimensions are unraveled (in increasing order of
 * dimension, i.e. dimension 0 is fastest); entries of pos for
 * non-flagged dimensions are left untouched.
 */
void md_unravel_index(int D, long pos[D], unsigned long flags, const long dims[D], long index)
{
	long rem = index;

	for (int d = 0; d < D; d++) {

		if (0 == (flags & (1UL << d)))
			continue;

		pos[d] = rem % dims[d];
		rem /= dims[d];
	}
}
|
d7bc7b13985e8eb95dd4754f884192f83ba1f3f8
|
f367e4b66a1ee42e85830b31df88f63723c36a47
|
/lib/luajit-3065c9/src/lj_snap.h
|
b7dabed80ddae7455d71c3d67810108c928bb4a4
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
fluent/fluent-bit
|
06873e441162b92941024e9a7e9e8fc934150bf7
|
1a41f49dc2f3ae31a780caa9ffd6137b1d703065
|
refs/heads/master
| 2023-09-05T13:44:55.347372
| 2023-09-05T10:14:33
| 2023-09-05T10:14:33
| 29,933,948
| 4,907
| 1,565
|
Apache-2.0
| 2023-09-14T10:17:02
| 2015-01-27T20:41:52
|
C
|
UTF-8
|
C
| false
| false
| 939
|
h
|
lj_snap.h
|
/*
** Snapshot handling.
** Copyright (C) 2005-2022 Mike Pall. See Copyright Notice in luajit.h
*/
#ifndef _LJ_SNAP_H
#define _LJ_SNAP_H
#include "lj_obj.h"
#include "lj_jit.h"
#if LJ_HASJIT
LJ_FUNC void lj_snap_add(jit_State *J);
LJ_FUNC void lj_snap_purge(jit_State *J);
LJ_FUNC void lj_snap_shrink(jit_State *J);
LJ_FUNC IRIns *lj_snap_regspmap(jit_State *J, GCtrace *T, SnapNo snapno,
IRIns *ir);
LJ_FUNC void lj_snap_replay(jit_State *J, GCtrace *T);
LJ_FUNC const BCIns *lj_snap_restore(jit_State *J, void *exptr);
LJ_FUNC void lj_snap_grow_buf_(jit_State *J, MSize need);
LJ_FUNC void lj_snap_grow_map_(jit_State *J, MSize need);
static LJ_AINLINE void lj_snap_grow_buf(jit_State *J, MSize need)
{
if (LJ_UNLIKELY(need > J->sizesnap)) lj_snap_grow_buf_(J, need);
}
static LJ_AINLINE void lj_snap_grow_map(jit_State *J, MSize need)
{
if (LJ_UNLIKELY(need > J->sizesnapmap)) lj_snap_grow_map_(J, need);
}
#endif
#endif
|
ce0e45d3f498a21dd62f33c78b27b01c2b7c3e82
|
ea401c3e792a50364fe11f7cea0f35f99e8f4bde
|
/released_plugins/v3d_plugins/bigneuron_AmosSironi_PrzemyslawGlowacki_SQBTree_plugin/libs/ITK_include/generic/vcl_sstream.h
|
53535930b8d411c80f459dd52600ed0f14b183f8
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Vaa3D/vaa3d_tools
|
edb696aa3b9b59acaf83d6d27c6ae0a14bf75fe9
|
e6974d5223ae70474efaa85e1253f5df1814fae8
|
refs/heads/master
| 2023-08-03T06:12:01.013752
| 2023-08-02T07:26:01
| 2023-08-02T07:26:01
| 50,527,925
| 107
| 86
|
MIT
| 2023-05-22T23:43:48
| 2016-01-27T18:19:17
|
C++
|
UTF-8
|
C
| false
| false
| 864
|
h
|
vcl_sstream.h
|
#ifndef vcl_generic_sstream_h_
#define vcl_generic_sstream_h_
// THIS IS A GENERATED FILE. DO NOT EDIT! -- Instead, edit vcl_sstream.hhh and run make
// basic_stringbuf
#ifndef vcl_basic_stringbuf
#define vcl_basic_stringbuf vcl_generic_sstream_STD :: basic_stringbuf
#endif
// stringbuf
#ifndef vcl_stringbuf
#define vcl_stringbuf vcl_generic_sstream_STD :: stringbuf
#endif
// wstringbuf
#ifndef vcl_wstringbuf
#define vcl_wstringbuf vcl_generic_sstream_STD :: wstringbuf
#endif
// stringstream
#ifndef vcl_stringstream
#define vcl_stringstream vcl_generic_sstream_STD :: stringstream
#endif
// istringstream
#ifndef vcl_istringstream
#define vcl_istringstream vcl_generic_sstream_STD :: istringstream
#endif
// ostringstream
#ifndef vcl_ostringstream
#define vcl_ostringstream vcl_generic_sstream_STD :: ostringstream
#endif
#endif // vcl_generic_sstream_h_
|
617110ac435f8657655cd73277519a6356783fd9
|
c15b0840ed5e8699ab99ab7637bf1b6a771b4518
|
/src/gpujpeg_reader.h
|
86d5896ea860c655384afbe1b6a1e9201e4a2c1f
|
[
"BSD-2-Clause"
] |
permissive
|
CESNET/GPUJPEG
|
bb3d4d7885b221e863014f0c1fb95348a577894a
|
012b2277aeedddfc3084dd49d63347d95c981b13
|
refs/heads/master
| 2023-09-04T14:48:46.652514
| 2023-07-31T10:00:19
| 2023-07-31T10:00:19
| 71,791,183
| 202
| 65
|
BSD-2-Clause
| 2022-06-15T09:42:19
| 2016-10-24T13:20:49
|
C
|
UTF-8
|
C
| false
| false
| 3,299
|
h
|
gpujpeg_reader.h
|
/**
* @file
* Copyright (c) 2011-2023, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GPUJPEG_READER_H
#define GPUJPEG_READER_H
#include "../libgpujpeg/gpujpeg_common.h"
#ifdef __cplusplus
extern "C" {
#endif
/** JPEG decoder structure predeclaration */
struct gpujpeg_decoder;
struct gpujpeg_reader;
/**
* Create JPEG reader
*
* @return reader structure if succeeds, otherwise NULL
*/
struct gpujpeg_reader*
gpujpeg_reader_create(void);
/**
* Destroy JPEG reader
*
* @param reader Reader structure
* @return 0 if succeeds, otherwise nonzero
*/
int
gpujpeg_reader_destroy(struct gpujpeg_reader* reader);
/**
* Read JPEG image from data buffer
*
* @param image Image data
* @param image_size Image data size
* @return 0 if succeeds, otherwise nonzero
*/
int
gpujpeg_reader_read_image(struct gpujpeg_decoder* decoder, uint8_t* image, size_t image_size);
/**
* Read image info from JPEG file
*
* Values read (if present) are: width, height, comp_count, color_space.
* If a value of a parameter cannot be read/deduced, corresponding member
* of gpujpeg_image_parameters is not modified. Thus the caller may initialize
* the members with some distictive values to detect this.
*
* @param image Image data
* @param image_size Image data size
* @param[out] param_image parameters obtained from image, must not be NULL
* @param[in,out] param parameters obtained from image (verbose parameter is used as an input param), non-NULL
* @param[out] segment_count number of segments (may be NULL if parameter segment_count is not needed)
* @return 0 if succeeds, otherwise nonzero
*
* @todo refactorize common code with gpujpeg_reader_read_image()
*/
int
gpujpeg_reader_get_image_info(uint8_t *image, size_t image_size, struct gpujpeg_image_parameters *param_image, struct gpujpeg_parameters *param, int *segment_count);
#ifdef __cplusplus
}
#endif
#endif // GPUJPEG_READER_H
|
fa31ed82a38268850abebb3f8ed89c5271b3cc1f
|
78b2b87d57c4db64463cdeba1f939e6b61a4a258
|
/test/iar/iar_v5/incIAR/AT91SAM7X-EK.h
|
98346759bf72740a28957829955e46d97c170405
|
[
"MIT"
] |
permissive
|
ThrowTheSwitch/CMock
|
0325f80713e897ae8b6e5dd8e402083e6ca343c8
|
c548629a478ef07f89db8b86c1dd4041a1cf4053
|
refs/heads/master
| 2023-09-03T15:10:22.935594
| 2023-08-23T12:32:14
| 2023-08-23T12:32:14
| 3,276,848
| 589
| 312
|
MIT
| 2023-08-10T10:46:19
| 2012-01-26T19:51:32
|
C
|
UTF-8
|
C
| false
| false
| 3,029
|
h
|
AT91SAM7X-EK.h
|
// ----------------------------------------------------------------------------
// ATMEL Microcontroller Software Support - ROUSSET -
// ----------------------------------------------------------------------------
// DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
// DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ----------------------------------------------------------------------------
// File Name : AT91SAM7X-EK.h
// Object : AT91SAM7X-EK Evaluation Board Features Definition File
//
// ----------------------------------------------------------------------------
#ifndef AT91SAM7X_EK_H
#define AT91SAM7X_EK_H
/*-----------------*/
/* LEDs Definition */
/*-----------------*/
#define AT91B_LED1 (1<<19) // AT91C_PIO_PB19 AT91C_PB19_PWM0 AT91C_PB19_TCLK1
#define AT91B_LED2 (1<<20) // AT91C_PIO_PB20 AT91C_PB20_PWM1 AT91C_PB20_PWM1
#define AT91B_LED3 (AT91C_PIO_PB21) // AT91C_PIO_PB21 AT91C_PB21_PWM2 AT91C_PB21_PCK1
#define AT91B_LED4 (AT91C_PIO_PB22) // AT91C_PIO_PB22 AT91C_PB22_PWM3 AT91C_PB22_PCK2
#define AT91B_NB_LEB 4
#define AT91B_LED_MASK (AT91B_LED1|AT91B_LED2|AT91B_LED3|AT91B_LED4)
#define AT91D_BASE_PIO_LED (AT91C_BASE_PIOB)
#define AT91B_POWERLED (1<<25) // PB25
/*-------------------------------*/
/* JOYSTICK Position Definition */
/*-------------------------------*/
#define AT91B_SW1 (1<<21) // PA21 Up Button AT91C_PA21_TF AT91C_PA21_NPCS10
#define AT91B_SW2 (1<<22) // PA22 Down Button AT91C_PA22_TK AT91C_PA22_SPCK1
#define AT91B_SW3 (1<<23) // PA23 Left Button AT91C_PA23_TD AT91C_PA23_MOSI1
#define AT91B_SW4 (1<<24) // PA24 Right Button AT91C_PA24_RD AT91C_PA24_MISO1
#define AT91B_SW5 (1<<25) // PA25 Push Button AT91C_PA25_RK AT91C_PA25_NPCS11
#define AT91B_SW_MASK (AT91B_SW1|AT91B_SW2|AT91B_SW3|AT91B_SW4|AT91B_SW5)
#define AT91D_BASE_PIO_SW (AT91C_BASE_PIOA)
/*------------------*/
/* CAN Definition */
/*------------------*/
#define AT91B_CAN_TRANSCEIVER_RS (1<<2) // PA2
/*--------------*/
/* Clocks */
/*--------------*/
#define AT91B_MAIN_OSC 18432000 // Main Oscillator MAINCK
#define AT91B_MCK ((18432000*73/14)/2) // Output PLL Clock
#endif /* AT91SAM7X-EK_H */
|
db8e99803d915efc021085daabbd0d2a0c4c4a5d
|
9907672fcd81ab73ac63b2a83422a82bf31eadde
|
/atcoder/abs/6.c
|
07b4718a4b4a03e941b71432b0352ff604063403
|
[
"0BSD"
] |
permissive
|
cielavenir/procon
|
bbe1974b9bddb51b76d58722a0686a5b477c4456
|
746e1a91f574f20647e8aaaac0d9e6173f741176
|
refs/heads/master
| 2023-06-21T23:11:24.562546
| 2023-06-11T13:15:15
| 2023-06-11T13:15:15
| 7,557,464
| 137
| 136
| null | 2020-10-20T09:35:52
| 2013-01-11T09:40:26
|
C++
|
UTF-8
|
C
| false
| false
| 312
|
c
|
6.c
|
#include <stdio.h>
#include <stdlib.h>
/* qsort comparator: sorts ints in descending order.
 * Uses a branch-free three-way comparison instead of subtraction,
 * which would overflow for operands of large opposite magnitudes. */
int compare(const void *a,const void *b){
	int x = *(const int*)a;
	int y = *(const int*)b;
	return (x < y) - (x > y);
}
/* Read n integers, sort descending, and print the alternating sum
 * a[0] - a[1] + a[2] - ... of the sorted sequence. */
int main(){
	int n,i,r=0,t=1;
	int arr[999];
	/* validate input: scanf may fail, and n must fit the buffer */
	if (1 != scanf("%d",&n) || n < 0 || n > 999)
		return 1;
	for(i=0;i<n;i++)
		if (1 != scanf("%d",&arr[i]))
			return 1;
	/* use sizeof instead of the hard-coded element size 4 */
	qsort(arr,n,sizeof(arr[0]),compare);
	for(i=0;i<n;i++){
		r+=t*arr[i];
		t=-t;
	}
	printf("%d\n",r);
	return 0;
}
|
00954b9d394fae8b4d0c5d56cb7e6b1b66b807c4
|
a65e2aac013b48ddfe5d6a7d108c2b84855d1148
|
/boot/zcbor/include/zcbor_common.h
|
f44ded6ad7609a0c6c886b2c6212bd04be76da51
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
mcu-tools/mcuboot
|
25ccdee3b1d319e09c9381cd00bc854efdc0142b
|
e188dbb0e1c1e2e2cecd627fbf2e13042222b1da
|
refs/heads/main
| 2023-08-30T09:17:10.520597
| 2023-08-29T06:43:16
| 2023-08-29T14:01:02
| 76,305,806
| 623
| 320
|
Apache-2.0
| 2023-09-13T11:46:03
| 2016-12-12T23:53:39
|
C
|
UTF-8
|
C
| false
| false
| 14,275
|
h
|
zcbor_common.h
|
/*
* This file has been copied from the zcbor library.
* Commit zcbor 0.7.0
*/
/*
* Copyright (c) 2020 Nordic Semiconductor ASA
*
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZCBOR_COMMON_H__
#define ZCBOR_COMMON_H__
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/** Convenience type that allows pointing to strings directly inside the payload
* without the need to copy out.
*/
struct zcbor_string {
const uint8_t *value;
size_t len;
};
/** Type representing a string fragment.
*
* Don't modify any member variables, or subsequent calls may fail.
**/
struct zcbor_string_fragment {
struct zcbor_string fragment; ///! Location and length of the fragment.
size_t offset; ///! The offset in the full string at which this fragment belongs.
size_t total_len; ///! The total length of the string this fragment is a part of.
};
/** Size to use in struct zcbor_string_fragment when the real size is unknown. */
#define ZCBOR_STRING_FRAGMENT_UNKNOWN_LENGTH SIZE_MAX
#ifdef ZCBOR_VERBOSE
#include <zephyr/sys/printk.h>
#define zcbor_trace() (printk("bytes left: %zu, byte: 0x%x, elem_count: 0x%" PRIxFAST32 ", err: %d, %s:%d\n",\
(size_t)state->payload_end - (size_t)state->payload, *state->payload, state->elem_count, \
state->constant_state ? state->constant_state->error : 0, __FILE__, __LINE__))
#define zcbor_print_assert(expr, ...) \
do { \
printk("ASSERTION \n \"" #expr \
"\"\nfailed at %s:%d with message:\n ", \
__FILE__, __LINE__); \
printk(__VA_ARGS__);\
} while(0)
#define zcbor_print(...) printk(__VA_ARGS__)
#else
#define zcbor_trace() ((void)state)
#define zcbor_print_assert(...)
#define zcbor_print(...)
#endif
#ifdef ZCBOR_ASSERTS
#define zcbor_assert(expr, ...) \
do { \
if (!(expr)) { \
zcbor_print_assert(expr, __VA_ARGS__); \
ZCBOR_FAIL(); \
} \
} while(0)
#define zcbor_assert_state(expr, ...) \
do { \
if (!(expr)) { \
zcbor_print_assert(expr, __VA_ARGS__); \
ZCBOR_ERR(ZCBOR_ERR_ASSERTION); \
} \
} while(0)
#else
#define zcbor_assert(expr, ...)
#define zcbor_assert_state(expr, ...)
#endif
#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
#ifndef ZCBOR_ARRAY_SIZE
#define ZCBOR_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
#endif
#if SIZE_MAX <= UINT64_MAX
/** The ZCBOR_SUPPORTS_SIZE_T will be defined if processing of size_t type variables directly
* with zcbor_size_ functions is supported.
**/
#define ZCBOR_SUPPORTS_SIZE_T
#else
#warning "zcbor: Unsupported size_t encoding size"
#endif
struct zcbor_state_constant;
typedef struct {
union {
uint8_t *payload_mut;
uint8_t const *payload; /**< The current place in the payload. Will be
updated when an element is correctly
processed. */
};
uint8_t const *payload_bak; /**< Temporary backup of payload. */
uint_fast32_t elem_count; /**< The current element is part of a LIST or a MAP,
and this keeps count of how many elements are
expected. This will be checked before processing
and decremented if the element is correctly
processed. */
uint8_t const *payload_end; /**< The end of the payload. This will be
checked against payload before
processing each element. */
bool indefinite_length_array; /**< Is set to true if the decoder is currently
decoding the contents of an indefinite-
length array. */
bool payload_moved; /**< Is set to true while the state is stored as a backup
if @ref zcbor_update_state is called, since that function
updates the payload_end of all backed-up states. */
struct zcbor_state_constant *constant_state; /**< The part of the state that is
not backed up and duplicated. */
} zcbor_state_t;
struct zcbor_state_constant {
zcbor_state_t *backup_list;
uint_fast32_t current_backup;
uint_fast32_t num_backups;
int error;
#ifdef ZCBOR_STOP_ON_ERROR
bool stop_on_error;
#endif
};
/** Function pointer type used with zcbor_multi_decode.
*
* This type is compatible with all decoding functions here and in the generated
* code, except for zcbor_multi_decode.
*/
typedef bool(zcbor_encoder_t)(zcbor_state_t *, const void *);
typedef bool(zcbor_decoder_t)(zcbor_state_t *, void *);
/** Enumeration representing the major types available in CBOR.
*
* The major type is represented in the 3 first bits of the header byte.
*/
typedef enum
{
ZCBOR_MAJOR_TYPE_PINT = 0, ///! Positive Integer
ZCBOR_MAJOR_TYPE_NINT = 1, ///! Negative Integer
ZCBOR_MAJOR_TYPE_BSTR = 2, ///! Byte String
ZCBOR_MAJOR_TYPE_TSTR = 3, ///! Text String
ZCBOR_MAJOR_TYPE_LIST = 4, ///! List
ZCBOR_MAJOR_TYPE_MAP = 5, ///! Map
ZCBOR_MAJOR_TYPE_TAG = 6, ///! Semantic Tag
ZCBOR_MAJOR_TYPE_PRIM = 7, ///! Primitive Type
} zcbor_major_type_t;
/** Convenience macro for failing out of a decoding/encoding function.
*/
#define ZCBOR_FAIL() \
do {\
zcbor_trace(); \
return false; \
} while(0)
#define ZCBOR_ERR(err) \
do { \
zcbor_error(state, err); \
ZCBOR_FAIL(); \
} while(0)
#define ZCBOR_ERR_IF(expr, err) \
do {\
if (expr) { \
ZCBOR_ERR(err); \
} \
} while(0)
#define ZCBOR_CHECK_PAYLOAD() \
ZCBOR_ERR_IF(state->payload >= state->payload_end, ZCBOR_ERR_NO_PAYLOAD)
#ifdef ZCBOR_STOP_ON_ERROR
#define ZCBOR_CHECK_ERROR() \
do { \
if (!zcbor_check_error(state)) { \
ZCBOR_FAIL(); \
} \
} while(0)
#else
#define ZCBOR_CHECK_ERROR()
#endif
#define ZCBOR_VALUE_IN_HEADER 23 ///! Values below this are encoded directly in the header.
#define ZCBOR_VALUE_IS_1_BYTE 24 ///! The next 1 byte contains the value.
#define ZCBOR_VALUE_IS_2_BYTES 25 ///! The next 2 bytes contain the value.
#define ZCBOR_VALUE_IS_4_BYTES 26 ///! The next 4 bytes contain the value.
#define ZCBOR_VALUE_IS_8_BYTES 27 ///! The next 8 bytes contain the value.
#define ZCBOR_VALUE_IS_INDEFINITE_LENGTH 31 ///! The list or map has indefinite length, and will instead be terminated by a 0xFF token.
#define ZCBOR_BOOL_TO_PRIM ((uint8_t)20) ///! In CBOR, false/true have the values 20/21
#define ZCBOR_FLAG_RESTORE 1UL ///! Restore from the backup. Overwrite the current state with the state from the backup.
#define ZCBOR_FLAG_CONSUME 2UL ///! Consume the backup. Remove the backup from the stack of backups.
#define ZCBOR_FLAG_TRANSFER_PAYLOAD 4UL ///! Keep the pre-restore payload after restoring.
#define ZCBOR_SUCCESS 0
#define ZCBOR_ERR_NO_BACKUP_MEM 1
#define ZCBOR_ERR_NO_BACKUP_ACTIVE 2
#define ZCBOR_ERR_LOW_ELEM_COUNT 3
#define ZCBOR_ERR_HIGH_ELEM_COUNT 4
#define ZCBOR_ERR_INT_SIZE 5
#define ZCBOR_ERR_FLOAT_SIZE 6
#define ZCBOR_ERR_ADDITIONAL_INVAL 7 ///! > 27
#define ZCBOR_ERR_NO_PAYLOAD 8
#define ZCBOR_ERR_PAYLOAD_NOT_CONSUMED 9
#define ZCBOR_ERR_WRONG_TYPE 10
#define ZCBOR_ERR_WRONG_VALUE 11
#define ZCBOR_ERR_WRONG_RANGE 12
#define ZCBOR_ERR_ITERATIONS 13
#define ZCBOR_ERR_ASSERTION 14
#define ZCBOR_ERR_UNKNOWN 31
/** The largest possible elem_count. */
#ifdef UINT_FAST32_MAX
#define ZCBOR_MAX_ELEM_COUNT UINT_FAST32_MAX
#else
#define ZCBOR_MAX_ELEM_COUNT ((uint_fast32_t)(-1L))
#endif
/** Initial value for elem_count for when it just needs to be large. */
#define ZCBOR_LARGE_ELEM_COUNT (ZCBOR_MAX_ELEM_COUNT - 16)
/** Values defined by RFC8949 via www.iana.org/assignments/cbor-tags/cbor-tags.xhtml */
enum zcbor_rfc8949_tag {
ZCBOR_TAG_TIME_TSTR = 0, ///! text string Standard date/time string
ZCBOR_TAG_TIME_NUM = 1, ///! integer or float Epoch-based date/time
ZCBOR_TAG_UBIGNUM_BSTR = 2, ///! byte string Unsigned bignum
ZCBOR_TAG_BIGNUM_BSTR = 3, ///! byte string Negative bignum
ZCBOR_TAG_DECFRAC_ARR = 4, ///! array Decimal fraction
ZCBOR_TAG_BIGFLOAT_ARR = 5, ///! array Bigfloat
ZCBOR_TAG_2BASE64URL = 21, ///! (any) Expected conversion to base64url encoding
ZCBOR_TAG_2BASE64 = 22, ///! (any) Expected conversion to base64 encoding
ZCBOR_TAG_2BASE16 = 23, ///! (any) Expected conversion to base16 encoding
ZCBOR_TAG_BSTR = 24, ///! byte string Encoded CBOR data item
ZCBOR_TAG_URI_TSTR = 32, ///! text string URI
ZCBOR_TAG_BASE64URL_TSTR = 33, ///! text string base64url
ZCBOR_TAG_BASE64_TSTR = 34, ///! text string base64
ZCBOR_TAG_MIME_TSTR = 36, ///! text string MIME message
ZCBOR_TAG_CBOR = 55799, ///! (any) Self-described CBOR
};
/** Take a backup of the current state. Overwrite the current elem_count. */
bool zcbor_new_backup(zcbor_state_t *state, uint_fast32_t new_elem_count);
/** Consult the most recent backup. In doing so, check whether elem_count is
* less than or equal to max_elem_count.
* Also, take action based on the flags (See ZCBOR_FLAG_*).
*/
bool zcbor_process_backup(zcbor_state_t *state, uint32_t flags, uint_fast32_t max_elem_count);
/** Convenience function for starting encoding/decoding of a union.
*
* That is, for attempting to encode, or especially decode, multiple options.
* Makes a new backup.
*/
bool zcbor_union_start_code(zcbor_state_t *state);
/** Convenience function before encoding/decoding one element of a union.
*
* Call this before attempting each option.
* Restores the backup, without consuming it.
*/
bool zcbor_union_elem_code(zcbor_state_t *state);
/** Convenience function before encoding/decoding one element of a union.
*
* Consumes the backup without restoring it.
*/
bool zcbor_union_end_code(zcbor_state_t *state);
/** Initialize a state with backups.
* As long as n_states is more than 1, one of the states in the array is used
* as a struct zcbor_state_constant object.
* If there is no struct zcbor_state_constant (n_states == 1), error codes are
* not available.
* This means that you get a state with (n_states - 2) backups.
* payload, payload_len, and elem_count are used to initialize the first state.
* in the array, which is the state that can be passed to cbor functions.
*/
void zcbor_new_state(zcbor_state_t *state_array, uint_fast32_t n_states,
const uint8_t *payload, size_t payload_len, uint_fast32_t elem_count);
#ifdef ZCBOR_STOP_ON_ERROR
/** Check stored error and fail if present, but only if stop_on_error is true. */
static inline bool zcbor_check_error(const zcbor_state_t *state)
{
struct zcbor_state_constant *cs = state->constant_state;
return !(cs && cs->stop_on_error && cs->error);
}
#endif
/** Return the current error state, replacing it with SUCCESS. */
static inline int zcbor_pop_error(zcbor_state_t *state)
{
	struct zcbor_state_constant *cs = state->constant_state;
	int err = ZCBOR_SUCCESS;

	if (cs != NULL) {
		err = cs->error;
		cs->error = ZCBOR_SUCCESS;
	}
	return err;
}
/** Look at current error state without altering it */
static inline int zcbor_peek_error(const zcbor_state_t *state)
{
if (!state->constant_state) {
return ZCBOR_SUCCESS;
} else {
return state->constant_state->error;
}
}
/** Write the provided error to the error state. */
static inline void zcbor_error(zcbor_state_t *state, int err)
{
#ifdef ZCBOR_STOP_ON_ERROR
	/* With stop-on-error active, never overwrite an already-stored error. */
	if (zcbor_check_error(state))
#endif
	{
		/* Errors can only be recorded when a constant_state is present. */
		if (state->constant_state) {
			state->constant_state->error = err;
		}
	}
}
/** Whether the current payload is exhausted. */
static inline bool zcbor_payload_at_end(const zcbor_state_t *state)
{
	/* True once every byte of the current payload chunk is consumed. */
	return state->payload_end == state->payload;
}
/** Update the current payload pointer (and payload_end).
*
* For use when the payload is divided into multiple chunks.
*
* This function also updates all backups to the new payload_end.
* This sets a flag so that if a backup is processed with the flag
* @ref ZCBOR_FLAG_RESTORE, but without the flag
* @ref ZCBOR_FLAG_TRANSFER_PAYLOAD since this would cause an invalid state.
*
* @param[inout] state The current state, will be updated with
* the new payload pointer.
* @param[in] payload The new payload chunk.
* @param[in] payload_len The length of the new payload chunk.
*/
void zcbor_update_state(zcbor_state_t *state,
const uint8_t *payload, size_t payload_len);
/** Check that the provided fragments are complete and in the right order.
*
* If the total length is not known, the total_len can have the value
* @ref ZCBOR_STRING_FRAGMENT_UNKNOWN_LENGTH. If so, all fragments will be
* updated with the actual total length.
*
* @param[in] fragments An array of string fragments. Cannot be NULL.
* @param[in] num_fragments The number of fragments in @p fragments.
*
* @retval true If the fragments are in the right order, and there are no
* fragments missing.
* @retval false If not all fragments have the same total_len, or gaps are
* found, or if any fragment value is NULL.
*/
bool zcbor_validate_string_fragments(struct zcbor_string_fragment *fragments,
uint_fast32_t num_fragments);
/** Assemble the fragments into a single string.
*
* The fragments are copied in the order they appear, without regard for
* offset or total_len. To ensure that the fragments are correct, first
* validate with @ref zcbor_validate_string_fragments.
*
* @param[in] fragments An array of string fragments. Cannot be NULL.
* @param[in] num_fragments The number of fragments in @p fragments.
* @param[out] result The buffer to place the assembled string into.
* @param[inout] result_len In: The length of the @p result.
* Out: The length of the assembled string.
*
* @retval true On success.
* @retval false If the assembled string would be larger than the buffer.
* The buffer might still be written to.
*/
bool zcbor_splice_string_fragments(struct zcbor_string_fragment *fragments,
uint_fast32_t num_fragments, uint8_t *result, size_t *result_len);
#ifdef __cplusplus
}
#endif
#endif /* ZCBOR_COMMON_H__ */
|
95decce4634617b6ac7f59bf22ddcfdb0fab84e1
|
39f38eab8acaf48f6c9bfd77f208984f7e19385b
|
/src/keyboard.h
|
cba806b09badceeb38fa3eb80ff965b696caf991
|
[
"MIT"
] |
permissive
|
eXeC64/imv
|
cb5e6333dd9d086cd4fbdbc49d2eb450b8d358d1
|
2144bea537bff9defc81c34517a939bf361fd736
|
refs/heads/master
| 2023-07-11T07:00:37.429872
| 2021-11-30T18:31:28
| 2021-11-30T18:31:28
| 45,642,440
| 944
| 92
|
MIT
| 2023-06-27T12:19:56
| 2015-11-05T22:05:13
|
C
|
UTF-8
|
C
| false
| false
| 1,405
|
h
|
keyboard.h
|
#ifndef IMV_KEYBOARD_H
#define IMV_KEYBOARD_H

#include <stdbool.h>
#include <unistd.h>

/* Opaque keyboard state; definition is private to the implementation. */
struct imv_keyboard;

/* Create a keyboard instance */
struct imv_keyboard *imv_keyboard_create(void);

/* Clean up a keyboard */
void imv_keyboard_free(struct imv_keyboard *keyboard);

/* Notify the keyboard of the state of a key (pressed or released) */
void imv_keyboard_update_key(struct imv_keyboard *keyboard, int scancode, bool pressed);

/* Notify the keyboard of the state of the modifiers */
void imv_keyboard_update_mods(struct imv_keyboard *keyboard,
    int depressed, int latched, int locked);

/* Write the null-terminated name of the key corresponding to scancode into buf */
size_t imv_keyboard_keyname(struct imv_keyboard *keyboard, int scancode, char *buf, size_t buflen);

/* Describe the key corresponding to scancode, with modifier keys prefixed.
 * NOTE(review): returns a heap-allocated string the caller presumably owns
 * and must free -- confirm against the implementation. */
char *imv_keyboard_describe_key(struct imv_keyboard *keyboard, int scancode);

/* Write the null-terminated text generated by scancode being pressed into buf */
size_t imv_keyboard_get_text(struct imv_keyboard *keyboard, int scancode, char *buf, size_t buflen);

/* Initialise the keymap from a string containing the description */
void imv_keyboard_set_keymap(struct imv_keyboard *keyboard, const char *keymap);

/* Should the key on a given scancode repeat when held down */
bool imv_keyboard_should_key_repeat(struct imv_keyboard *keyboard, int scancode);

#endif
|
152a08166a8c6c3ce57d13b712243d666fbd36a9
|
5eff7a36d9a9917dce9111f0c3074375fe6f7656
|
/lib/mesa/src/gallium/drivers/radeonsi/si_cp_reg_shadowing.c
|
d58b9a90bb957463b0ff380c806b5b5066b38793
|
[] |
no_license
|
openbsd/xenocara
|
cb392d02ebba06f6ff7d826fd8a89aa3b8401779
|
a012b5de33ea0b977095d77316a521195b26cc6b
|
refs/heads/master
| 2023-08-25T12:16:58.862008
| 2023-08-12T16:16:25
| 2023-08-12T16:16:25
| 66,967,384
| 177
| 66
| null | 2023-07-22T18:12:37
| 2016-08-30T18:36:01
|
C
|
UTF-8
|
C
| false
| false
| 10,242
|
c
|
si_cp_reg_shadowing.c
|
/*
* Copyright 2020 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "si_build_pm4.h"
#include "ac_debug.h"
#include "ac_shadowed_regs.h"
#include "util/u_memory.h"
/* Append a PKT3_LOAD_*_REG packet to "pm4" instructing the CP to reload all
 * shadowed register ranges of "type" from the shadow buffer "shadow_regs".
 * The packet body is a list of (reg offset, count) pairs, one per range.
 */
static void si_build_load_reg(struct si_screen *sscreen, struct si_pm4_state *pm4,
                              enum ac_reg_range_type type,
                              struct si_resource *shadow_regs)
{
   uint64_t gpu_address = shadow_regs->gpu_address;
   unsigned packet, num_ranges, offset;
   const struct ac_reg_range *ranges;

   /* Query the shadowed register ranges for this chip and range type. */
   ac_get_reg_ranges(sscreen->info.gfx_level, sscreen->info.family,
                     type, &num_ranges, &ranges);

   /* Select the LOAD packet, the register-space base offset, and the
    * sub-buffer inside the shadow BO that matches this register block. */
   switch (type) {
   case SI_REG_RANGE_UCONFIG:
      gpu_address += SI_SHADOWED_UCONFIG_REG_OFFSET;
      offset = CIK_UCONFIG_REG_OFFSET;
      packet = PKT3_LOAD_UCONFIG_REG;
      break;
   case SI_REG_RANGE_CONTEXT:
      gpu_address += SI_SHADOWED_CONTEXT_REG_OFFSET;
      offset = SI_CONTEXT_REG_OFFSET;
      packet = PKT3_LOAD_CONTEXT_REG;
      break;
   default:
      gpu_address += SI_SHADOWED_SH_REG_OFFSET;
      offset = SI_SH_REG_OFFSET;
      packet = PKT3_LOAD_SH_REG;
      break;
   }

   /* Packet header + 64-bit source address, then one (offset, size) pair
    * per range, both expressed in dwords. */
   si_pm4_cmd_add(pm4, PKT3(packet, 1 + num_ranges * 2, 0));
   si_pm4_cmd_add(pm4, gpu_address);
   si_pm4_cmd_add(pm4, gpu_address >> 32);
   for (unsigned i = 0; i < num_ranges; i++) {
      si_pm4_cmd_add(pm4, (ranges[i].offset - offset) / 4);
      si_pm4_cmd_add(pm4, ranges[i].size / 4);
   }
}
/* Build the IB preamble that enables CP register shadowing.
 *
 * The preamble: (1) waits for the GPU to go idle and flushes caches in a
 * chip-generation-specific way, (2) enables register load/shadow via
 * CONTEXT_CONTROL, and (3) emits LOAD packets so the CP restores all
 * shadowed register ranges from memory on each context switch.
 * Caller owns the returned si_pm4_state.
 */
static struct si_pm4_state *
si_create_shadowing_ib_preamble(struct si_context *sctx)
{
   struct si_shadow_preamble {
      struct si_pm4_state pm4;
      uint32_t more_pm4[150]; /* Add more space because the command buffer is large. */
   };
   struct si_pm4_state *pm4 = (struct si_pm4_state *)CALLOC_STRUCT(si_shadow_preamble);

   /* Add all the space that we allocated. */
   pm4->max_dw = (sizeof(struct si_shadow_preamble) - offsetof(struct si_shadow_preamble, pm4.pm4)) / 4;

   if (sctx->screen->dpbb_allowed) {
      si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
      si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
   }

   /* Wait for idle, because we'll update VGT ring pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));

   /* VGT_FLUSH is required even if VGT is idle. It resets VGT pointers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
   si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));

   if (sctx->gfx_level >= GFX11) {
      /* We must wait for idle using an EOP event before changing the attribute ring registers.
       * Use the bottom-of-pipe EOP event, but increment the PWS counter instead of writing memory.
       */
      si_pm4_cmd_add(pm4, PKT3(PKT3_RELEASE_MEM, 6, 0));
      si_pm4_cmd_add(pm4, S_490_EVENT_TYPE(V_028A90_BOTTOM_OF_PIPE_TS) |
                          S_490_EVENT_INDEX(5) |
                          S_490_PWS_ENABLE(1));
      si_pm4_cmd_add(pm4, 0); /* DST_SEL, INT_SEL, DATA_SEL */
      si_pm4_cmd_add(pm4, 0); /* ADDRESS_LO */
      si_pm4_cmd_add(pm4, 0); /* ADDRESS_HI */
      si_pm4_cmd_add(pm4, 0); /* DATA_LO */
      si_pm4_cmd_add(pm4, 0); /* DATA_HI */
      si_pm4_cmd_add(pm4, 0); /* INT_CTXID */

      /* Invalidate/write back all GPU caches (GCR = global cache request). */
      unsigned gcr_cntl = S_586_GL2_INV(1) | S_586_GL2_WB(1) |
                          S_586_GLM_INV(1) | S_586_GLM_WB(1) |
                          S_586_GL1_INV(1) | S_586_GLV_INV(1) |
                          S_586_GLK_INV(1) | S_586_GLI_INV(V_586_GLI_ALL);

      /* Wait for the PWS counter. */
      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      si_pm4_cmd_add(pm4, S_580_PWS_STAGE_SEL(V_580_CP_PFP) |
                          S_580_PWS_COUNTER_SEL(V_580_TS_SELECT) |
                          S_580_PWS_ENA2(1) |
                          S_580_PWS_COUNT(0));
      si_pm4_cmd_add(pm4, 0xffffffff); /* GCR_SIZE */
      si_pm4_cmd_add(pm4, 0x01ffffff); /* GCR_SIZE_HI */
      si_pm4_cmd_add(pm4, 0); /* GCR_BASE_LO */
      si_pm4_cmd_add(pm4, 0); /* GCR_BASE_HI */
      si_pm4_cmd_add(pm4, S_585_PWS_ENA(1));
      si_pm4_cmd_add(pm4, gcr_cntl); /* GCR_CNTL */
   } else if (sctx->gfx_level >= GFX10) {
      unsigned gcr_cntl = S_586_GL2_INV(1) | S_586_GL2_WB(1) |
                          S_586_GLM_INV(1) | S_586_GLM_WB(1) |
                          S_586_GL1_INV(1) | S_586_GLV_INV(1) |
                          S_586_GLK_INV(1) | S_586_GLI_INV(V_586_GLI_ALL);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff); /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);   /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);          /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A); /* POLL_INTERVAL */
      si_pm4_cmd_add(pm4, gcr_cntl);   /* GCR_CNTL */
      si_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      si_pm4_cmd_add(pm4, 0);
   } else if (sctx->gfx_level == GFX9) {
      unsigned cp_coher_cntl = S_0301F0_SH_ICACHE_ACTION_ENA(1) |
                               S_0301F0_SH_KCACHE_ACTION_ENA(1) |
                               S_0301F0_TC_ACTION_ENA(1) |
                               S_0301F0_TCL1_ACTION_ENA(1) |
                               S_0301F0_TC_WB_ACTION_ENA(1);

      si_pm4_cmd_add(pm4, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
      si_pm4_cmd_add(pm4, cp_coher_cntl); /* CP_COHER_CNTL */
      si_pm4_cmd_add(pm4, 0xffffffff);    /* CP_COHER_SIZE */
      si_pm4_cmd_add(pm4, 0xffffff);      /* CP_COHER_SIZE_HI */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE */
      si_pm4_cmd_add(pm4, 0);             /* CP_COHER_BASE_HI */
      si_pm4_cmd_add(pm4, 0x0000000A);    /* POLL_INTERVAL */
      si_pm4_cmd_add(pm4, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      si_pm4_cmd_add(pm4, 0);
   } else {
      unreachable("invalid chip");
   }

   /* Enable loading and shadowing for context, SH, and UCONFIG registers. */
   si_pm4_cmd_add(pm4, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
   si_pm4_cmd_add(pm4,
                  CC0_UPDATE_LOAD_ENABLES(1) |
                  CC0_LOAD_PER_CONTEXT_STATE(1) |
                  CC0_LOAD_CS_SH_REGS(1) |
                  CC0_LOAD_GFX_SH_REGS(1) |
                  CC0_LOAD_GLOBAL_UCONFIG(1));
   si_pm4_cmd_add(pm4,
                  CC1_UPDATE_SHADOW_ENABLES(1) |
                  CC1_SHADOW_PER_CONTEXT_STATE(1) |
                  CC1_SHADOW_CS_SH_REGS(1) |
                  CC1_SHADOW_GFX_SH_REGS(1) |
                  CC1_SHADOW_GLOBAL_UCONFIG(1));

   /* One LOAD packet per shadowed register range type. */
   for (unsigned i = 0; i < SI_NUM_SHADOWED_REG_RANGES; i++)
      si_build_load_reg(sctx->screen, pm4, i, sctx->shadowed_regs);

   return pm4;
}
/* Callback for ac_emulate_clear_state(): emit "num" consecutive context
 * register values starting at register "reg" into the command stream. */
static void si_set_context_reg_array(struct radeon_cmdbuf *cs, unsigned reg, unsigned num,
                                     const uint32_t *values)
{
   radeon_begin(cs);
   radeon_set_context_reg_seq(reg, num);
   radeon_emit_array(values, num);
   radeon_end();
}
/* Set up CP register shadowing for "sctx" when mid-command-buffer preemption
 * is enabled or explicitly requested via the SHADOW_REGS debug flag.
 * Allocates the shadow buffer, emits the initial register state, and
 * installs the shadowing preamble as the preemption preamble IB.
 */
void si_init_cp_reg_shadowing(struct si_context *sctx)
{
   if (sctx->screen->info.mid_command_buffer_preemption_enabled ||
       sctx->screen->debug_flags & DBG(SHADOW_REGS)) {
      sctx->shadowed_regs =
            si_aligned_buffer_create(sctx->b.screen,
                                     PIPE_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
                                     PIPE_USAGE_DEFAULT,
                                     SI_SHADOWED_REG_BUFFER_SIZE,
                                     4096);
      /* Allocation failure is non-fatal: shadowing is simply left disabled. */
      if (!sctx->shadowed_regs)
         fprintf(stderr, "radeonsi: cannot create a shadowed_regs buffer\n");
   }

   si_init_cs_preamble_state(sctx, sctx->shadowed_regs != NULL);

   if (sctx->shadowed_regs) {
      /* We need to clear the shadowed reg buffer. */
      si_cp_dma_clear_buffer(sctx, &sctx->gfx_cs, &sctx->shadowed_regs->b.b,
                             0, sctx->shadowed_regs->bo_size, 0, SI_OP_SYNC_AFTER,
                             SI_COHERENCY_CP, L2_BYPASS);

      /* Create the shadowing preamble. */
      struct si_pm4_state *shadowing_preamble =
            si_create_shadowing_ib_preamble(sctx);

      /* Initialize shadowed registers as follows. */
      radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, sctx->shadowed_regs,
                                RADEON_USAGE_READWRITE | RADEON_PRIO_DESCRIPTORS);
      si_pm4_emit(sctx, shadowing_preamble);
      ac_emulate_clear_state(&sctx->screen->info, &sctx->gfx_cs, si_set_context_reg_array);
      si_pm4_emit(sctx, sctx->cs_preamble_state);

      /* The register values are shadowed, so we won't need to set them again. */
      si_pm4_free_state(sctx, sctx->cs_preamble_state, ~0);
      sctx->cs_preamble_state = NULL;

      si_set_tracked_regs_to_clear_state(sctx);

      /* Setup preemption. The shadowing preamble will be executed as a preamble IB,
       * which will load register values from memory on a context switch.
       */
      sctx->ws->cs_setup_preemption(&sctx->gfx_cs, shadowing_preamble->pm4,
                                    shadowing_preamble->ndw);
      si_pm4_free_state(sctx, shadowing_preamble, ~0);
   }
}
|
82bbe2495692a0343feb07a313e497f34797bf53
|
167c6226bc77c5daaedab007dfdad4377f588ef4
|
/cpp/ql/test/library-tests/attributes/var_attributes/var_attributes.c
|
336b6971bdfbd9f06fa917a2507b86f2adee587f
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
github/codeql
|
1eebb449a34f774db9e881b52cb8f7a1b1a53612
|
d109637e2d7ab3b819812eb960c05cb31d9d2168
|
refs/heads/main
| 2023-08-20T11:32:39.162059
| 2023-08-18T14:33:32
| 2023-08-18T14:33:32
| 143,040,428
| 5,987
| 1,363
|
MIT
| 2023-09-14T19:36:50
| 2018-07-31T16:35:51
|
CodeQL
|
UTF-8
|
C
| false
| false
| 248
|
c
|
var_attributes.c
|
/* Test fixture: variable and parameter declarations carrying GCC
 * __attribute__ specifiers (weak, weakref, used, unused). */
int weak_var __attribute__((weak));
static int weakref_var __attribute__((weakref));
static int used_var __attribute__((used));
static int unused_var __attribute__((unused));
static void f1(unsigned unused_param __attribute__((unused))) {}
|
c8aa26d0d0183856768e1339458c51c15fad6573
|
56e615c555c423a3bc68ad0e9a5677c7c94960ef
|
/libi2pd_wrapper/capi.h
|
aefd89f37a6acc43d4843bed29f6f8282542a229
|
[
"BSD-3-Clause",
"OpenSSL"
] |
permissive
|
PurpleI2P/i2pd
|
bc1f5c2513e3f0b7a51b274001e343142c403f79
|
7b6aa41ca8dc3ca2a0c9d01ff9870794ad98e403
|
refs/heads/openssl
| 2023-09-04T04:44:21.112974
| 2023-08-31T16:52:51
| 2023-08-31T16:52:51
| 12,522,239
| 2,642
| 540
|
BSD-3-Clause
| 2023-09-07T14:25:41
| 2013-09-01T15:09:28
|
C++
|
UTF-8
|
C
| false
| false
| 671
|
h
|
capi.h
|
/*
 * Copyright (c) 2021-2022, The PurpleI2P Project
 *
 * This file is part of Purple i2pd project and licensed under BSD3
 *
 * See full license text in LICENSE file at top of project tree
 */

#ifndef CAPI_H__
#define CAPI_H__

#ifdef __cplusplus
extern "C" {
#endif

/* Initialize the i2pd router; appName selects config/log file naming. */
void C_InitI2P (int argc, char *argv[], const char * appName);
//void C_InitI2P (int argc, char** argv, const char * appName);

/* Shut the router down and release its resources. */
void C_TerminateI2P ();

/* Start the router after initialization. */
void C_StartI2P ();

/* Stop the router.
 * NOTE(review): the original comment here ("write system log to logStream,
 * if not specified to <appName>.log") appears to describe a log-stream setup
 * function that is not declared in this header -- verify against capi.cpp. */
void C_StopI2P ();

void C_RunPeerTest (); // should be called after UPnP

#ifdef __cplusplus
}
#endif

#endif
|
083793f523bf7c233d70e3d13fbedf5248b6f2ae
|
2376f587d9ecf892b2e6af0ba8f35a387b75160d
|
/src/rtpp_ringbuf.c
|
8fae492d9a32331df7bae531657f2892887a3c15
|
[
"BSD-2-Clause"
] |
permissive
|
sippy/rtpproxy
|
605939c4afef8cdbb78e09d8fbe1deef80a3828d
|
a7f1b1e3617ca8179f7ff6619d5920ecc615bd20
|
refs/heads/master
| 2023-08-27T21:37:48.239135
| 2023-07-24T20:33:48
| 2023-07-24T22:01:48
| 20,834,852
| 396
| 122
|
BSD-2-Clause
| 2023-01-20T00:39:46
| 2014-06-14T15:22:11
|
Makefile
|
UTF-8
|
C
| false
| false
| 3,890
|
c
|
rtpp_ringbuf.c
|
/*
* Copyright (c) 2004-2006 Maxim Sobolev <sobomax@FreeBSD.org>
* Copyright (c) 2006-2015 Sippy Software, Inc., http://www.sippysoft.com
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include "rtpp_types.h"
#include "rtpp_refcnt.h"
#include "rtpp_ringbuf.h"
#include "rtpp_ringbuf_fin.h"
#include "rtpp_mallocs.h"
/* Private state behind the public rtpp_ringbuf interface. */
struct rtpp_ringbuf_priv
{
    struct rtpp_ringbuf pub; /* Public part handed out to callers. */
    void *elements;          /* Flat array of nelements slots, el_size bytes each. */
    int nelements;           /* Slot capacity of the ring. */
    size_t el_size;          /* Size of a single element in bytes. */
    int c_elem;              /* Index of the next slot to be written. */
    int b_full;              /* Set once the ring has wrapped at least once. */
};

static void rtpp_ringbuf_dtor(struct rtpp_ringbuf_priv *);
static void rtpp_ringbuf_push(struct rtpp_ringbuf *, void *);
static void rtpp_ringbuf_flush(struct rtpp_ringbuf *);
static int rtpp_ringbuf_locate(struct rtpp_ringbuf *, void *);

/* Method table wiring the public interface to the implementations above. */
DEFINE_SMETHODS(rtpp_ringbuf,
    .push = &rtpp_ringbuf_push,
    .flush = &rtpp_ringbuf_flush,
    .locate = &rtpp_ringbuf_locate,
);
/*
 * Construct a ring buffer of "nelements" fixed-size slots of "el_size"
 * bytes each.  Returns the public interface pointer, or NULL on
 * allocation failure.
 */
struct rtpp_ringbuf *
rtpp_ringbuf_ctor(size_t el_size, int nelements)
{
    struct rtpp_ringbuf_priv *pvt;

    /* Zero-initialized refcounted allocation: c_elem and b_full start at 0. */
    pvt = rtpp_rzmalloc(sizeof(struct rtpp_ringbuf_priv), PVT_RCOFFS(pvt));
    if (pvt == NULL) {
        goto e0;
    }
    pvt->elements = rtpp_zmalloc(el_size * nelements);
    if (pvt->elements == NULL) {
        goto e1;
    }
    pvt->el_size = el_size;
    pvt->nelements = nelements;
    /* Register the finalizer so the dtor runs when the last ref is dropped. */
    PUBINST_FININIT(&pvt->pub, pvt, rtpp_ringbuf_dtor);
    return (&pvt->pub);

e1:
    /* NOTE(review): DECREF followed by free(pvt) looks like it could
     * double-free if releasing the last reference already frees the
     * allocation -- verify against rtpp_rzmalloc()/RTPP_OBJ_DECREF()
     * semantics. */
    RTPP_OBJ_DECREF(&(pvt->pub));
    free(pvt);
e0:
    return (NULL);
}
/*
 * Finalizer invoked when the last reference is dropped: run the fin
 * hooks first, then release the element storage and the object itself
 * (order matters -- fin may still touch pvt). */
static void
rtpp_ringbuf_dtor(struct rtpp_ringbuf_priv *pvt)
{
    rtpp_ringbuf_fin(&(pvt->pub));
    free(pvt->elements);
    free(pvt);
}
/*
 * Copy one element into the next slot, wrapping to the start and
 * marking the ring full once the end has been reached.
 */
static void
rtpp_ringbuf_push(struct rtpp_ringbuf *self, void *data)
{
    struct rtpp_ringbuf_priv *pvt;
    char *slot;

    PUB2PVT(self, pvt);
    slot = (char *)pvt->elements + pvt->el_size * pvt->c_elem;
    memcpy(slot, data, pvt->el_size);
    pvt->c_elem += 1;
    if (pvt->c_elem >= pvt->nelements) {
        pvt->c_elem = 0;
        pvt->b_full = 1;
    }
}
/* Discard all stored elements, resetting the ring to its empty state. */
static void
rtpp_ringbuf_flush(struct rtpp_ringbuf *self)
{
    struct rtpp_ringbuf_priv *pvt;

    PUB2PVT(self, pvt);
    pvt->c_elem = 0;
    pvt->b_full = 0;
}
/*
 * Linear search for an element byte-identical to "data".  Returns its
 * slot index, or -1 when not present.  Only slots that have actually
 * been written are inspected.
 */
static int
rtpp_ringbuf_locate(struct rtpp_ringbuf *self, void *data)
{
    struct rtpp_ringbuf_priv *pvt;
    const char *slot;
    int nused, idx;

    PUB2PVT(self, pvt);
    nused = (pvt->b_full != 0) ? pvt->nelements : pvt->c_elem;
    for (idx = 0; idx < nused; idx++) {
        slot = (const char *)pvt->elements + pvt->el_size * idx;
        if (memcmp(slot, data, pvt->el_size) == 0)
            return (idx);
    }
    return (-1);
}
|
1ac592c76240897383f9a999090ca0c7bc4e41fe
|
9ceacf33fd96913cac7ef15492c126d96cae6911
|
/usr.bin/mandoc/term_tab.c
|
160a014592fc2bc45b35c976c6fb313989ac2b4e
|
[] |
no_license
|
openbsd/src
|
ab97ef834fd2d5a7f6729814665e9782b586c130
|
9e79f3a0ebd11a25b4bff61e900cb6de9e7795e9
|
refs/heads/master
| 2023-09-02T18:54:56.624627
| 2023-09-02T15:16:12
| 2023-09-02T15:16:12
| 66,966,208
| 3,394
| 1,235
| null | 2023-08-08T02:42:25
| 2016-08-30T18:18:25
|
C
|
UTF-8
|
C
| false
| false
| 3,232
|
c
|
term_tab.c
|
/* $OpenBSD: term_tab.c,v 1.5 2021/10/04 18:56:24 schwarze Exp $ */
/*
* Copyright (c) 2017, 2021 Ingo Schwarze <schwarze@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/types.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include "mandoc_aux.h"
#include "out.h"
#include "term.h"
/* A growable array of tab positions, in basic output units. */
struct tablist {
	size_t *t; /* Allocated array of tab positions. */
	size_t s; /* Allocated number of positions. */
	size_t n; /* Currently used number of positions. */
};

/* Module-local tab state shared by all term_tab_* functions. */
static struct {
	struct tablist a; /* All tab positions for lookup. */
	struct tablist p; /* Periodic tab positions to add. */
	struct tablist *r; /* Tablist currently being recorded. */
	size_t d; /* Default tab width in units of n. */
} tabs;
/*
 * Process one roff .ta argument: NULL clears all stops and selects the
 * absolute list, "T" switches to recording periodic stops, and anything
 * else is parsed as a (possibly "+"-relative) scaling unit and appended
 * to the currently selected list.
 */
void
term_tab_set(const struct termp *p, const char *arg)
{
	struct roffsu su;
	struct tablist *tl;
	size_t pos;
	int add;

	/* Special arguments: clear all tabs or switch lists. */

	if (arg == NULL) {
		tabs.a.n = tabs.p.n = 0;
		tabs.r = &tabs.a;
		if (tabs.d == 0) {
			/* Lazily compute the default tab width (0.8 inch). */
			a2roffsu(".8i", &su, SCALE_IN);
			tabs.d = term_hen(p, &su);
		}
		return;
	}
	if (arg[0] == 'T' && arg[1] == '\0') {
		tabs.r = &tabs.p;
		return;
	}

	/* Parse the sign, the number, and the unit. */

	if (*arg == '+') {
		add = 1;
		arg++;
	} else
		add = 0;
	if (a2roffsu(arg, &su, SCALE_EM) == NULL)
		return;

	/* Select the list, and extend it if it is full. */

	tl = tabs.r;
	if (tl->n >= tl->s) {
		tl->s += 8;
		tl->t = mandoc_reallocarray(tl->t, tl->s, sizeof(*tl->t));
	}

	/* Append the new position. */

	pos = term_hen(p, &su);
	tl->t[tl->n] = pos;
	/* A "+" position is relative to the previous stop, if any. */
	if (add && tl->n)
		tl->t[tl->n] += tl->t[tl->n - 1];
	tl->n++;
}
/*
* Simplified version without a parser,
* never incremental, never periodic, for use by tbl(7).
*/
void
term_tab_iset(size_t inc)
{
if (tabs.a.n >= tabs.a.s) {
tabs.a.s += 8;
tabs.a.t = mandoc_reallocarray(tabs.a.t, tabs.a.s,
sizeof(*tabs.a.t));
}
tabs.a.t[tabs.a.n++] = inc;
}
/*
 * Return the first tab stop strictly after "prev".  When the absolute
 * stops run out, extend them by one period of the periodic stops and
 * keep searching; if there are no periodic stops either, return "prev"
 * unchanged.
 */
size_t
term_tab_next(size_t prev)
{
	size_t i, j;

	for (i = 0;; i++) {
		if (i == tabs.a.n) {
			if (tabs.p.n == 0)
				return prev;
			/* Append one period of the periodic stops,
			 * each offset from the last absolute stop. */
			tabs.a.n += tabs.p.n;
			if (tabs.a.s < tabs.a.n) {
				tabs.a.s = tabs.a.n;
				tabs.a.t = mandoc_reallocarray(tabs.a.t,
				    tabs.a.s, sizeof(*tabs.a.t));
			}
			for (j = 0; j < tabs.p.n; j++)
				tabs.a.t[i + j] = tabs.p.t[j] +
				    (i ? tabs.a.t[i - 1] : 0);
		}
		if (prev < tabs.a.t[i])
			return tabs.a.t[i];
	}
}
/* Release all tab state and reset to recording absolute stops. */
void
term_tab_free(void)
{
	free(tabs.p.t);
	free(tabs.a.t);
	memset(&tabs, 0, sizeof(tabs));
	tabs.r = &tabs.a;
}
|
940fe8ceb2d564cab1416296970f2af7db46c407
|
b6acd6eed2b8946c1c1e19fa30081cbab0a2954f
|
/starry_fmu/RTOS/components/external/SQLite-3.8.1/src/sqlite_config_rtthread.h
|
5b4fb1858a3405c6bd1170ac0e4117c12fc57bd7
|
[
"BSD-3-Clause"
] |
permissive
|
JcZou/StarryPilot
|
7ce1ed454f133ccd30d71916811e2bf23196d2eb
|
97af0338a54e1eeece877c72222aeaf4b7e80ad7
|
refs/heads/master
| 2023-03-12T19:10:17.225314
| 2021-11-27T19:44:26
| 2021-11-27T19:44:26
| 137,048,745
| 304
| 172
|
BSD-3-Clause
| 2020-08-26T07:34:49
| 2018-06-12T09:27:59
|
C
|
UTF-8
|
C
| false
| false
| 1,039
|
h
|
sqlite_config_rtthread.h
|
#ifndef _SQLITE_CONFIG_RTTHREAD_H_
#define _SQLITE_CONFIG_RTTHREAD_H_

/*
 * SQLite compile macro
 *
 * Build-time configuration for compiling SQLite on RT-Thread: disables
 * features unavailable on the RTOS (extensions, WAL, wide-char support)
 * and selects the RT-Thread OS porting layer.
 */

#ifndef SQLITE_MINIMUM_FILE_DESCRIPTOR
#define SQLITE_MINIMUM_FILE_DESCRIPTOR 0
#endif

#ifndef SQLITE_OMIT_LOAD_EXTENSION
#define SQLITE_OMIT_LOAD_EXTENSION 1
#endif

/* NOTE(review): the guard around SQLITE_OMIT_WAL is commented out (and was
 * malformed), so this macro is defined unconditionally -- a prior
 * definition on the command line would trigger a redefinition warning. */
//#ifndef #define SQLITE_OMIT_WAL
#define SQLITE_OMIT_WAL
//#endif

#ifndef SQLITE_RTTHREAD_NO_WIDE
#define SQLITE_RTTHREAD_NO_WIDE 1
#endif

#ifndef SQLITE_ENABLE_LOCKING_STYLE
#define SQLITE_ENABLE_LOCKING_STYLE 0
#endif

#ifndef SQLITE_DISABLE_LOCKING_STYLE
#define SQLITE_DISABLE_LOCKING_STYLE 1
#endif

#ifndef SQLITE_TEMP_STORE
#define SQLITE_TEMP_STORE 1
#endif

#ifndef SQLITE_THREADSAFE
#define SQLITE_THREADSAFE 1
#endif

#ifndef HAVE_READLINE
#define HAVE_READLINE 0
#endif

/* Disable assert() in the SQLite build. */
#ifndef NDEBUG
#define NDEBUG
#endif

#ifndef _HAVE_SQLITE_CONFIG_H
#define _HAVE_SQLITE_CONFIG_H
#endif

#ifndef BUILD_sqlite
#define BUILD_sqlite
#endif

/* Select the custom (RT-Thread) OS layer instead of unix/win32. */
#ifndef SQLITE_OS_OTHER
#define SQLITE_OS_OTHER 1
#endif

#ifndef SQLITE_OS_RTTHREAD
#define SQLITE_OS_RTTHREAD 1
#endif

#endif
|
daf4e9abfafc9d43ed9fac24174d28e59cb4cb89
|
5eff7a36d9a9917dce9111f0c3074375fe6f7656
|
/lib/mesa/src/freedreno/vulkan/tu_nir_lower_multiview.c
|
6b4ea3020eeb39d702b3f4d9a4d3c72322b8fd47
|
[] |
no_license
|
openbsd/xenocara
|
cb392d02ebba06f6ff7d826fd8a89aa3b8401779
|
a012b5de33ea0b977095d77316a521195b26cc6b
|
refs/heads/master
| 2023-08-25T12:16:58.862008
| 2023-08-12T16:16:25
| 2023-08-12T16:16:25
| 66,967,384
| 177
| 66
| null | 2023-07-22T18:12:37
| 2016-08-30T18:36:01
|
C
|
UTF-8
|
C
| false
| false
| 3,820
|
c
|
tu_nir_lower_multiview.c
|
/*
* Copyright © 2020 Valve Corporation
* SPDX-License-Identifier: MIT
*/
#include "tu_shader.h"
#include "nir_builder.h"
#include "tu_device.h"
/* Some a6xx variants cannot support a non-contiguous multiview mask. Instead,
* inside the shader something like this needs to be inserted:
*
* gl_Position = ((1ull << gl_ViewIndex) & view_mask) ? gl_Position : vec4(0.);
*
* Scan backwards until we find the gl_Position write (there should only be
* one).
*/
/* NIR pass: make a sparse view mask usable on hardware that only supports
 * contiguous masks.  Widens *mask to cover all views up to its highest set
 * bit, and rewrites the gl_Position store so views absent from the original
 * mask output vec4(0).  Returns whether the shader was modified.
 */
static bool
lower_multiview_mask(nir_shader *nir, uint32_t *mask)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   /* A mask of the form 2^k - 1 is already contiguous: nothing to do. */
   if (util_is_power_of_two_or_zero(*mask + 1)) {
      nir_metadata_preserve(impl, nir_metadata_all);
      return false;
   }

   nir_builder b;
   nir_builder_init(&b, impl);

   uint32_t old_mask = *mask;
   *mask = BIT(util_logbase2(old_mask) + 1) - 1;

   /* Scan backwards for the (single) gl_Position store. */
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         if (intrin->intrinsic != nir_intrinsic_store_deref)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
         if (!nir_deref_mode_is(deref, nir_var_shader_out))
            continue;

         nir_variable *var = nir_deref_instr_get_variable(deref);
         if (var->data.location != VARYING_SLOT_POS)
            continue;

         assert(intrin->src[1].is_ssa);
         nir_ssa_def *orig_src = intrin->src[1].ssa;
         b.cursor = nir_before_instr(instr);

         /* ((1ull << gl_ViewIndex) & mask) != 0 */
         nir_ssa_def *cmp =
            nir_i2b(&b, nir_iand(&b, nir_imm_int(&b, old_mask),
                                 nir_ishl(&b, nir_imm_int(&b, 1),
                                          nir_load_view_index(&b))));

         /* Keep the position for active views, zero it for the rest. */
         nir_ssa_def *src = nir_bcsel(&b, cmp, orig_src, nir_imm_float(&b, 0.));
         nir_instr_rewrite_src(instr, &intrin->src[1], nir_src_for_ssa(src));

         nir_metadata_preserve(impl, nir_metadata_block_index |
                                     nir_metadata_dominance);
         return true;
      }
   }

   nir_metadata_preserve(impl, nir_metadata_all);
   return false;
}
/* Lower multiview for a vertex shader: on hardware without multiview-mask
 * support, make the mask contiguous; when profitable and supported, apply
 * NIR's multi-position-output lowering.  Returns whether the shader was
 * modified.
 */
bool
tu_nir_lower_multiview(nir_shader *nir, uint32_t mask, struct tu_device *dev)
{
   bool progress = false;

   if (!dev->physical_device->info->a6xx.supports_multiview_mask)
      NIR_PASS(progress, nir, lower_multiview_mask, &mask);

   unsigned num_views = util_logbase2(mask) + 1;

   /* Blob doesn't apply multipos optimization starting from 11 views
    * even on a650, however in practice, with the limit of 16 views,
    * tests pass on a640/a650 and fail on a630.
    */
   unsigned max_views_for_multipos =
      dev->physical_device->info->a6xx.supports_multiview_mask ? 16 : 10;

   /* Speculatively assign output locations so that we know num_outputs. We
    * will assign output locations for real after this pass.
    */
   unsigned num_outputs;
   nir_assign_io_var_locations(nir, nir_var_shader_out, &num_outputs, MESA_SHADER_VERTEX);

   /* In addition to the generic checks done by NIR, check that we don't
    * overflow VPC with the extra copies of gl_Position.
    */
   if (likely(!(dev->physical_device->instance->debug_flags & TU_DEBUG_NOMULTIPOS)) &&
       num_views <= max_views_for_multipos && num_outputs + (num_views - 1) <= 32 &&
       nir_can_lower_multiview(nir)) {
      /* It appears that the multiview mask is ignored when multi-position
       * output is enabled, so we have to write 0 to inactive views ourselves.
       */
      NIR_PASS(progress, nir, lower_multiview_mask, &mask);

      NIR_PASS_V(nir, nir_lower_multiview, mask);
      progress = true;
   }

   return progress;
}
|
9da7c02b908723790f71eb7c9b543d7d0a7dfbaf
|
fa1a4c9c404b20cac10f537c419489ed8bb84ede
|
/examples/cx16/k_datetime_test.c
|
495a49d7bde208a2860a4e217d3ff27190b392d0
|
[
"LLVM-exception",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
llvm-mos/llvm-mos-sdk
|
cff8ec1780b5c79b63649ba567acb7cd271f0c32
|
18e0edd90648f67717b26f99559dae408eaa8b32
|
refs/heads/main
| 2023-08-31T15:47:28.907521
| 2023-08-29T16:40:59
| 2023-08-29T17:31:04
| 348,140,902
| 170
| 37
|
NOASSERTION
| 2023-09-14T19:46:55
| 2021-03-15T22:30:33
|
C
|
UTF-8
|
C
| false
| false
| 1,256
|
c
|
k_datetime_test.c
|
// llvm-mos-sdk cx16 kernel test
//
// vim: set et ts=4 sw=4
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <cbm.h>
#include <cx16.h>
// Print a hex dump of `bytes` bytes starting at `ptr`, 16 bytes per row.
// Each row is prefixed with the hex offset of its first byte; bytes within
// a row are separated by ", ".  A trailing newline is always printed.
static void hexdump(const void *ptr, size_t bytes) {
    const uint8_t *p = (const uint8_t *)ptr;
    for (size_t i = 0; i < bytes; i++) {
        if ((i & 0xf) == 0) {
            // Start of a new row: terminate the previous one (if any) and
            // print the offset label.
            if (i) {
                printf("\n");
            }
            // Cast: %x expects unsigned int; size_t may be a wider type,
            // and passing it unconverted is undefined behavior.
            printf("%04x: ", (unsigned)i);
        } else {
            printf(", ");
        }
        printf("%02x", p[i]);
    }
    printf("\n");
}
// Fetch the current kernal date/time into *tm, print the decoded fields,
// then hex-dump the raw structure bytes.
static void show_date_time(cx16_date_time_t *tm) {
    cx16_k_clock_get_date_time(tm);
    printf("Result: year=%d, mon=%d, day=%d, hour=%d, min=%d, sec=%d, jif=%d\n",
           1900 + tm->year, tm->mon, tm->day, tm->hour, tm->min, tm->sec, tm->jif);
    hexdump(tm, sizeof(*tm));
}

// Exercise the CX16 kernal clock API: read the clock, set it to a fixed
// date/time, then read it back.
int main(void) {
    static cx16_date_time_t tm;

    putchar(15); // ISO mode
    printf("llvm-mos-sdk CX16 Kernal Test\n\n");

    printf("\ncx16_k_clock_get_date_time(&tm);\n");
    show_date_time(&tm);

    printf("\ncx16_k_clock_set_date_time(2023-1900, 4, 5, 10, 12, 13, 42);\n");
    cx16_k_clock_set_date_time(2023 - 1900, 4, 5, 10, 12, 13, 42);
    show_date_time(&tm);

    printf("Done\n"); // finished
}
|
0fc343a006798fd3ee4b0802bdaf8186104aed67
|
ae31542273a142210a1ff30fb76ed9d45d38eba9
|
/src/backend/cdb/motion/ic_tcp.c
|
4e21c6a48131afa291f1395e52e4e9f12c0fe519
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"PostgreSQL",
"OpenSSL",
"LicenseRef-scancode-stream-benchmark",
"ISC",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-ssleay-windows",
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
greenplum-db/gpdb
|
8334837bceb2d5d51a684500793d11b190117c6a
|
2c0f8f0fb24a2d7a7da114dc80f5f5a2712fca50
|
refs/heads/main
| 2023-08-22T02:03:03.806269
| 2023-08-21T22:59:53
| 2023-08-22T01:17:10
| 44,781,140
| 6,417
| 2,082
|
Apache-2.0
| 2023-09-14T20:33:42
| 2015-10-23T00:25:17
|
C
|
UTF-8
|
C
| false
| false
| 88,857
|
c
|
ic_tcp.c
|
/*-------------------------------------------------------------------------
* ic_tcp.c
* Interconnect code specific to TCP transport.
*
* Portions Copyright (c) 2005-2008, Greenplum, Inc.
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
*
*
* IDENTIFICATION
* src/backend/cdb/motion/ic_tcp.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "common/ip.h"
#include "nodes/execnodes.h" /* ExecSlice, SliceTable */
#include "nodes/pg_list.h"
#include "nodes/print.h"
#include "miscadmin.h"
#include "libpq/libpq-be.h"
#include "postmaster/postmaster.h"
#include "utils/builtins.h"
#include "cdb/cdbselect.h"
#include "cdb/tupchunklist.h"
#include "cdb/ml_ipc.h"
#include "cdb/cdbvars.h"
#include "cdb/cdbdisp.h"
#ifdef ENABLE_IC_PROXY
#include "ic_proxy_backend.h"
#endif /* ENABLE_IC_PROXY */
#include <fcntl.h>
#include <limits.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/time.h>
#include <netinet/in.h>
#define USECS_PER_SECOND 1000000
#define MSECS_PER_SECOND 1000
/*
 * GpMonotonicTime: used to guarantee that the elapsed time is in
 * the monotonic order between two gp_get_monotonic_time calls.
 */
typedef struct GpMonotonicTime
{
	struct timeval beginTime;	/* time at the start of the measured interval */
	struct timeval endTime;		/* most recently observed time (presumably kept
								 * non-decreasing by gp_get_monotonic_time) */
} GpMonotonicTime;
static void gp_set_monotonic_begin_time(GpMonotonicTime *time);
static void gp_get_monotonic_time(GpMonotonicTime *time);
static inline uint64 gp_get_elapsed_ms(GpMonotonicTime *time);
static inline uint64 gp_get_elapsed_us(GpMonotonicTime *time);
static inline int timeCmp(struct timeval *t1, struct timeval *t2);
/*
* backlog for listen() call: it is important that this be something like a
* good match for the maximum number of QEs. Slow insert performance will
* result if it is too low.
*/
#define CONNECT_RETRY_MS 4000
#define CONNECT_AGGRESSIVERETRY_MS 500
/* listener backlog is calculated at listener-creation time */
int listenerBacklog = 128;
/* our timeout value for select() and other socket operations. */
static struct timeval tval;
/*
 * getMotionConn
 *
 * Return a pointer to the iConn'th MotionConn of the given transport-state
 * entry.  iConn must be less than pEntry->numConns.
 */
static inline MotionConn *
getMotionConn(ChunkTransportStateEntry *pEntry, int iConn)
{
	Assert(pEntry != NULL);
	Assert(pEntry->conns != NULL);
	Assert(iConn < pEntry->numConns);

	return &pEntry->conns[iConn];
}
static ChunkTransportStateEntry *startOutgoingConnections(ChunkTransportState *transportStates,
ExecSlice *sendSlice,
int *pOutgoingCount);
static void format_fd_set(StringInfo buf, int nfds, mpp_fd_set *fds, char *pfx, char *sfx);
static void setupOutgoingConnection(ChunkTransportState *transportStates,
ChunkTransportStateEntry *pEntry, MotionConn *conn);
static void updateOutgoingConnection(ChunkTransportState *transportStates,
ChunkTransportStateEntry *pEntry, MotionConn *conn, int errnoSave);
static void sendRegisterMessage(ChunkTransportState *transportStates, ChunkTransportStateEntry *pEntry, MotionConn *conn);
static bool readRegisterMessage(ChunkTransportState *transportStates,
MotionConn *conn);
static MotionConn *acceptIncomingConnection(void);
static void flushInterconnectListenerBacklog(void);
static void waitOnOutbound(ChunkTransportStateEntry *pEntry);
static TupleChunkListItem RecvTupleChunkFromAnyTCP(ChunkTransportState *transportStates,
int16 motNodeID,
int16 *srcRoute);
static TupleChunkListItem RecvTupleChunkFromTCP(ChunkTransportState *transportStates,
int16 motNodeID,
int16 srcRoute);
static void SendEosTCP(ChunkTransportState *transportStates,
int motNodeID, TupleChunkListItem tcItem);
static bool SendChunkTCP(ChunkTransportState *transportStates,
ChunkTransportStateEntry *pEntry, MotionConn *conn, TupleChunkListItem tcItem, int16 motionId);
static bool flushBuffer(ChunkTransportState *transportStates,
ChunkTransportStateEntry *pEntry, MotionConn *conn, int16 motionId);
static void doSendStopMessageTCP(ChunkTransportState *transportStates, int16 motNodeID);
#ifdef AMS_VERBOSE_LOGGING
static void dumpEntryConnections(int elevel, ChunkTransportStateEntry *pEntry);
static void print_connection(ChunkTransportState *transportStates, int fd, const char *msg);
#endif
/*
 * setupTCPListeningSocket
 *
 * Create a non-blocking TCP listening socket on an OS-assigned port, bound
 * either to the configured unicast interconnect address or to the wildcard
 * address.  On success, *listenerSocketFd and *listenerPort are filled in;
 * on any failure the socket is closed and an ERROR is raised.
 *
 * backlog           - listen() backlog to use.
 * listenerSocketFd  - out: fd of the listening socket.
 * listenerPort      - out: TCP port chosen by the system.
 */
static void
setupTCPListeningSocket(int backlog, int *listenerSocketFd, uint16 *listenerPort)
{
	int			errnoSave;
	int			fd = -1;
	const char *fun;

	*listenerSocketFd = -1;
	*listenerPort = 0;

	struct sockaddr_storage addr;
	socklen_t	addrlen;
	struct addrinfo hints;
	struct addrinfo *addrs,
			   *rp;
	int			s;
	char		service[32];

	/*
	 * we let the system pick the TCP port here so we don't have to manage
	 * port resources ourselves. So set the port to 0 (any port)
	 */
	snprintf(service, 32, "%d", 0);
	memset(&hints, 0, sizeof(struct addrinfo));
	hints.ai_family = AF_UNSPEC;	/* Allow IPv4 or IPv6 */
	hints.ai_socktype = SOCK_STREAM;	/* Two-way, out of band connection */
	hints.ai_protocol = 0;		/* Any protocol - TCP implied for network use due to SOCK_STREAM */

	if (Gp_interconnect_address_type == INTERCONNECT_ADDRESS_TYPE_UNICAST)
	{
		/* bind only the configured interconnect address (numeric form) */
		Assert(interconnect_address && strlen(interconnect_address) > 0);
		hints.ai_flags |= AI_NUMERICHOST;
		ereportif(gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG, DEBUG3,
				  (errmsg("getaddrinfo called with unicast address: %s",
						  interconnect_address)));
	}
	else
	{
		/* bind the wildcard address */
		Assert(interconnect_address == NULL);
		hints.ai_flags |= AI_PASSIVE;
		ereportif(gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG, DEBUG3,
				  (errmsg("getaddrinfo called with wildcard address")));
	}

	s = getaddrinfo(interconnect_address, service, &hints, &addrs);
	if (s != 0)
		elog(ERROR, "getaddrinfo says %s", gai_strerror(s));

	/*
	 * getaddrinfo() returns a list of address structures, one for each valid
	 * address and family we can use.
	 *
	 * Try each address until we successfully bind. If socket (or bind) fails,
	 * we (close the socket and) try the next address. This can happen if the
	 * system supports IPv6, but IPv6 is disabled from working, or if it
	 * supports IPv6 and IPv4 is disabled.
	 */

	/*
	 * If there is both an AF_INET6 and an AF_INET choice, we prefer the
	 * AF_INET6, because on UNIX it can receive either protocol, whereas
	 * AF_INET can only get IPv4. Otherwise we'd need to bind two sockets,
	 * one for each protocol.
	 *
	 * Why not just use AF_INET6 in the hints? That works perfect if we know
	 * this machine supports IPv6 and IPv6 is enabled, but we don't know that.
	 */
#ifdef HAVE_IPV6
	if (addrs->ai_family == AF_INET && addrs->ai_next != NULL && addrs->ai_next->ai_family == AF_INET6)
	{
		/*
		 * We got both an INET and INET6 possibility, but we want to prefer
		 * the INET6 one if it works. Reverse the order we got from
		 * getaddrinfo so that we try things in our preferred order. If we got
		 * more possibilities (other AFs??), I don't think we care about them,
		 * so don't worry if the list is more that two, we just rearrange the
		 * first two.
		 */
		struct addrinfo *temp = addrs->ai_next; /* second node */

		addrs->ai_next = addrs->ai_next->ai_next;	/* point old first node to
													 * third node if any */
		temp->ai_next = addrs;	/* point second node to first */
		addrs = temp;			/* start the list with the old second node */
	}
#endif

	for (rp = addrs; rp != NULL; rp = rp->ai_next)
	{
		/*
		 * getaddrinfo gives us all the parameters for the socket() call as
		 * well as the parameters for the bind() call.
		 */
		fd = socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol);
		if (fd == -1)
			continue;

		/*
		 * we let the system pick the TCP port here so we don't have to manage
		 * port resources ourselves.
		 */
		if (bind(fd, rp->ai_addr, rp->ai_addrlen) == 0)
			break;				/* Success */

		close(fd);
		fd = -1;
	}

	/*
	 * NOTE(review): fd == -1 here can also mean every socket() call failed,
	 * yet the error is reported as "bind"; consider distinguishing the two.
	 */
	fun = "bind";
	if (fd == -1)
		goto error;

	/* Make socket non-blocking. */
	fun = "fcntl(O_NONBLOCK)";
	if (!pg_set_noblock(fd))
		goto error;

	fun = "listen";
	if (listen(fd, backlog) < 0)
		goto error;

	/* Get the listening socket's port number. */
	fun = "getsockname";
	addrlen = sizeof(addr);
	if (getsockname(fd, (struct sockaddr *) &addr, &addrlen) < 0)
		goto error;

	/* Give results to caller. */
	*listenerSocketFd = fd;

	/* display which port was chosen by the system. */
	if (addr.ss_family == AF_INET6)
		*listenerPort = ntohs(((struct sockaddr_in6 *) &addr)->sin6_port);
	else
		*listenerPort = ntohs(((struct sockaddr_in *) &addr)->sin_port);

	freeaddrinfo(addrs);
	return;

error:
	/* preserve errno across closesocket() so %m reports the real failure */
	errnoSave = errno;
	if (fd >= 0)
		closesocket(fd);
	errno = errnoSave;
	freeaddrinfo(addrs);
	ereport(ERROR,
			(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
			 errmsg("interconnect Error: Could not set up tcp listener socket"),
			 errdetail("%s: %m", fun)));
}	/* setupListeningSocket */
/*
 * InitMotionTCP
 *
 * TCP-specific interconnect initialization: set the 0.5-second timeout
 * used for select()/socket operations and create the listening socket,
 * returning its fd and port through the out parameters.
 */
void
InitMotionTCP(int *listenerSocketFd, uint16 *listenerPort)
{
	/* 500 ms poll interval for select() and other socket operations */
	tval.tv_sec = 0;
	tval.tv_usec = 500000;

	setupTCPListeningSocket(listenerBacklog, listenerSocketFd, listenerPort);
}
/*
 * CleanupMotionTCP
 *
 * Tear down TCP-specific comms state.  The TCP transport currently keeps
 * nothing that needs releasing here, so this is a no-op retained for
 * symmetry with InitMotionTCP.
 */
void
CleanupMotionTCP(void)
{
}
/* Function readPacket() is used to read in the next packet from the given
 * MotionConn.
 *
 * This call blocks until the packet is read in, and is part of a
 * global scheme where senders block until the entire message is sent, and
 * receivers block until the entire message is read. Both use non-blocking
 * socket calls so that we can handle any PG interrupts.
 *
 * Note, that for speed we want to read a message all in one go,
 * header and all. A consequence is that we may read in part of the
 * next message, which we've got to keep track of ... recvBytes holds
 * the byte-count of the unprocessed messages.
 *
 * On return, conn->pBuff holds (at least) one complete packet starting at
 * conn->msgPos, with its total length in conn->msgSize.  Raises ERROR on
 * socket failure or premature close.
 *
 * PARAMETERS
 *	 conn - MotionConn to read the packet from.
 *
 */
/* static inline void */
void
readPacket(MotionConn *conn, ChunkTransportState *transportStates)
{
	int			n,
				bytesRead = conn->recvBytes;
	bool		gotHeader = false,
				gotPacket = false;
	mpp_fd_set	rset;

#ifdef AMS_VERBOSE_LOGGING
	elog(DEBUG5, "readpacket: (fd %d) (max %d) outstanding bytes %d", conn->sockfd, Gp_max_packet_size, conn->recvBytes);
#endif

	/* do we have a complete message waiting to be processed ? */
	if (conn->recvBytes >= PACKET_HEADER_SIZE)
	{
		/* the first 4 bytes of a packet carry its total length (header included) */
		memcpy(&conn->msgSize, conn->msgPos, sizeof(uint32));
		gotHeader = true;
		if (conn->recvBytes >= conn->msgSize)
		{
#ifdef AMS_VERBOSE_LOGGING
			elog(DEBUG5, "readpacket: returning previously read data (%d)", conn->recvBytes);
#endif
			return;
		}
	}

	/*
	 * partial message waiting in recv buffer! Move to head of buffer:
	 * eliminate the slack (which will always be at the beginning) in the
	 * buffer
	 */
	if (conn->recvBytes != 0)
		memmove(conn->pBuff, conn->msgPos, conn->recvBytes);
	conn->msgPos = conn->pBuff;

#ifdef AMS_VERBOSE_LOGGING
	elog(DEBUG5, "readpacket: %s on previous call msgSize %d", gotHeader ? "got header" : "no header", conn->msgSize);
#endif

	while (!gotPacket && bytesRead < Gp_max_packet_size)
	{
		/* see if user canceled and stuff like that */
		ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);

		/*
		 * we read at the end of the buffer, we've eliminated any slack above
		 */
		if ((n = recv(conn->sockfd, conn->pBuff + bytesRead,
					  Gp_max_packet_size - bytesRead, 0)) < 0)
		{
			if (errno == EINTR)
				continue;
			if (errno == EWOULDBLOCK)
			{
				int			retry = 0;

				/* wait (in tval-sized slices) until the socket is readable */
				do
				{
					struct timeval timeout = tval;

					/* check for the QD cancel for every 2 seconds */
					if (retry++ > 4)
					{
						retry = 0;
						/* check to see if the dispatcher should cancel */
						if (Gp_role == GP_ROLE_DISPATCH)
						{
							checkForCancelFromQD(transportStates);
						}
					}
					/* see if user canceled and stuff like that */
					ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);
					MPP_FD_ZERO(&rset);
					MPP_FD_SET(conn->sockfd, &rset);
					n = select(conn->sockfd + 1, (fd_set *) &rset, NULL, NULL, &timeout);
					if (n == 0 || (n < 0 && errno == EINTR))
						continue;
					else if (n < 0)
					{
						ereport(ERROR,
								(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
								 errmsg("interconnect error reading an incoming packet"),
								 errdetail("select from seg%d at %s: %m",
										   conn->remoteContentId,
										   conn->remoteHostAndPort)));
					}
				}
				while (n < 1);
			}
			else
			{
				ereport(ERROR,
						(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
						 errmsg("interconnect error reading an incoming packet"),
						 errdetail("read from seg%d at %s: %m",
								   conn->remoteContentId,
								   conn->remoteHostAndPort)));
			}
		}
		else if (n == 0)
		{
			/* peer closed the connection before we had a complete packet */
#ifdef AMS_VERBOSE_LOGGING
			elog(DEBUG5, "readpacket(); breaking in while (fd %d) recvBytes %d msgSize %d", conn->sockfd, conn->recvBytes, conn->msgSize);
			print_connection(transportStates, conn->sockfd, "interconnect error on");
#endif
			ereport(ERROR,
					(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
					 errmsg("interconnect error: connection closed prematurely"),
					 errdetail("from Remote Connection: contentId=%d at %s",
							   conn->remoteContentId, conn->remoteHostAndPort)));
			break;
		}
		else
		{
			bytesRead += n;
			if (!gotHeader && bytesRead >= PACKET_HEADER_SIZE)
			{
				/* got the header */
				memcpy(&conn->msgSize, conn->msgPos, sizeof(uint32));
				gotHeader = true;
			}
			conn->recvBytes = bytesRead;
			if (gotHeader && bytesRead >= conn->msgSize)
				gotPacket = true;
		}
	}

#ifdef AMS_VERBOSE_LOGGING
	elog(DEBUG5, "readpacket: got %d bytes", conn->recvBytes);
#endif
}
/*
 * flushIncomingData
 *
 * Drain and discard any bytes still queued on the given socket.  Calls
 * recv() at least once and keeps reading until it reports no more data
 * (a return <= 0).
 */
static void
flushIncomingData(int fd)
{
	static char discard[8192];

	/*
	 * If we're in TeardownInterconnect, we should only have to call recv() a
	 * couple of times to empty out our socket buffers.
	 */
	while (recv(fd, discard, sizeof(discard), 0) > 0)
		;
}
/* Function startOutgoingConnections() is used to initially kick-off any outgoing
 * connections for mySlice.
 *
 * This should not be called for root slices (i.e. QD ones) since they don't
 * ever have outgoing connections.
 *
 * PARAMETERS
 *
 *	 transportStates - interconnect state for the current statement.
 *	 sendSlice - Slice that this process is member of.
 *	 pOutgoingCount - out: number of outbound connections that were prepared.
 *
 * RETURNS
 *	 Initialized ChunkTransportState for the Sending Motion Node Id.
 */
static ChunkTransportStateEntry *
startOutgoingConnections(ChunkTransportState *transportStates,
						 ExecSlice *sendSlice,
						 int *pOutgoingCount)
{
	ChunkTransportStateEntry *pEntry;
	MotionConn *conn;
	ListCell   *cell;
	ExecSlice  *recvSlice;
	CdbProcess *cdbProc;

	*pOutgoingCount = 0;

	/* the receiving slice is our parent slice */
	recvSlice = &transportStates->sliceTable->slices[sendSlice->parentIndex];

	/*
	 * Enable aggressive retry when the potential fan-in at the receiver
	 * exceeds its listener backlog, i.e. connect() attempts may be dropped
	 * and need to be retried quickly.
	 */
	if (gp_interconnect_aggressive_retry)
	{
		if ((list_length(recvSlice->children) * list_length(sendSlice->segments)) > listenerBacklog)
			transportStates->aggressiveRetry = true;
	}
	else
		transportStates->aggressiveRetry = false;

	if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
		elog(DEBUG4, "Interconnect seg%d slice%d setting up sending motion node (aggressive retry is %s)",
			 GpIdentity.segindex, sendSlice->sliceIndex,
			 (transportStates->aggressiveRetry ? "active" : "inactive"));

	pEntry = createChunkTransportState(transportStates,
									   sendSlice,
									   recvSlice,
									   list_length(recvSlice->primaryProcesses));

	/*
	 * Setup a MotionConn entry for each of our outbound connections. Request
	 * a connection to each receiving backend's listening port.
	 */
	conn = pEntry->conns;

	foreach(cell, recvSlice->primaryProcesses)
	{
		cdbProc = (CdbProcess *) lfirst(cell);
		if (cdbProc)
		{
			conn->cdbProc = cdbProc;
			conn->pBuff = palloc(Gp_max_packet_size);
			conn->state = mcsSetupOutgoingConnection;
			(*pOutgoingCount)++;
		}
		/* NULL entries still consume a conn slot so indexes line up */
		conn++;
	}
	return pEntry;
}	/* startOutgoingConnections */
/*
 * setupOutgoingConnection
 *
 * Start (or restart) a non-blocking connect() to the receiving backend
 * recorded in conn->cdbProc.  Any previously open socket on the conn is
 * closed first.
 *
 * Called by SetupInterconnect when conn->state == mcsSetupOutgoingConnection.
 *
 * On return, state is:
 *		mcsSetupOutgoingConnection if failed and caller should retry.
 *		mcsConnecting if non-blocking connect() is pending.  Caller should
 *			send registration message when socket becomes write-ready.
 *		mcsSendRegMsg or mcsStarted if connect() completed successfully.
 */
static void
setupOutgoingConnection(ChunkTransportState *transportStates, ChunkTransportStateEntry *pEntry, MotionConn *conn)
{
	CdbProcess *cdbProc = conn->cdbProc;
	int			n;
	int			ret;
	char		portNumberStr[32];
	char	   *service;
	struct addrinfo *addrs = NULL;
	struct addrinfo hint;

	Assert(conn->cdbProc);
	Assert(conn->state == mcsSetupOutgoingConnection);

	conn->wakeup_ms = 0;
	conn->remoteContentId = cdbProc->contentid;

	/*
	 * record the destination IP addr and port for error messages. Since the
	 * IP addr might be IPv6, it might have ':' embedded, so in that case, put
	 * '[]' around it so we can see that the string is an IP and port
	 * (otherwise it might look just like an IP).
	 */
	if (strchr(cdbProc->listenerAddr, ':') != 0)
		snprintf(conn->remoteHostAndPort, sizeof(conn->remoteHostAndPort),
				 "[%s]:%d", cdbProc->listenerAddr, cdbProc->listenerPort);
	else
		snprintf(conn->remoteHostAndPort, sizeof(conn->remoteHostAndPort),
				 "%s:%d", cdbProc->listenerAddr, cdbProc->listenerPort);

	/* Might be retrying due to connection failure etc. Close old socket. */
	if (conn->sockfd >= 0)
	{
		closesocket(conn->sockfd);
		conn->sockfd = -1;
	}

#ifdef ENABLE_IC_PROXY
	if (Gp_interconnect_type == INTERCONNECT_TYPE_PROXY)
	{
		/*
		 * Using libuv pipe to register backend to proxy.
		 * ic_proxy_backend_connect only appends the connect request into
		 * connection queue and waits for the libuv_run_loop to handle the queue.
		 */
		ic_proxy_backend_connect(transportStates->proxyContext,
								 pEntry, conn, true);

		/* proxy path: connection is considered started immediately */
		conn->pBuff = palloc(Gp_max_packet_size);
		conn->recvBytes = 0;
		conn->msgPos = NULL;
		conn->msgSize = PACKET_HEADER_SIZE;
		conn->state = mcsStarted;
		conn->stillActive = true;
		conn->tupleCount = 0;
		conn->remoteContentId = conn->cdbProc->contentid;
		return;
	}
#endif							/* ENABLE_IC_PROXY */

	/* Initialize hint structure */
	MemSet(&hint, 0, sizeof(hint));
	hint.ai_socktype = SOCK_STREAM;
	hint.ai_family = AF_UNSPEC; /* Allow for IPv4 or IPv6 */

#ifdef AI_NUMERICSERV
	hint.ai_flags = AI_NUMERICHOST | AI_NUMERICSERV;	/* Never do name
														 * resolution */
#else
	hint.ai_flags = AI_NUMERICHOST; /* Never do name resolution */
#endif

	snprintf(portNumberStr, sizeof(portNumberStr), "%d", cdbProc->listenerPort);
	service = portNumberStr;

	ret = pg_getaddrinfo_all(cdbProc->listenerAddr, service, &hint, &addrs);
	if (ret || !addrs)
	{
		if (addrs)
			pg_freeaddrinfo_all(hint.ai_family, addrs);

		ereport(ERROR,
				(errmsg("could not translate host addr \"%s\", port \"%d\" to address: %s",
						cdbProc->listenerAddr, cdbProc->listenerPort, gai_strerror(ret))));
		return;
	}

	/*
	 * Since we aren't using name resolution, getaddrinfo will return only 1
	 * entry
	 */

	/*
	 * Create a socket. getaddrinfo() returns the parameters needed by
	 * socket()
	 */
	conn->sockfd = socket(addrs->ai_family, addrs->ai_socktype, addrs->ai_protocol);
	if (conn->sockfd < 0)
		ereport(ERROR,
				(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
				 errmsg("interconnect error setting up outgoing connection"),
				 errdetail("%s: %m", "socket")));

	/* make socket non-blocking BEFORE we connect. */
	if (!pg_set_noblock(conn->sockfd))
		ereport(ERROR,
				(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
				 errmsg("interconnect error setting up outgoing connection"),
				 errdetail("%s: %m", "fcntl(O_NONBLOCK)")));

	if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
		ereport(DEBUG1, (errmsg("Interconnect connecting to seg%d slice%d %s "
								"pid=%d sockfd=%d",
								conn->remoteContentId,
								pEntry->recvSlice->sliceIndex,
								conn->remoteHostAndPort,
								conn->cdbProc->pid,
								conn->sockfd)));

	/*
	 * Initiate the connection.
	 */
	for (;;)
	{							/* connect() EINTR retry loop */
		ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);
		n = connect(conn->sockfd, addrs->ai_addr, addrs->ai_addrlen);
		/* Non-blocking socket never connects immediately, but check anyway. */
		if (n == 0)
		{
			sendRegisterMessage(transportStates, pEntry, conn);
			pg_freeaddrinfo_all(hint.ai_family, addrs);
			return;
		}

		/* Retry if a signal was received. */
		if (errno == EINTR)
			continue;

		/* Normal case: select() will tell us when connection is made. */
		if (errno == EINPROGRESS ||
			errno == EWOULDBLOCK)
		{
			conn->state = mcsConnecting;
			pg_freeaddrinfo_all(hint.ai_family, addrs);
			return;
		}

		pg_freeaddrinfo_all(hint.ai_family, addrs);

		/* connect() failed. Log the error. Caller should retry. */
		updateOutgoingConnection(transportStates, pEntry, conn, errno);
		return;
	}							/* connect() EINTR retry loop */
}	/* setupOutgoingConnection */
/*
 * updateOutgoingConnection
 *
 * Invoked once a pending connect() has completed, successfully or not.
 * With errnoSave == -1 the connect() outcome is fetched from the socket
 * via getsockopt(SO_ERROR); otherwise errnoSave is used directly.  On
 * success the registration message is sent; on failure the error is
 * logged and the connection state is reset so the caller retries it.
 */
static void
updateOutgoingConnection(ChunkTransportState *transportStates, ChunkTransportStateEntry *pEntry, MotionConn *conn, int errnoSave)
{
	socklen_t	errlen = sizeof(errnoSave);

	/* Ask the socket for the connect() result when it wasn't supplied. */
	if (errnoSave == -1 &&
		getsockopt(conn->sockfd, SOL_SOCKET, SO_ERROR,
				   (void *) &errnoSave, &errlen))
	{
		/* getsockopt itself failed */
		ereport(ERROR,
				(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
				 errmsg("interconnect could not connect to seg%d %s",
						conn->remoteContentId, conn->remoteHostAndPort),
				 errdetail("%s sockfd=%d: %m",
						   "getsockopt(SO_ERROR)", conn->sockfd)));
	}

	if (errnoSave == 0)
	{
		/* Success!  Advance to the next state. */
		sendRegisterMessage(transportStates, pEntry, conn);
		return;
	}

	/* connect() failed; log it and let the caller retry. */
	errno = errnoSave;
	ereport(LOG,
			(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
			 errmsg("interconnect could not connect to seg%d %s pid=%d; will retry; %s: %m",
					conn->remoteContentId, conn->remoteHostAndPort,
					conn->cdbProc->pid, "connect")));

	/* Tell caller to close the socket and try again. */
	conn->state = mcsSetupOutgoingConnection;
}	/* updateOutgoingConnection */
/* Function sendRegisterMessage() used to send a Register message to the
 * remote destination on the other end of the provided conn.
 *
 * On the first call for a connection (state != mcsSendRegMsg) the message
 * is built in conn->pBuff; subsequent calls resume a partially-sent
 * message from conn->msgPos.
 *
 * PARAMETERS
 *
 *	 pEntry - ChunkTransportState.
 *	 conn - MotionConn to send message out on.
 *
 * Called by SetupInterconnect when conn->state == mcsSetupOutgoingConnection.
 *
 * On return, state is:
 *		mcsSendRegMsg if registration message has not been completely sent.
 *			Caller should retry when socket becomes write-ready.
 *		mcsStarted if registration message has been sent.  Caller can start
 *			sending data.
 */
static void
sendRegisterMessage(ChunkTransportState *transportStates, ChunkTransportStateEntry *pEntry, MotionConn *conn)
{
	int			bytesToSend;
	int			bytesSent;
	SliceTable *sliceTbl = transportStates->sliceTable;

	/* First call for this connection: build the RegisterMessage in pBuff. */
	if (conn->state != mcsSendRegMsg)
	{
		RegisterMessage *regMsg = (RegisterMessage *) conn->pBuff;
		struct sockaddr_storage localAddr;
		socklen_t	addrsize;

		Assert(conn->cdbProc &&
			   conn->pBuff &&
			   sizeof(*regMsg) <= Gp_max_packet_size);

		/* Save local host and port for log messages. */
		addrsize = sizeof(localAddr);
		if (getsockname(conn->sockfd, (struct sockaddr *) &localAddr, &addrsize))
		{
			ereport(ERROR,
					(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
					 errmsg("interconnect error after making connection"),
					 errdetail("getsockname sockfd=%d remote=%s: %m",
							   conn->sockfd, conn->remoteHostAndPort)));
		}
		format_sockaddr(&localAddr, conn->localHostAndPort,
						sizeof(conn->localHostAndPort));

		if (gp_log_interconnect >= GPVARS_VERBOSITY_VERBOSE)
			ereport(LOG,
					(errmsg("interconnect sending registration message to seg%d slice%d %s pid=%d from seg%d slice%d %s sockfd=%d",
							conn->remoteContentId,
							pEntry->recvSlice->sliceIndex,
							conn->remoteHostAndPort,
							conn->cdbProc->pid,
							GpIdentity.segindex,
							pEntry->sendSlice->sliceIndex,
							conn->localHostAndPort,
							conn->sockfd)));

		/* Fill in the registration message fields. */
		regMsg->msgBytes = sizeof(*regMsg);
		regMsg->recvSliceIndex = pEntry->recvSlice->sliceIndex;
		regMsg->sendSliceIndex = pEntry->sendSlice->sliceIndex;
		regMsg->srcContentId = GpIdentity.segindex;
		regMsg->srcListenerPort = Gp_listener_port & 0x0ffff;
		regMsg->srcPid = MyProcPid;
		regMsg->srcSessionId = gp_session_id;
		regMsg->srcCommandCount = sliceTbl->ic_instance_id;

		conn->state = mcsSendRegMsg;
		conn->msgPos = conn->pBuff;
		conn->msgSize = sizeof(*regMsg);
	}

	/* Send as much as we can. */
	for (;;)
	{
		bytesToSend = conn->pBuff + conn->msgSize - conn->msgPos;
		bytesSent = send(conn->sockfd, conn->msgPos, bytesToSend, 0);
		if (bytesSent == bytesToSend)
			break;
		else if (bytesSent >= 0)
			conn->msgPos += bytesSent;
		else if (errno == EWOULDBLOCK)
			return;				/* call me again to send the rest */
		else if (errno == EINTR)
			ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);
		else
		{
			ereport(ERROR,
					(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
					 errmsg("interconnect error writing registration message to seg%d at %s",
							conn->remoteContentId,
							conn->remoteHostAndPort),
					 errdetail("write pid=%d sockfd=%d local=%s: %m",
							   conn->cdbProc->pid,
							   conn->sockfd,
							   conn->localHostAndPort)));
		}
	}

	/* Sent it all. */
	conn->state = mcsStarted;
	conn->msgPos = NULL;
	conn->msgSize = PACKET_HEADER_SIZE;
	conn->stillActive = true;
}	/* sendRegisterMessage */
/* Function readRegisterMessage() reads a "Register" message off of the conn
 * and places it in the right MotionLayerEntry conn slot based on the contents
 * of the register message.
 *
 * PARAMETERS
 *
 *	 conn - MotionConn to read the register message from.
 *
 * Returns true if message has been received; or false if caller must retry
 * when socket becomes read-ready.  Note that a stale connection (closed
 * peer or old session/command id) is discarded and also returns true, but
 * with conn->sockfd set to -1.
 */
static bool
readRegisterMessage(ChunkTransportState *transportStates,
					MotionConn *conn)
{
	int			bytesToReceive;
	int			bytesReceived;
	int			iconn;
	RegisterMessage *regMsg;
	RegisterMessage msg;
	MotionConn *newConn;
	ChunkTransportStateEntry *pEntry = NULL;
	CdbProcess *cdbproc = NULL;
	ListCell   *lc;
	SliceTable *sliceTbl = transportStates->sliceTable;

	/* Get ready to receive the Register message. */
	if (conn->state != mcsRecvRegMsg)
	{
		conn->state = mcsRecvRegMsg;
		conn->msgSize = sizeof(*regMsg);
		conn->msgPos = conn->pBuff;
		Assert(conn->pBuff &&
			   sizeof(*regMsg) <= Gp_max_packet_size);
	}

	/* Receive all that is available, up to the expected message size. */
	for (;;)
	{
		bytesToReceive = conn->pBuff + conn->msgSize - conn->msgPos;
		bytesReceived = recv(conn->sockfd, conn->msgPos, bytesToReceive, 0);
		if (bytesReceived == bytesToReceive)
			break;
		else if (bytesReceived > 0)
			conn->msgPos += bytesReceived;
		else if (bytesReceived == 0)
		{
			elog(LOG, "Interconnect error reading register message from %s: connection closed",
				 conn->remoteHostAndPort);
			/* maybe this peer is already retrying ? */
			goto old_conn;
		}
		else if (errno == EWOULDBLOCK)
			return false;		/* call me again to receive the rest */
		else if (errno == EINTR)
			ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);
		else
		{
			ereport(ERROR,
					(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
					 errmsg("interconnect error reading register message from %s",
							conn->remoteHostAndPort),
					 errdetail("read sockfd=%d local=%s: %m",
							   conn->sockfd,
							   conn->localHostAndPort)));
		}
	}

	/*
	 * Got the whole message. Convert fields to native byte order.
	 *
	 * NOTE(review): the fields are copied as-is; no actual byte-order
	 * conversion is performed here (sender and receiver presumably share
	 * endianness) -- confirm if heterogeneous clusters are ever supported.
	 */
	regMsg = (RegisterMessage *) conn->pBuff;
	msg.msgBytes = regMsg->msgBytes;
	msg.recvSliceIndex = regMsg->recvSliceIndex;
	msg.sendSliceIndex = regMsg->sendSliceIndex;
	msg.srcContentId = regMsg->srcContentId;
	msg.srcListenerPort = regMsg->srcListenerPort;
	msg.srcPid = regMsg->srcPid;
	msg.srcSessionId = regMsg->srcSessionId;
	msg.srcCommandCount = regMsg->srcCommandCount;

	/* Check for valid message format. */
	if (msg.msgBytes != sizeof(*regMsg))
	{
		ereport(ERROR,
				(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
				 errmsg("interconnect error reading register message from %s: format not recognized",
						conn->remoteHostAndPort),
				 errdetail("msgBytes=%d expected=%d sockfd=%d local=%s",
						   msg.msgBytes, (int) sizeof(*regMsg),
						   conn->sockfd, conn->localHostAndPort)));
	}

	/* get rid of old connections first */
	if (msg.srcSessionId != gp_session_id ||
		msg.srcCommandCount < sliceTbl->ic_instance_id)
	{
		/*
		 * This is an old connection, which can be safely ignored. We get this
		 * kind of stuff for cases in which one gang participating in the
		 * interconnect exited a query before calling SetupInterconnect().
		 * Later queries wind up receiving their registration messages.
		 */
		elog(LOG, "Received invalid, old registration message: "
			 "will ignore ('expected:received' session %d:%d ic-id %d:%d)",
			 gp_session_id, msg.srcSessionId,
			 sliceTbl->ic_instance_id, msg.srcCommandCount);
		goto old_conn;
	}

	/* Verify that the message pertains to one of our receiving Motion nodes. */
	if (msg.sendSliceIndex > 0 &&
		msg.sendSliceIndex <= transportStates->size &&
		msg.recvSliceIndex == transportStates->sliceId &&
		msg.srcContentId >= -1)
	{
		/* this is a good connection */
	}
	else
	{
		/* something is wrong */
		ereport(ERROR,
				(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
				 errmsg("interconnect error: Invalid registration message received from %s",
						conn->remoteHostAndPort),
				 errdetail("sendSlice=%d recvSlice=%d srcContentId=%d srcPid=%d "
						   "srcListenerPort=%d srcSessionId=%d srcCommandCount=%d "
						   "motnode=%d transportStates->size=%d "
						   "transportStates->sliceId=%d",
						   msg.sendSliceIndex, msg.recvSliceIndex,
						   msg.srcContentId, msg.srcPid,
						   msg.srcListenerPort, msg.srcSessionId,
						   msg.srcCommandCount, msg.sendSliceIndex,
						   transportStates->size, transportStates->sliceId)));
	}

	/*
	 * Find state info for the specified Motion node. The sender's slice
	 * number equals the motion node id.
	 */
	getChunkTransportState(transportStates, msg.sendSliceIndex, &pEntry);
	Assert(pEntry);

	/* Locate the sender's CdbProcess in the sending slice's process list. */
	foreach_with_count(lc, pEntry->sendSlice->primaryProcesses, iconn)
	{
		cdbproc = (CdbProcess *)lfirst(lc);
		if (!cdbproc)
			continue;
		if (msg.srcContentId == cdbproc->contentid &&
			msg.srcListenerPort == cdbproc->listenerPort &&
			msg.srcPid == cdbproc->pid)
			break;
	}
	if (iconn == list_length(pEntry->sendSlice->primaryProcesses))
	{
		/* no matching process: the registration does not belong here */
		ereport(ERROR,
				(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
				 errmsg("interconnect error: Invalid registration message received from %s",
						conn->remoteHostAndPort),
				 errdetail("sendSlice=%d recvSlice=%d srcContentId=%d srcPid=%d "
						   "srcListenerPort=%d srcSessionId=%d srcCommandCount=%d "
						   "motnode=%d iconn=%d",
						   msg.sendSliceIndex, msg.recvSliceIndex,
						   msg.srcContentId, msg.srcPid,
						   msg.srcListenerPort, msg.srcSessionId,
						   msg.srcCommandCount, msg.sendSliceIndex, iconn)));
	}

	/*
	 * Allocate MotionConn slot corresponding to sender's position in the
	 * sending slice's CdbProc list.
	 */
	newConn = getMotionConn(pEntry, iconn);
	if (newConn->sockfd != -1 ||
		newConn->state != mcsNull)
	{
		ereport(ERROR,
				(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
				 errmsg("interconnect error: Duplicate registration message received from %s",
						conn->remoteHostAndPort),
				 errdetail("Already accepted registration from %s for sendSlice=%d srcContentId=%d srcPid=%d srcListenerPort=%d",
						   newConn->remoteHostAndPort, msg.sendSliceIndex,
						   msg.srcContentId, msg.srcPid, msg.srcListenerPort)));
	}

	/* message looks good */
	if (gp_log_interconnect >= GPVARS_VERBOSITY_VERBOSE)
	{
		ereport(LOG,
				(errmsg("interconnect seg%d slice%d sockfd=%d accepted registration message from seg%d slice%d %s pid=%d",
						GpIdentity.segindex, msg.recvSliceIndex, conn->sockfd,
						msg.srcContentId, msg.sendSliceIndex,
						conn->remoteHostAndPort, msg.srcPid)));
	}

	/* Copy caller's temporary MotionConn to its assigned slot. */
	*newConn = *conn;
	newConn->cdbProc = cdbproc;
	newConn->remoteContentId = msg.srcContentId;

	/*
	 * The caller's MotionConn object is no longer valid.
	 */
	MemSet(conn, 0, sizeof(*conn));
	conn->state = mcsNull;

	/*
	 * Prepare to begin reading tuples.
	 */
	newConn->state = mcsStarted;
	newConn->msgPos = NULL;
	newConn->msgSize = 0;
	newConn->stillActive = true;
	MPP_FD_SET(newConn->sockfd, &pEntry->readSet);
	if (newConn->sockfd > pEntry->highReadSock)
		pEntry->highReadSock = newConn->sockfd;

#ifdef AMS_VERBOSE_LOGGING
	dumpEntryConnections(DEBUG4, pEntry);
#endif

	/* we've completed registration of this connection */
	return true;

old_conn:
	/* stale peer: drop the socket and free the temporary buffer */
	shutdown(conn->sockfd, SHUT_RDWR);
	closesocket(conn->sockfd);
	conn->sockfd = -1;
	pfree(conn->pBuff);
	conn->pBuff = NULL;

	/*
	 * this connection is done, but with sockfd == -1 isn't a "success"
	 */
	return true;
}	/* readRegisterMessage */
/*
* acceptIncomingConnection
*
* accept() a connection request that is pending on the listening socket.
* Returns a newly palloc'ed MotionConn object; or NULL if the listening
* socket does not have any pending connection requests.
*/
static MotionConn *
acceptIncomingConnection(void)
{
	int			newsockfd;
	socklen_t	addrsize;
	MotionConn *conn;
	struct sockaddr_storage remoteAddr;
	struct sockaddr_storage localAddr;

	/*
	 * Accept a connection.  The listener socket is non-blocking, so retry on
	 * EINTR and return NULL when the pending-connection queue is empty.
	 */
	for (;;)
	{							/* loop until success or EWOULDBLOCK */
		MemSet(&remoteAddr, 0, sizeof(remoteAddr));
		addrsize = sizeof(remoteAddr);
		newsockfd = accept(TCP_listenerFd, (struct sockaddr *) &remoteAddr, &addrsize);
		if (newsockfd >= 0)
			break;

		switch (errno)
		{
			case EINTR:
				/* A signal arrived. Loop to retry the accept(). */
				break;

			case EWOULDBLOCK:
				/* Connection request queue is empty. Normal return. */
				return NULL;

			case EBADF:
			case EFAULT:
			case EINVAL:
#ifndef _WIN32
			case ENOTSOCK:
#endif
			case EOPNOTSUPP:
				/* Shouldn't get these errors unless there is a bug. */
				ereport(ERROR,
						(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
						 errmsg("interconnect error on listener port %d",
								Gp_listener_port),
						 errdetail("accept sockfd=%d: %m", TCP_listenerFd)));
				break;			/* not reached */

			case ENOMEM:
			case ENFILE:
			case EMFILE:
			case ENOBUFS:
				/* Out of resources. */
				ereport(ERROR,
						(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
						 errmsg("interconnect error on listener port %d",
								Gp_listener_port),
						 errdetail("accept sockfd=%d: %m", TCP_listenerFd)));
				break;			/* not reached */

			default:
				/* Network problem, connection aborted, etc.  Log and continue. */
				ereport(LOG,
						(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
						 errmsg("interconnect connection request not completed on listener port %d",
								Gp_listener_port),
						 errdetail("accept sockfd=%d: %m", TCP_listenerFd)));
		}						/* switch (errno) */
	}							/* loop until success or EWOULDBLOCK */

	/*
	 * Create a MotionConn object to hold the connection state.  The caller
	 * is responsible for freeing it (and pBuff) once registration completes.
	 */
	conn = palloc0(sizeof(MotionConn));
	conn->sockfd = newsockfd;
	conn->pBuff = palloc(Gp_max_packet_size);
	conn->msgSize = 0;
	conn->recvBytes = 0;
	conn->msgPos = 0;
	conn->tupleCount = 0;
	conn->stillActive = false;
	conn->state = mcsAccepted;
	/* -2 marks "sender identity unknown" until the register message arrives */
	conn->remoteContentId = -2;

	/* Save remote and local host:port strings for error messages. */
	format_sockaddr(&remoteAddr, conn->remoteHostAndPort,
					sizeof(conn->remoteHostAndPort));

	addrsize = sizeof(localAddr);
	if (getsockname(newsockfd, (struct sockaddr *) &localAddr, &addrsize))
	{
		ereport(ERROR,
				(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
				 errmsg("interconnect error after accepting connection"),
				 errdetail("getsockname sockfd=%d remote=%s: %m",
						   newsockfd, conn->remoteHostAndPort)));
	}
	format_sockaddr(&localAddr, conn->localHostAndPort,
					sizeof(conn->localHostAndPort));

	/* make socket non-blocking so the setup select() loop never stalls on it */
	if (!pg_set_noblock(newsockfd))
	{
		ereport(ERROR,
				(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
				 errmsg("interconnect error after accepting connection"),
				 errdetail("fcntl(O_NONBLOCK) sockfd=%d remote=%s local=%s: %m",
						   newsockfd, conn->remoteHostAndPort,
						   conn->localHostAndPort)));
	}

	if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
		elog(DEBUG4, "Interconnect got incoming connection "
			 "from remote=%s to local=%s sockfd=%d",
			 conn->remoteHostAndPort, conn->localHostAndPort, newsockfd);

	return conn;
}								/* acceptIncomingConnection */
/*
 * SetupTCPInterconnect -- TCP implementation of interconnect setup
 * (see ml_ipc.h for the interface contract).
 *
 * Builds this process's interconnect state for the local slice:
 *  - allocates and initializes a ChunkTransportState and installs the
 *    TCP send/recv function pointers;
 *  - creates a ChunkTransportStateEntry per child (sending) slice;
 *  - initiates outgoing connections toward the parent slice, if any;
 *  - runs a select() loop accepting inbound connections on the listener,
 *    reading their registration messages, and driving outbound connect()
 *    retries until all expected routes are established.
 *
 * Errors out (ereport ERROR) if gp_interconnect_setup_timeout elapses
 * before all connections complete.  On success, sets
 * estate->interconnect_context and estate->es_interconnect_is_setup.
 */
void
SetupTCPInterconnect(EState *estate)
{
	int			i,
				index,
				n;
	ListCell   *cell;
	ExecSlice  *mySlice;
	ExecSlice  *aSlice;
	MotionConn *conn;
	SliceTable *sliceTable = estate->es_sliceTable;
	int			incoming_count = 0;
	int			outgoing_count = 0;
	int			expectedTotalIncoming = 0;
	int			expectedTotalOutgoing = 0;
	int			iteration = 0;
	GpMonotonicTime startTime;
	StringInfoData logbuf;
	uint64		elapsed_ms = 0;
	uint64		last_qd_check_ms = 0;

	/* we can have at most one of these. */
	ChunkTransportStateEntry *sendingChunkTransportState = NULL;
	ChunkTransportState *interconnect_context;

	SIMPLE_FAULT_INJECTOR("interconnect_setup_palloc");
	interconnect_context = palloc0(sizeof(ChunkTransportState));

	/* initialize state variables */
	Assert(interconnect_context->size == 0);
	interconnect_context->estate = estate;
	interconnect_context->size = CTS_INITIAL_SIZE;
	interconnect_context->states = palloc0(CTS_INITIAL_SIZE * sizeof(ChunkTransportStateEntry));

	interconnect_context->teardownActive = false;
	interconnect_context->activated = false;
	interconnect_context->networkTimeoutIsLogged = false;
	interconnect_context->incompleteConns = NIL;
	interconnect_context->sliceTable = copyObject(sliceTable);
	interconnect_context->sliceId = sliceTable->localSlice;

	/* install the TCP implementations of the transport operations */
	interconnect_context->RecvTupleChunkFrom = RecvTupleChunkFromTCP;
	interconnect_context->RecvTupleChunkFromAny = RecvTupleChunkFromAnyTCP;
	interconnect_context->SendEos = SendEosTCP;
	interconnect_context->SendChunk = SendChunkTCP;
	interconnect_context->doSendStopMessage = doSendStopMessageTCP;

#ifdef ENABLE_IC_PROXY
	ic_proxy_backend_init_context(interconnect_context);
#endif							/* ENABLE_IC_PROXY */

	mySlice = &interconnect_context->sliceTable->slices[sliceTable->localSlice];

	Assert(sliceTable &&
		   mySlice->sliceIndex == sliceTable->localSlice);

	gp_set_monotonic_begin_time(&startTime);

	/* now we'll do some setup for each of our Receiving Motion Nodes. */
	foreach(cell, mySlice->children)
	{
		int			totalNumProcs;
		int			childId = lfirst_int(cell);
		ChunkTransportStateEntry *pEntry = NULL;

#ifdef AMS_VERBOSE_LOGGING
		elog(DEBUG5, "Setting up RECEIVING motion node %d", childId);
#endif

		aSlice = &interconnect_context->sliceTable->slices[childId];

		/*
		 * If we're using directed-dispatch we have dummy primary-process
		 * entries, so we count the entries.
		 */
		totalNumProcs = list_length(aSlice->primaryProcesses);

		pEntry = createChunkTransportState(interconnect_context, aSlice, mySlice, totalNumProcs);

		for (i = 0; i < totalNumProcs; i++)
		{
			CdbProcess *cdbProc;

			cdbProc = list_nth(aSlice->primaryProcesses, i);
			/* NULL entries are directed-dispatch placeholders; no connection expected */
			if (cdbProc)
				expectedTotalIncoming++;

#ifdef ENABLE_IC_PROXY
			if (Gp_interconnect_type == INTERCONNECT_TYPE_PROXY)
			{
				conn = &pEntry->conns[i];
				conn->cdbProc = list_nth(aSlice->primaryProcesses, i);
				if (conn->cdbProc)
				{
					incoming_count++;

					/*
					 * Using libuv pipe to register backend to proxy.
					 * ic_proxy_backend_connect only appends the connect request
					 * into connection queue and waits for the libuv_run_loop to
					 * handle the queue.
					 */
					ic_proxy_backend_connect(interconnect_context->proxyContext,
											 pEntry, conn, false /* isSender */);

					conn->pBuff = palloc(Gp_max_packet_size);
					conn->recvBytes = 0;
					conn->msgPos = NULL;
					conn->msgSize = 0;
					conn->state = mcsStarted;
					conn->stillActive = true;
					conn->tupleCount = 0;
					conn->remoteContentId = conn->cdbProc->contentid;
					conn->remapper = CreateTupleRemapper();
				}
			}
#endif							/* ENABLE_IC_PROXY */
		}
	}

	/*
	 * Initiate outgoing connections.
	 *
	 * startOutgoingConnections() and createChunkTransportState() must not be
	 * called during the lifecycle of sendingChunkTransportState, they will
	 * repalloc() interconnect_context->states so sendingChunkTransportState
	 * points to invalid memory.
	 */
	if (mySlice->parentIndex != -1)
		sendingChunkTransportState = startOutgoingConnections(interconnect_context, mySlice, &expectedTotalOutgoing);

#ifdef ENABLE_IC_PROXY
	if (Gp_interconnect_type == INTERCONNECT_TYPE_PROXY)
	{
		for (i = 0; i < expectedTotalOutgoing; i++)
		{
			conn = &sendingChunkTransportState->conns[i];
			setupOutgoingConnection(interconnect_context,
									sendingChunkTransportState, conn);
		}
		outgoing_count = expectedTotalOutgoing;
	}

	/*
	 * Before ic_proxy_backend_run_loop, we have already gone though all the
	 * incoming and outgoing connections and append them into the connect queue.
	 * ic_proxy_backend_run_loop will trigger the uv_loop and begin to handle
	 * the connect event in parallel and asynchronous way.
	 *
	 * Note that the domain socket fds are binded to libuv pipe handle, but we
	 * still depends on ic_tcp code to send/recv interconnect data based on
	 * these fds and close these fds in teardown function. As a result, we
	 * should not touch the libuv pipe handles until ic_tcp close all the fds in
	 * teardown function. In future, we should retire the ic_tcp code in ic_proxy
	 * backend and use libuv to handle connection setup, data transfer and
	 * teardown in a unified way.
	 */
	ic_proxy_backend_run_loop(interconnect_context->proxyContext);
#endif							/* ENABLE_IC_PROXY */

	if (expectedTotalIncoming > listenerBacklog)
		ereport(WARNING, (errmsg("SetupTCPInterconnect: too many expected incoming connections(%d), Interconnect setup might possibly fail", expectedTotalIncoming),
						  errhint("Try enlarging the gp_interconnect_tcp_listener_backlog GUC value and OS net.core.somaxconn parameter")));

	if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
		ereport(DEBUG1, (errmsg("SetupInterconnect will activate "
								"%d incoming, %d outgoing routes. "
								"Listening on port=%d sockfd=%d.",
								expectedTotalIncoming, expectedTotalOutgoing,
								Gp_listener_port, TCP_listenerFd)));

	/*
	 * Loop until all connections are completed or time limit is exceeded.
	 */
	while (outgoing_count < expectedTotalOutgoing ||
		   incoming_count < expectedTotalIncoming)
	{							/* select() loop */
		struct timeval timeout;
		mpp_fd_set	rset,
					wset,
					eset;
		int			highsock = -1;
		uint64		timeout_ms = 20 * 60 * 1000;
		int			outgoing_fail_count = 0;
		int			select_errno;

		iteration++;

		MPP_FD_ZERO(&rset);
		MPP_FD_ZERO(&wset);
		MPP_FD_ZERO(&eset);

		/* Expecting any new inbound connections? */
		if (incoming_count < expectedTotalIncoming)
		{
			if (TCP_listenerFd < 0)
			{
				elog(FATAL, "SetupTCPInterconnect: bad listener");
			}
			MPP_FD_SET(TCP_listenerFd, &rset);
			highsock = TCP_listenerFd;
		}

		/* Inbound connections awaiting registration message */
		foreach(cell, interconnect_context->incompleteConns)
		{
			conn = (MotionConn *) lfirst(cell);
			if (conn->state != mcsRecvRegMsg || conn->sockfd < 0)
			{
				elog(FATAL, "SetupTCPInterconnect: incomplete connection bad state or bad fd");
			}
			MPP_FD_SET(conn->sockfd, &rset);
			highsock = Max(highsock, conn->sockfd);
		}

		/* Outgoing connections */
		outgoing_count = 0;
		n = sendingChunkTransportState ? sendingChunkTransportState->numConns : 0;
		for (i = 0; i < n; i++)
		{
			index = i;
			conn = &sendingChunkTransportState->conns[index];

			/* Time to cancel incomplete connect() and retry? */
			if (conn->state == mcsConnecting &&
				conn->wakeup_ms > 0 &&
				conn->wakeup_ms <= elapsed_ms + 20)
			{
				ereport(LOG, (errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
							  errmsg("Interconnect timeout: Connection "
									 "to seg%d %s from local port %s was not "
									 "complete after " UINT64_FORMAT
									 "ms " UINT64_FORMAT " elapsed. Will retry.",
									 conn->remoteContentId,
									 conn->remoteHostAndPort,
									 conn->localHostAndPort,
									 conn->wakeup_ms, (elapsed_ms + 20))));
				conn->state = mcsSetupOutgoingConnection;
			}

			/* Time to connect? */
			if (conn->state == mcsSetupOutgoingConnection &&
				conn->wakeup_ms <= elapsed_ms + 20)
			{
				setupOutgoingConnection(interconnect_context, sendingChunkTransportState, conn);
				switch (conn->state)
				{
					case mcsSetupOutgoingConnection:
						/* Retry failed connection after awhile. */
						conn->wakeup_ms = (iteration - 1) * 1000 + elapsed_ms;
						break;
					case mcsConnecting:
						/* Set time limit for connect() to complete. */
						if (interconnect_context->aggressiveRetry)
							conn->wakeup_ms = CONNECT_AGGRESSIVERETRY_MS + elapsed_ms;
						else
							conn->wakeup_ms = CONNECT_RETRY_MS + elapsed_ms;
						break;
					default:
						conn->wakeup_ms = 0;
						break;
				}
			}

			/* What events are we watching for? */
			switch (conn->state)
			{
				case mcsNull:
					break;
				case mcsSetupOutgoingConnection:
					outgoing_fail_count++;
					break;
				case mcsConnecting:
					if (conn->sockfd < 0)
					{
						elog(FATAL, "SetupTCPInterconnect: bad fd, mcsConnecting");
					}
					/* connect() completion shows up as writable or exception */
					MPP_FD_SET(conn->sockfd, &wset);
					MPP_FD_SET(conn->sockfd, &eset);
					highsock = Max(highsock, conn->sockfd);
					break;
				case mcsSendRegMsg:
					if (conn->sockfd < 0)
					{
						elog(FATAL, "SetupTCPInterconnect: bad fd, mcsSendRegMsg");
					}
					MPP_FD_SET(conn->sockfd, &wset);
					highsock = Max(highsock, conn->sockfd);
					break;
				case mcsStarted:
					outgoing_count++;
					break;
				default:
					elog(FATAL, "SetupTCPInterconnect: bad connection state");
			}

			/* shrink the select() timeout toward the earliest pending wakeup */
			if (conn->wakeup_ms > 0)
				timeout_ms = Min(timeout_ms, conn->wakeup_ms - elapsed_ms);
		}						/* loop to set up outgoing connections */

		/* Break out of select() loop if completed all connections. */
		if (outgoing_count == expectedTotalOutgoing &&
			incoming_count == expectedTotalIncoming)
			break;

		/*
		 * Been here long?  Bail if gp_interconnect_setup_timeout exceeded.
		 */
		if (interconnect_setup_timeout > 0)
		{
			int			to = interconnect_setup_timeout * 1000;

			if (to <= elapsed_ms + 20)
				ereport(ERROR, (errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
								errmsg("Interconnect timeout: Unable to "
									   "complete setup of all connections "
									   "within time limit."),
								errdetail("Completed %d of %d incoming and "
										  "%d of %d outgoing connections. "
										  "gp_interconnect_setup_timeout = %d "
										  "seconds.",
										  incoming_count, expectedTotalIncoming,
										  outgoing_count, expectedTotalOutgoing,
										  interconnect_setup_timeout)
								));

			/* don't wait for more than 500ms */
			timeout_ms = Min(500, Min(timeout_ms, to - elapsed_ms));
		}

		/* check if segments have errors already for every 2 seconds */
		if (Gp_role == GP_ROLE_DISPATCH && elapsed_ms - last_qd_check_ms > 2000)
		{
			last_qd_check_ms = elapsed_ms;
			checkForCancelFromQD(interconnect_context);
		}

		/*
		 * If no socket events to wait for, loop to retry after a pause.
		 */
		if (highsock < 0)
		{
			if (gp_log_interconnect >= GPVARS_VERBOSITY_VERBOSE &&
				(timeout_ms > 0 || iteration > 2))
				ereport(LOG, (errmsg("SetupInterconnect+" UINT64_FORMAT
									 "ms: pause " UINT64_FORMAT "ms "
									 "outgoing_fail=%d iteration=%d",
									 elapsed_ms, timeout_ms,
									 outgoing_fail_count, iteration)
							  ));

			/* Shouldn't be in this loop unless we have some work to do. */
			if (outgoing_fail_count <= 0)
			{
				elog(FATAL, "SetupInterconnect: invalid outgoing count");
			}

			/* Wait until earliest wakeup time or overall timeout. */
			if (timeout_ms > 0)
			{
				ML_CHECK_FOR_INTERRUPTS(interconnect_context->teardownActive);
				pg_usleep(timeout_ms * 1000);
				ML_CHECK_FOR_INTERRUPTS(interconnect_context->teardownActive);
			}

			/* Back to top of loop and look again. */
			elapsed_ms = gp_get_elapsed_ms(&startTime);
			continue;
		}

		/*
		 * Wait for socket events.
		 *
		 * In order to handle errors at intervals less than the full timeout
		 * length, we limit our select(2) wait to a maximum of 500ms.
		 */
		if (timeout_ms > 0)
		{
			timeout.tv_sec = timeout_ms / 1000; /* 0 */
			timeout.tv_usec = (timeout_ms - (timeout.tv_sec * 1000)) * 1000;
			Assert(timeout_ms == timeout.tv_sec * 1000 + timeout.tv_usec / 1000);
		}
		else
			timeout.tv_sec = timeout.tv_usec = 0;

		if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
		{
			initStringInfo(&logbuf);
			format_fd_set(&logbuf, highsock + 1, &rset, "r={", "} ");
			format_fd_set(&logbuf, highsock + 1, &wset, "w={", "} ");
			format_fd_set(&logbuf, highsock + 1, &eset, "e={", "}");

			elapsed_ms = gp_get_elapsed_ms(&startTime);

			ereport(DEBUG1, (errmsg("SetupInterconnect+" UINT64_FORMAT
									"ms: select() "
									"Interest: %s. timeout=" UINT64_FORMAT "ms "
									"outgoing_fail=%d iteration=%d",
									elapsed_ms, logbuf.data, timeout_ms,
									outgoing_fail_count, iteration)));
			pfree(logbuf.data);
			MemSet(&logbuf, 0, sizeof(logbuf));
		}

		ML_CHECK_FOR_INTERRUPTS(interconnect_context->teardownActive);

		n = select(highsock + 1, (fd_set *) &rset, (fd_set *) &wset, (fd_set *) &eset, &timeout);
		select_errno = errno;

		ML_CHECK_FOR_INTERRUPTS(interconnect_context->teardownActive);

		if (Gp_role == GP_ROLE_DISPATCH)
			checkForCancelFromQD(interconnect_context);

		elapsed_ms = gp_get_elapsed_ms(&startTime);

		/*
		 * Log the select() if requested.
		 */
		if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG ||
			(gp_log_interconnect >= GPVARS_VERBOSITY_VERBOSE &&
			 n != expectedTotalIncoming + expectedTotalOutgoing))
		{
			int			elevel = (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG) ? DEBUG1 : LOG;

			initStringInfo(&logbuf);
			if (n > 0)
			{
				appendStringInfo(&logbuf, "result=%d Ready: ", n);
				format_fd_set(&logbuf, highsock + 1, &rset, "r={", "} ");
				format_fd_set(&logbuf, highsock + 1, &wset, "w={", "} ");
				format_fd_set(&logbuf, highsock + 1, &eset, "e={", "}");
			}
			else
				appendStringInfoString(&logbuf, n < 0 ? "error" : "timeout");

			ereport(elevel, (errmsg("SetupInterconnect+" UINT64_FORMAT "ms: select() %s",
									elapsed_ms, logbuf.data)));
			pfree(logbuf.data);
			MemSet(&logbuf, 0, sizeof(logbuf));
		}

		/* An error other than EINTR is not acceptable */
		if (n < 0)
		{
			if (select_errno == EINTR)
				continue;

			ereport(ERROR,
					(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
					 errmsg("interconnect error in select: %s",
							strerror(select_errno))));
		}

		/*
		 * check our connections that are accept()ed but have no register
		 * message yet.  we don't know which motion node these apply to until
		 * we actually receive the REGISTER message.  this is why they are all
		 * in a single list.
		 *
		 * NOTE: we don't use foreach() here because we want to trim from the
		 * list as we go.
		 *
		 * We used to bail out of the while loop when incoming_count hit
		 * expectedTotalIncoming, but that causes problems if some connections
		 * are left over -- better to just process them here.
		 */
		cell = list_head(interconnect_context->incompleteConns);
		while (n > 0 && cell != NULL)
		{
			conn = (MotionConn *) lfirst(cell);

			/*
			 * we'll get the next cell ready now in case we need to delete the
			 * cell that corresponds to our MotionConn
			 */
			cell = lnext(cell);

			if (MPP_FD_ISSET(conn->sockfd, &rset))
			{
				n--;
				if (readRegisterMessage(interconnect_context, conn))
				{
					/*
					 * We're done with this connection (either it is bogus
					 * (and has been dropped), or we've added it to the
					 * appropriate hash table)
					 */
					interconnect_context->incompleteConns = list_delete_ptr(interconnect_context->incompleteConns, conn);

					/* is the connection ready ? */
					if (conn->sockfd != -1)
						incoming_count++;

					if (conn->pBuff)
						pfree(conn->pBuff);

					/* Free temporary MotionConn storage. */
					pfree(conn);
				}
			}
		}

		/*
		 * Someone tickling our listener port?  Accept pending connections.
		 */
		if (MPP_FD_ISSET(TCP_listenerFd, &rset))
		{
			n--;
			while ((conn = acceptIncomingConnection()) != NULL)
			{
				/*
				 * get the connection ready for a subsequent call to
				 * ReadRegisterMessage()
				 */
				conn->state = mcsRecvRegMsg;
				conn->msgSize = sizeof(RegisterMessage);
				conn->msgPos = conn->pBuff;
				conn->remapper = CreateTupleRemapper();
				interconnect_context->incompleteConns = lappend(interconnect_context->incompleteConns, conn);
			}
		}

		/*
		 * Check our outgoing connections.
		 */
		i = 0;
		while (n > 0 &&
			   outgoing_count < expectedTotalOutgoing &&
			   i < sendingChunkTransportState->numConns)
		{						/* loop to check outgoing connections */
			conn = &sendingChunkTransportState->conns[i++];

			switch (conn->state)
			{
				case mcsConnecting:
					/* Has connect() succeeded or failed? */
					if (MPP_FD_ISSET(conn->sockfd, &wset) ||
						MPP_FD_ISSET(conn->sockfd, &eset))
					{
						n--;
						updateOutgoingConnection(interconnect_context, sendingChunkTransportState, conn, -1);
						switch (conn->state)
						{
							case mcsSetupOutgoingConnection:
								/* Failed.  Wait awhile before retrying. */
								conn->wakeup_ms = (iteration - 1) * 1000 + elapsed_ms;
								break;
							case mcsSendRegMsg:
								/* Connected, but reg msg not fully sent. */
								conn->wakeup_ms = 0;
								break;
							case mcsStarted:
								/* Connected, sent reg msg, ready to rock. */
								outgoing_count++;
								break;
							default:
								elog(FATAL, "SetupInterconnect: bad outgoing state");
						}
					}
					break;

				case mcsSendRegMsg:
					/* Ready to continue sending? */
					if (MPP_FD_ISSET(conn->sockfd, &wset))
					{
						n--;
						sendRegisterMessage(interconnect_context, sendingChunkTransportState, conn);
						if (conn->state == mcsStarted)
							outgoing_count++;
					}
					break;

				default:
					break;
			}
		}						/* loop to check outgoing connections */

		/* By now we have dealt with all the events reported by select(). */
		if (n != 0)
			elog(FATAL, "SetupInterconnect: extra select events.");
	}							/* select() loop */

	/*
	 * if everything really got setup properly then we shouldn't have any
	 * incomplete connections.
	 *
	 * XXX: In some cases (when the previous query got 'fast-track cancelled'
	 * because of an error during setup) we can wind up with connections here
	 * which ought to have been cleaned up. These connections should be closed
	 * out here. It would obviously be better if we could avoid these
	 * connections in the first place!
	 */
	if (list_length(interconnect_context->incompleteConns) != 0)
	{
		if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
			elog(DEBUG2, "Incomplete connections after known connections done, cleaning %d",
				 list_length(interconnect_context->incompleteConns));

		while ((cell = list_head(interconnect_context->incompleteConns)) != NULL)
		{
			conn = (MotionConn *) lfirst(cell);

			if (conn->sockfd != -1)
			{
				flushIncomingData(conn->sockfd);
				shutdown(conn->sockfd, SHUT_WR);
				closesocket(conn->sockfd);
				conn->sockfd = -1;
			}

			interconnect_context->incompleteConns = list_delete_ptr(interconnect_context->incompleteConns, conn);
			if (conn->pBuff)
				pfree(conn->pBuff);
			pfree(conn);
		}
	}

	interconnect_context->activated = true;

	if (gp_log_interconnect >= GPVARS_VERBOSITY_TERSE)
	{
		elapsed_ms = gp_get_elapsed_ms(&startTime);
		/* log if verbose, or if setup consumed >= 10% of the timeout budget */
		if (gp_log_interconnect >= GPVARS_VERBOSITY_VERBOSE ||
			elapsed_ms >= 0.1 * 1000 * interconnect_setup_timeout)
			elog(LOG, "SetupInterconnect+" UINT64_FORMAT "ms: Activated %d incoming, "
				 "%d outgoing routes.",
				 elapsed_ms, incoming_count, outgoing_count);
	}

	estate->interconnect_context = interconnect_context;
	estate->es_interconnect_is_setup = true;
}								/* SetupInterconnect */
/* TeardownInterconnect() function is used to cleanup interconnect resources that
* were allocated during SetupInterconnect(). This function should ALWAYS be
* called after SetupInterconnect to avoid leaking resources (like sockets)
* even if SetupInterconnect did not complete correctly. As a result, this
* function must complete successfully even if SetupInterconnect didn't.
*
* SetupInterconnect() always gets called under the ExecutorState MemoryContext.
* This context is destroyed at the end of the query and all memory that gets
 * allocated under it is free'd. We don't have to worry about pfree() but
* we definitely have to worry about socket resources.
*/
void
TeardownTCPInterconnect(ChunkTransportState *transportStates, bool hasErrors)
{
	ListCell   *cell;
	ChunkTransportStateEntry *pEntry = NULL;
	int			i;
	ExecSlice  *mySlice;
	MotionConn *conn;

	if (transportStates == NULL || transportStates->sliceTable == NULL)
	{
		elog(LOG, "TeardownTCPInterconnect: missing slice table.");
		return;
	}

	/*
	 * if we're already trying to clean up after an error -- don't allow
	 * signals to interrupt us
	 */
	if (hasErrors)
		HOLD_INTERRUPTS();

	mySlice = &transportStates->sliceTable->slices[transportStates->sliceId];

	/* Log the start of TeardownInterconnect. */
	if (gp_log_interconnect >= GPVARS_VERBOSITY_TERSE)
	{
		int			elevel = 0;

		if (hasErrors || !transportStates->activated)
		{
			if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
				elevel = LOG;
			else
				elevel = DEBUG1;
		}
		else if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
			elevel = DEBUG4;

		if (elevel)
			ereport(elevel, (errmsg("Interconnect seg%d slice%d cleanup state: "
									"%s; setup was %s",
									GpIdentity.segindex, mySlice->sliceIndex,
									hasErrors ? "error" : "normal",
									transportStates->activated ? "completed" : "exited")));

		/* if setup did not complete, log the slicetable */
		if (!transportStates->activated &&
			gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
			elog_node_display(DEBUG3, "local slice table", transportStates->sliceTable, true);
	}

	/*
	 * phase 1 mark all sockets (senders and receivers) with shutdown(2),
	 * start with incomplete connections (if any).
	 */

	/*
	 * The incompleteConns list is only used as a staging area for MotionConns
	 * during by SetupInterconnect().  So we only expect to have entries here
	 * if SetupInterconnect() did not finish correctly.
	 *
	 * NOTE: we don't use foreach() here because we want to trim from the list
	 * as we go.
	 */
	if (transportStates->incompleteConns &&
		gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
		elog(DEBUG2, "Found incomplete conn. length %d", list_length(transportStates->incompleteConns));

	/*
	 * These are connected inbound peers that we haven't dealt with quite yet
	 */
	while ((cell = list_head(transportStates->incompleteConns)) != NULL)
	{
		MotionConn *conn = (MotionConn *) lfirst(cell);

		/* they're incomplete, so just slam them shut. */
		if (conn->sockfd != -1)
		{
			flushIncomingData(conn->sockfd);
			shutdown(conn->sockfd, SHUT_WR);
			closesocket(conn->sockfd);
			conn->sockfd = -1;
		}

		/* free up the tuple remapper */
		if (conn->remapper)
		{
			DestroyTupleRemapper(conn->remapper);
			conn->remapper = NULL;
		}

		/*
		 * The list operations are kind of confusing (see list.c), we could
		 * alternatively write the following line as:
		 *
		 * incompleteConns = list_delete_cell(incompleteConns, cell, NULL); or
		 * incompleteConns = list_delete_first(incompleteConns); or
		 * incompleteConns = list_delete_ptr(incompleteConns, conn)
		 */
		transportStates->incompleteConns = list_delete(transportStates->incompleteConns, conn);
	}
	list_free(transportStates->incompleteConns);
	transportStates->incompleteConns = NIL;

	/*
	 * Now "normal" connections which made it through our peer-registration
	 * step.  With these we have to worry about "in-flight" data.
	 */
	if (mySlice->parentIndex != -1)
	{
		/* cleanup a Sending motion node.  Only shutdown(SHUT_WR) here; the
		 * sockets are closed in phase 2 after the peer drains them. */
		if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
			elog(DEBUG3, "Interconnect seg%d slice%d closing connections to slice%d",
				 GpIdentity.segindex, mySlice->sliceIndex, mySlice->parentIndex);

		getChunkTransportState(transportStates, mySlice->sliceIndex, &pEntry);

		for (i = 0; i < pEntry->numConns; i++)
		{
			conn = pEntry->conns + i;
			if (conn->sockfd >= 0)
				shutdown(conn->sockfd, SHUT_WR);

			/* free up the tuple remapper */
			if (conn->remapper)
			{
				DestroyTupleRemapper(conn->remapper);
				conn->remapper = NULL;
			}
		}
	}

	/*
	 * cleanup all of our Receiving Motion nodes, these get closed immediately
	 * (the receiver know for real if they want to shut down -- they aren't
	 * going to be processing any more data).
	 */
	foreach(cell, mySlice->children)
	{
		ExecSlice  *aSlice;
		int			childId = lfirst_int(cell);

		aSlice = &transportStates->sliceTable->slices[childId];

		getChunkTransportState(transportStates, aSlice->sliceIndex, &pEntry);

		if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
			elog(DEBUG3, "Interconnect closing connections from slice%d",
				 aSlice->sliceIndex);

		/*
		 * receivers know that they no longer care about data from below ...
		 * so we can safely discard data queued in both directions
		 */
		for (i = 0; i < pEntry->numConns; i++)
		{
			conn = pEntry->conns + i;
			if (conn->sockfd >= 0)
			{
				flushIncomingData(conn->sockfd);
				shutdown(conn->sockfd, SHUT_WR);
				closesocket(conn->sockfd);
				conn->sockfd = -1;

				/* free up the tuple remapper */
				if (conn->remapper)
				{
					DestroyTupleRemapper(conn->remapper);
					conn->remapper = NULL;
				}
			}
		}
		removeChunkTransportState(transportStates, aSlice->sliceIndex);
		pfree(pEntry->conns);
	}

	/*
	 * phase 2: wait on all sockets for completion, when complete call close
	 * and free (if required)
	 */
	if (mySlice->parentIndex != -1)
	{
		/* cleanup a Sending motion node. */
		getChunkTransportState(transportStates, mySlice->sliceIndex, &pEntry);

		/*
		 * On a normal teardown routine, sender has sent an EOS packet and
		 * disabled further send operations on phase 1. sender can't close the
		 * connection immediately because EOS packet or data packets within the
		 * kernel sending buffer may be lost on some platform if sender close the
		 * connection totally.
		 *
		 * The correct way is sender blocks on the connection until receivers
		 * get the EOS packets and close the peer, then it's safe for sender to
		 * close the connection totally.
		 *
		 * If some errors are happening, senders can skip this step to avoid hung
		 * issues, QD will take care of the error handling.
		 */
		if (!hasErrors)
			waitOnOutbound(pEntry);

		for (i = 0; i < pEntry->numConns; i++)
		{
			conn = pEntry->conns + i;
			if (conn->sockfd >= 0)
			{
				closesocket(conn->sockfd);
				conn->sockfd = -1;
			}
		}
		pEntry = removeChunkTransportState(transportStates, mySlice->sliceIndex);
	}

	/*
	 * If there are clients waiting on our listener; we *must* disconnect
	 * them; otherwise we'll be out of sync with the client (we may accept
	 * them on a subsequent query!)
	 */
	if (TCP_listenerFd != -1)
		flushInterconnectListenerBacklog();

	transportStates->activated = false;
	transportStates->sliceTable = NULL;

#ifdef ENABLE_IC_PROXY
	ic_proxy_backend_close_context(transportStates);
#endif							/* ENABLE_IC_PROXY */

	if (transportStates->states != NULL)
		pfree(transportStates->states);
	pfree(transportStates);

	if (hasErrors)
		RESUME_INTERRUPTS();

#ifdef AMS_VERBOSE_LOGGING
	if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
		elog(DEBUG4, "TeardownInterconnect successful");
#endif
}
#ifdef AMS_VERBOSE_LOGGING
/*
 * dumpEntryConnections
 *
 * Emit one log line (at the caller-supplied elevel) for every MotionConn
 * slot in the given transport-state entry, showing either "not connected"
 * or the peer identity and socket details.
 */
void
dumpEntryConnections(int elevel, ChunkTransportStateEntry *pEntry)
{
	int			slot;

	for (slot = 0; slot < pEntry->numConns; slot++)
	{
		MotionConn *mConn = &pEntry->conns[slot];

		if (mConn->state == mcsNull && mConn->sockfd == -1)
		{
			elog(elevel, "... motNodeId=%d conns[%d]: not connected",
				 pEntry->motNodeId, slot);
			continue;
		}

		elog(elevel, "... motNodeId=%d conns[%d]: "
			 "%d pid=%d sockfd=%d remote=%s local=%s",
			 pEntry->motNodeId, slot,
			 mConn->remoteContentId,
			 mConn->cdbProc ? mConn->cdbProc->pid : 0,
			 mConn->sockfd,
			 mConn->remoteHostAndPort,
			 mConn->localHostAndPort);
	}
}
/*
 * print_connection
 *
 * Debug helper: log the local and remote TCP port numbers of socket 'fd',
 * prefixed by 'msg'.  Lookup failures are reported at ERROR normally, but
 * downgraded to LOG while teardown is in progress.
 */
static void
print_connection(ChunkTransportState *transportStates, int fd, const char *msg)
{
	struct sockaddr_in selfAddr;
	struct sockaddr_in peerAddr;
	socklen_t	addrLen;
	int			failLevel = transportStates->teardownActive ? LOG : ERROR;

	addrLen = sizeof(peerAddr);
	if (getpeername(fd, (struct sockaddr *) &peerAddr, &addrLen) < 0)
		elog(failLevel, "print_connection(%d, %s): can't get peername err: %m",
			 fd, msg);

	addrLen = sizeof(selfAddr);
	if (getsockname(fd, (struct sockaddr *) &selfAddr, &addrLen) < 0)
		elog(failLevel, "print_connection(%d, %s): can't get localname err: %m",
			 fd, msg);

	elog(DEBUG2, "%s: w/ports (%d/%d)",
		 msg, ntohs(selfAddr.sin_port), ntohs(peerAddr.sin_port));
}
#endif
/*
 * format_fd_set
 *
 * Append to 'buf' a comma-separated list of the file descriptors in the
 * range [1, nfds) that are members of 'fds', bracketed by the given
 * prefix and suffix strings.
 */
static void
format_fd_set(StringInfo buf, int nfds, mpp_fd_set *fds, char *pfx, char *sfx)
{
	int			fd;
	bool		needComma = false;

	appendStringInfoString(buf, pfx);
	for (fd = 1; fd < nfds; fd++)
	{
		if (!MPP_FD_ISSET(fd, fds))
			continue;
		if (needComma)
			appendStringInfoChar(buf, ',');
		appendStringInfo(buf, "%d", fd);
		needComma = true;
	}
	appendStringInfoString(buf, sfx);
}
/*
 * flushInterconnectListenerBacklog
 *
 * Drain and discard every connection request pending on the interconnect
 * listener socket.  Each pending connection is accepted, its inbound data
 * flushed (when the socket can be made non-blocking), then shut down and
 * closed.  Loops until a zero-timeout select() reports the listener idle.
 */
static void
flushInterconnectListenerBacklog(void)
{
	int			pendingConn,
				newfd,
				i;
	mpp_fd_set	rset;
	struct timeval timeout;

	do
	{
		MPP_FD_ZERO(&rset);
		MPP_FD_SET(TCP_listenerFd, &rset);

		/* zero timeout: poll, never block */
		timeout.tv_sec = 0;
		timeout.tv_usec = 0;

		pendingConn = select(TCP_listenerFd + 1, (fd_set *) &rset, NULL, NULL, &timeout);
		if (pendingConn > 0)
		{
			for (i = 0; i < pendingConn; i++)
			{
				struct sockaddr_storage remoteAddr;
				struct sockaddr_storage localAddr;
				char		remoteHostAndPort[64];
				char		localHostAndPort[64];
				socklen_t	addrsize;

				addrsize = sizeof(remoteAddr);
				newfd = accept(TCP_listenerFd, (struct sockaddr *) &remoteAddr, &addrsize);
				if (newfd < 0)
				{
					ereport(DEBUG3, (errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
									 errmsg("Interconnect error while clearing incoming connections."),
									 errdetail("%s sockfd=%d: %m", "accept", newfd)));
					continue;
				}

				if (gp_log_interconnect >= GPVARS_VERBOSITY_VERBOSE)
				{
					/* Get remote and local host:port strings for message. */
					format_sockaddr(&remoteAddr, remoteHostAndPort,
									sizeof(remoteHostAndPort));
					addrsize = sizeof(localAddr);
					if (getsockname(newfd, (struct sockaddr *) &localAddr, &addrsize))
					{
						ereport(LOG,
								(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
								 errmsg("interconnect error while clearing incoming connections"),
								 errdetail("getsockname sockfd=%d remote=%s: %m",
										   newfd, remoteHostAndPort)));
					}
					else
					{
						format_sockaddr(&localAddr, localHostAndPort,
										sizeof(localHostAndPort));
						ereport(DEBUG2, (errmsg("Interconnect clearing incoming connection "
												"from remote=%s to local=%s. sockfd=%d.",
												remoteHostAndPort, localHostAndPort,
												newfd)));
					}
				}

				/* make socket non-blocking so flushIncomingData can't hang */
				if (!pg_set_noblock(newfd))
				{
					elog(LOG, "During incoming queue flush, could not set non-blocking.");
				}
				else
				{
					/* shutdown this socket */
					flushIncomingData(newfd);
				}
				shutdown(newfd, SHUT_WR);
				closesocket(newfd);
			}
		}
		else if (pendingConn < 0 && errno != EINTR)
		{
			ereport(LOG,
					(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
					 errmsg("interconnect error during listener cleanup"),
					 errdetail("select sockfd=%d: %m", TCP_listenerFd)));
		}

		/*
		 * now we either loop through for another check (on EINTR or if we
		 * cleaned one client) or we're done
		 */
	}
	while (pendingConn != 0);
}
/*
* Wait for our peer to close the socket (at which point our select(2)
* will tell us that the socket is ready to read, and the socket recv
* will return 0 or a 'stop' message.
*
* This works without the select, but burns tons of CPU doing nothing
* useful.
*
* ----
* The way it used to work, is we used CHECK_FOR_INTERRUPTS(), and
* wrapped it in PG_TRY: We *must* return locally; otherwise
* TeardownInterconnect() can't exit cleanly. So we wrap our
* cancel-detection checks for interrupts with a PG_TRY block.
*
* By swallowing the non-local return on cancel, we lose the "cancel"
* state (CHECK_FOR_INTERRUPTS() clears QueryCancelPending()). So we
* should just check QueryCancelPending here ... and avoid calling
* CHECK_FOR_INTERRUPTS().
*
* ----
*
* Now we just check explicitly for interrupts (which is, as far as I
* can tell, the only interrupt-driven state change we care
* about). This should give us notification of ProcDiePending and
* QueryCancelPending
*
* XXX: Consider using something like WaitLatchOrSocket() instead of select().
*/
static void
waitOnOutbound(ChunkTransportStateEntry *pEntry)
{
	MotionConn *conn;
	mpp_fd_set	waitset,
				curset;
	int			maxfd = -1;
	int			i,
				n,
				conn_count = 0;
	struct timeval endtime;
	SIMPLE_FAULT_INJECTOR("waitOnOutbound");
	/* Collect every still-open outgoing socket into the wait set. */
	MPP_FD_ZERO(&waitset);
	for (i = 0; i < pEntry->numConns; i++)
	{
		conn = pEntry->conns + i;
		if (conn->sockfd >= 0)
		{
			MPP_FD_SET(conn->sockfd, &waitset);
			if (conn->sockfd > maxfd)
				maxfd = conn->sockfd;
			conn_count++;
		}
	}
	/* Overall deadline: Gp_interconnect_transmit_timeout seconds from now. */
	gettimeofday(&endtime, NULL);
	endtime.tv_sec += Gp_interconnect_transmit_timeout;
	for (;;)
	{
		int			saved_err;
		struct timeval timeout;
		struct timeval now;
		int64		timeoutval;
		/* All peers have responded (or been written off); we're done. */
		if (conn_count == 0)
			return;
		/* Explicit cancel/finish check; see the comment block above. */
		if (CancelRequested() || QueryFinishPending)
		{
#ifdef AMS_VERBOSE_LOGGING
			elog(DEBUG3, "waitOnOutbound(): interrupt pending fast-track");
#endif
			return;
		}
		/* Remaining time until the deadline, in microseconds. */
		gettimeofday(&now, NULL);
		timeoutval = (endtime.tv_sec * 1000000 + endtime.tv_usec) - (now.tv_sec * 1000000 + now.tv_usec);
		if (timeoutval <= 0)
		{
			/*
			 * We timed out trying to receive a final response from the motion
			 * receiver (either a 'stop' message or 0). Since this response
			 * confirms that the receiver has received the EOS from this sender
			 * (and all preceding data), receipt of this response is vital.
			 * Hence, we error out.
			 */
			ereport(ERROR,
					errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
					errmsg("timed out waiting for response from motion receiver during TCP interconnect teardown"),
					errdetail("%d connection(s) with pending response after %d seconds",
							  conn_count,
							  Gp_interconnect_transmit_timeout));
		}
		timeout.tv_sec = timeoutval / 1000000;
		timeout.tv_usec = timeoutval % 1000000;
		/* select() mutates the fd set, so work on a copy of waitset. */
		memcpy(&curset, &waitset, sizeof(mpp_fd_set));
		n = select(maxfd + 1, (fd_set *) &curset, NULL, NULL, &timeout);
		if (n < 0)
		{
			if (errno == EINTR)
				continue;
			saved_err = errno;
			if (CancelRequested() || QueryFinishPending)
				return;
			/* Something unexpected, but probably not horrible warn and return */
			elog(LOG, "TeardownTCPInterconnect: waitOnOutbound select errno=%d", saved_err);
			break;
		}
		if (n == 0)
		{
			/*
			 * We timed out trying to receive a final response from the motion
			 * receiver (either a 'stop' message or 0). Since this response
			 * confirms that the receiver has received the EOS from this sender
			 * (and all preceding data), receipt of this response is vital.
			 * Hence, we error out.
			 */
			ereport(ERROR,
					errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
					errmsg("timed out waiting for response from motion receiver during TCP interconnect teardown"),
					errdetail("%d connection(s) with pending response after %d seconds",
							  conn_count,
							  Gp_interconnect_transmit_timeout));
		}
		/* Drain each socket select() reported readable. */
		for (i = 0; i < pEntry->numConns; i++)
		{
			conn = pEntry->conns + i;
			if (conn->sockfd >= 0 && MPP_FD_ISSET(conn->sockfd, &curset))
			{
				int			count;
				char		buf;
				/* ready to read. */
				count = recv(conn->sockfd, &buf, sizeof(buf), 0);
				if (count == 0 || count == 1)	/* done ! */
				{
					/* got a stop message (1 byte) or orderly close (0 bytes) */
					AssertImply(count == 1, buf == 'S');
					MPP_FD_CLR(conn->sockfd, &waitset);
					/* we may have finished */
					conn_count--;
					continue;
				}
				else if (count < 0 && (errno == EAGAIN || errno == EINTR))
					continue;
				/*
				 * Something unexpected, but probably not horrible warn and
				 * return
				 */
				MPP_FD_CLR(conn->sockfd, &waitset);
				/* we may have finished */
				conn_count--;
				elog(LOG, "TeardownTCPInterconnect: waitOnOutbound %s: %m", "recv");
				continue;
			}
		}
	}
	return;
}
/*
 * doSendStopMessageTCP
 *		Tell every sender of this motion node to stop sending: write a
 *		single 'S' byte on each registered incoming connection and then
 *		deregister our read interest in it.
 */
static void
doSendStopMessageTCP(ChunkTransportState *transportStates, int16 motNodeID)
{
	ChunkTransportStateEntry *pEntry = NULL;
	MotionConn *conn;
	int			i;
	char		m = 'S';
	ssize_t		written;
	SIMPLE_FAULT_INJECTOR("doSendStopMessageTCP");
	getChunkTransportState(transportStates, motNodeID, &pEntry);
	Assert(pEntry);
	if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
		elog(DEBUG3, "Interconnect needs no more input from slice%d; notifying senders to stop.",
			 motNodeID);
	/*
	 * Note: we're only concerned with receivers here.
	 */
	for (i = 0; i < pEntry->numConns; i++)
	{
		conn = pEntry->conns + i;
		if (conn->sockfd >= 0 &&
			MPP_FD_ISSET(conn->sockfd, &pEntry->readSet))
		{
			/* someone is trying to send stuff to us, let's stop 'em */
			/* retry only on EINTR; any other send error falls through */
			while ((written = send(conn->sockfd, &m, sizeof(m), 0)) < 0)
			{
				if (errno == EINTR)
				{
					ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);
					continue;
				}
				else
					break;
			}
			if (written != sizeof(m))
			{
				/*
				 * how can this happen ? the kernel buffer should be empty in
				 * the send direction
				 */
				elog(LOG, "SendStopMessage: failed on write. %m");
			}
		}
		/* CRITICAL TO AVOID DEADLOCK */
		DeregisterReadInterest(transportStates, motNodeID, i,
							   "no more input needed");
	}
}
/*
 * RecvTupleChunkFromTCP
 *		Receive the next tuple-chunk list from one specific source route of
 *		the given motion node.
 */
static TupleChunkListItem
RecvTupleChunkFromTCP(ChunkTransportState *transportStates,
					  int16 motNodeID,
					  int16 srcRoute)
{
	ChunkTransportStateEntry *entry = NULL;

	/* Honor pending cancel/teardown requests before blocking on a read. */
	ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);
#ifdef AMS_VERBOSE_LOGGING
	elog(DEBUG5, "RecvTupleChunkFrom(motNodID=%d, srcRoute=%d)", motNodeID, srcRoute);
#endif
	getChunkTransportState(transportStates, motNodeID, &entry);

	/* Read straight from the selected route's connection. */
	return RecvTupleChunk(entry->conns + srcRoute, transportStates);
}
/*
 * RecvTupleChunkFromAnyTCP
 *		Wait for data on any incoming connection of the motion node and
 *		return the first chunk list found; *srcRoute is set to the route it
 *		came from.
 */
static TupleChunkListItem
RecvTupleChunkFromAnyTCP(ChunkTransportState *transportStates,
						 int16 motNodeID,
						 int16 *srcRoute)
{
	ChunkTransportStateEntry *pEntry = NULL;
	TupleChunkListItem tcItem;
	MotionConn *conn;
	mpp_fd_set	rset;
	int			n,
				i,
				index;
	bool		skipSelect = false;
	int			nwaitfds = 0;
	int		   *waitFds = NULL;
#ifdef AMS_VERBOSE_LOGGING
	elog(DEBUG5, "RecvTupleChunkFromAny(motNodeId=%d)", motNodeID);
#endif
	getChunkTransportState(transportStates, motNodeID, &pEntry);
	int			retry = 0;
	do
	{
		/* Every 2 seconds */
		if (Gp_role == GP_ROLE_DISPATCH && retry++ > 4)
		{
			retry = 0;
			/* check to see if the dispatcher should cancel */
			checkForCancelFromQD(transportStates);
		}
		struct timeval timeout = tval;
		int			nfds = pEntry->highReadSock;
		/* make sure we check for these. */
		ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);
		memcpy(&rset, &pEntry->readSet, sizeof(mpp_fd_set));
		/*
		 * since we may have data in a local buffer, we may be able to
		 * short-circuit the select() call (and if we don't do this we may
		 * wait when we have data ready, since it has already been read)
		 */
		for (i = 0; i < pEntry->numConns; i++)
		{
			conn = pEntry->conns + i;
			if (conn->sockfd >= 0 &&
				MPP_FD_ISSET(conn->sockfd, &rset) &&
				conn->recvBytes != 0)
			{
				/* we have data on this socket, let's short-circuit our select */
				MPP_FD_ZERO(&rset);
				MPP_FD_SET(conn->sockfd, &rset);
				skipSelect = true;
			}
		}
		/* buffered data found: skip select() and go scan the rset we built */
		if (skipSelect)
			break;
		/*
		 * Also monitor the events on dispatch fds, eg, errors or sequence
		 * request from QEs.
		 */
		nwaitfds = 0;
		if (Gp_role == GP_ROLE_DISPATCH)
		{
			waitFds = cdbdisp_getWaitSocketFds(transportStates->estate->dispatcherState, &nwaitfds);
			if (waitFds != NULL)
				for (i = 0; i < nwaitfds; i++)
				{
					MPP_FD_SET(waitFds[i], &rset);
					/* record the max fd number for select() later */
					if (waitFds[i] > nfds)
						nfds = waitFds[i];
				}
		}
		// GPDB_12_MERGE_FIXME: should use WaitEventSetWait() instead of select()
		// follow the routine in ic_udpifc.c
		n = select(nfds + 1, (fd_set *) &rset, NULL, NULL, &timeout);
		if (n < 0)
		{
			if (errno == EINTR)
				continue;
			ereport(ERROR,
					(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
					 errmsg("interconnect error receiving an incoming packet"),
					 errdetail("%s: %m", "select")));
		}
		else if (n > 0 && nwaitfds > 0)
		{
			/* subtract dispatcher fds from n so the loop condition only
			 * reflects readiness of actual interconnect sockets */
			bool		need_check = false;
			for (i = 0; i < nwaitfds; i++)
				if (MPP_FD_ISSET(waitFds[i], &rset))
				{
					need_check = true;
					n--;
				}
			/* handle events on dispatch connection */
			if (need_check)
				checkForCancelFromQD(transportStates);
		}
		if (waitFds)
			pfree(waitFds);
#ifdef AMS_VERBOSE_LOGGING
		elog(DEBUG5, "RecvTupleChunkFromAny() select() returned %d ready sockets", n);
#endif
	} while (n < 1);
	/*
	 * We scan the file descriptors starting from where we left off in the
	 * last call (don't continually poll the first when others may be ready!).
	 */
	index = pEntry->scanStart;
	for (i = 0; i < pEntry->numConns; i++, index++)
	{
		/*
		 * avoid division ? index = ((scanStart + i) % pEntry->numConns);
		 */
		if (index >= pEntry->numConns)
			index = 0;
		conn = pEntry->conns + index;
#ifdef AMS_VERBOSE_LOGGING
		if (!conn->stillActive)
		{
			elog(LOG, "RecvTupleChunkFromAny: trying to read on inactive socket %d", conn->sockfd);
		}
#endif
		if (conn->sockfd >= 0 &&
			MPP_FD_ISSET(conn->sockfd, &rset))
		{
#ifdef AMS_VERBOSE_LOGGING
			elog(DEBUG5, "RecvTupleChunkFromAny() (fd %d) %d/%d", conn->sockfd, motNodeID, index);
#endif
			tcItem = RecvTupleChunk(conn, transportStates);
			*srcRoute = index;
			/*
			 * advance start point (avoid doing division/modulus operation
			 * here)
			 */
			pEntry->scanStart = index + 1;
			return tcItem;
		}
	}
	/* we should never ever get here... */
	elog(FATAL, "RecvTupleChunkFromAnyTCP: didn't receive, and didn't get cancelled");
	return NULL;				/* keep the compiler happy */
}
/* See ml_ipc.h */
/*
 * SendEosTCP
 *		Broadcast the end-of-stream chunk (tcItem) to every receiver of the
 *		motion node, then flush each connection's outgoing buffer.
 */
static void
SendEosTCP(ChunkTransportState *transportStates,
		   int motNodeID,
		   TupleChunkListItem tcItem)
{
	ChunkTransportStateEntry *pEntry = NULL;
	MotionConn *conn;
	int			i;
	if (!transportStates)
	{
		elog(FATAL, "SendEosTCP: missing interconnect context.");
	}
	else if (!transportStates->activated && !transportStates->teardownActive)
	{
		elog(FATAL, "SendEosTCP: context and teardown inactive.");
	}
	/* check em' */
	ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);
	getChunkTransportState(transportStates, motNodeID, &pEntry);
	if (gp_log_interconnect >= GPVARS_VERBOSITY_DEBUG)
		elog(DEBUG3, "Interconnect seg%d slice%d sending end-of-stream to slice%d",
			 GpIdentity.segindex, motNodeID, pEntry->recvSlice->sliceIndex);
	/*
	 * we want to add our tcItem onto each of the outgoing buffers -- this is
	 * guaranteed to leave things in a state where a flush is *required*.
	 */
	doBroadcast(transportStates, pEntry, tcItem, NULL);
	/* now flush all of the buffers. */
	for (i = 0; i < pEntry->numConns; i++)
	{
		conn = pEntry->conns + i;
		/* only flush connections that are open and still in the started state */
		if (conn->sockfd >= 0 && conn->state == mcsStarted)
			flushBuffer(transportStates, pEntry, conn, motNodeID);
#ifdef AMS_VERBOSE_LOGGING
		/* NOTE(review): this "Leaving" trace sits inside the loop, so it is
		 * emitted once per connection — presumably it was meant to follow
		 * the loop; confirm before relying on it. */
		elog(DEBUG5, "SendEosTCP() Leaving");
#endif
	}
	return;
}
/*
 * flushBuffer
 *		Send the connection's buffered message (header length is patched in
 *		first). Watches the socket for a readable event while sending: any
 *		incoming byte from a receiver means "stop sending", in which case the
 *		connection is marked inactive and false is returned. Returns true
 *		when the whole message was written.
 */
static bool
flushBuffer(ChunkTransportState *transportStates,
			ChunkTransportStateEntry *pEntry, MotionConn *conn, int16 motionId)
{
	char	   *sendptr;
	int			n,
				sent = 0;
	mpp_fd_set	wset;
	mpp_fd_set	rset;
#ifdef AMS_VERBOSE_LOGGING
	{
		struct timeval snapTime;
		gettimeofday(&snapTime, NULL);
		elog(DEBUG5, "----sending chunk @%s.%d time is %d.%d",
			 __FILE__, __LINE__, (int) snapTime.tv_sec, (int) snapTime.tv_usec);
	}
#endif
	/* first set header length */
	*(uint32 *) conn->pBuff = conn->msgSize;
	/* now send message */
	sendptr = (char *) conn->pBuff;
	sent = 0;
	do
	{
		struct timeval timeout;
		/* check for stop message or peer teardown before sending anything */
		timeout.tv_sec = 0;
		timeout.tv_usec = 0;
		MPP_FD_ZERO(&rset);
		MPP_FD_SET(conn->sockfd, &rset);
		/*
		 * since timeout = 0, select returns imediately and no time is wasted
		 * waiting trying to send data on the network
		 */
		n = select(conn->sockfd + 1, (fd_set *) &rset, NULL, NULL, &timeout);
		/* handle errors at the write call, below */
		if (n > 0 && MPP_FD_ISSET(conn->sockfd, &rset))
		{
#ifdef AMS_VERBOSE_LOGGING
			print_connection(transportStates, conn->sockfd, "stop from");
#endif
			/* got a stop message */
			conn->stillActive = false;
			return false;
		}
		if ((n = send(conn->sockfd, sendptr + sent, conn->msgSize - sent, 0)) < 0)
		{
			int			send_errno = errno;
			ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);
			if (send_errno == EINTR)
				continue;
			if (send_errno == EWOULDBLOCK)
			{
				/* socket send buffer full: wait until writable (or readable,
				 * which signals a stop/teardown) */
				do
				{
					timeout = tval;
					ML_CHECK_FOR_INTERRUPTS(transportStates->teardownActive);
					MPP_FD_ZERO(&rset);
					MPP_FD_ZERO(&wset);
					MPP_FD_SET(conn->sockfd, &wset);
					MPP_FD_SET(conn->sockfd, &rset);
					n = select(conn->sockfd + 1, (fd_set *) &rset, (fd_set *) &wset, NULL, &timeout);
					if (n < 0)
					{
						int			select_errno = errno;
						if (select_errno == EINTR)
							continue;
						/*
						 * if we got an error in teardown, ignore it: treat it
						 * as a stop message
						 */
						if (transportStates->teardownActive)
						{
#ifdef AMS_VERBOSE_LOGGING
							print_connection(transportStates, conn->sockfd, "stop from");
#endif
							conn->stillActive = false;
							return false;
						}
						ereport(ERROR,
								(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
								 errmsg("interconnect error writing an outgoing packet: %m"),
								 errdetail("Error during select() call (error: %d), for remote connection: contentId=%d at %s",
										   select_errno, conn->remoteContentId,
										   conn->remoteHostAndPort)));
					}
					/*
					 * as a sender... if there is something to read... it must
					 * mean its a StopSendingMessage or receiver has teared down
					 * the interconnect, we don't even bother to read it.
					 */
					if (MPP_FD_ISSET(conn->sockfd, &rset) || transportStates->teardownActive)
					{
#ifdef AMS_VERBOSE_LOGGING
						print_connection(transportStates, conn->sockfd, "stop from");
#endif
						conn->stillActive = false;
						return false;
					}
				} while (n < 1);
			}
			else
			{
				/*
				 * if we got an error in teardown, ignore it: treat it as a
				 * stop message
				 */
				if (transportStates->teardownActive)
				{
#ifdef AMS_VERBOSE_LOGGING
					print_connection(transportStates, conn->sockfd, "stop from");
#endif
					conn->stillActive = false;
					return false;
				}
				/* check whether receiver has teared down the interconnect */
				timeout.tv_sec = 0;
				timeout.tv_usec = 0;
				MPP_FD_ZERO(&rset);
				MPP_FD_SET(conn->sockfd, &rset);
				n = select(conn->sockfd + 1, (fd_set *) &rset, NULL, NULL, &timeout);
				/*
				 * as a sender... if there is something to read... it must
				 * mean its a StopSendingMessage or receiver has teared down
				 * the interconnect, we don't even bother to read it.
				 */
				if (n > 0 && MPP_FD_ISSET(conn->sockfd, &rset))
				{
#ifdef AMS_VERBOSE_LOGGING
					print_connection(transportStates, conn->sockfd, "stop from");
#endif
					/* got a stop message */
					conn->stillActive = false;
					return false;
				}
				ereport(ERROR,
						(errcode(ERRCODE_GP_INTERCONNECTION_ERROR),
						 errmsg("interconnect error writing an outgoing packet"),
						 errdetail("Error during send() call (error:%d) for remote connection: contentId=%d at %s",
								   send_errno, conn->remoteContentId,
								   conn->remoteHostAndPort)));
			}
		}
		else
		{
			sent += n;
		}
	} while (sent < conn->msgSize);
	/* whole message written; reset the buffer for the next batch of chunks */
	conn->tupleCount = 0;
	conn->msgSize = PACKET_HEADER_SIZE;
	return true;
}
/*
 * SendChunkTCP
 *		Append one tuple chunk to the connection's outgoing buffer, flushing
 *		the buffer first when the chunk would overflow it. Tuples are often
 *		tiny, so chunks are aggregated locally before hitting the kernel.
 *
 * conn     - destination connection
 * tcItem   - chunk to send
 * motionId - motion node id
 *
 * Returns false if the receiver told us to stop during a flush (connection
 * went inactive); true otherwise.
 */
static bool
SendChunkTCP(ChunkTransportState *transportStates, ChunkTransportStateEntry *pEntry, MotionConn *conn, TupleChunkListItem tcItem, int16 motionId)
{
	int			chunkLen = tcItem->chunk_length;

	Assert(conn->msgSize > 0);
#ifdef AMS_VERBOSE_LOGGING
	elog(DEBUG5, "sendChunk: msgSize %d this chunk length %d", conn->msgSize, tcItem->chunk_length);
#endif
	/* Not enough room left for this chunk? Push the pending bytes out first. */
	if (conn->msgSize + chunkLen > Gp_max_packet_size &&
		!flushBuffer(transportStates, pEntry, conn, motionId))
		return false;

	memcpy(conn->pBuff + conn->msgSize, tcItem->chunk_data, chunkLen);
	conn->msgSize += chunkLen;
	conn->tupleCount++;
	return true;
}
/*
 * gp_set_monotonic_begin_time: set the beginTime and endTime to the current
 * time.
 *
 * Both fields are zeroed first so that gp_get_monotonic_time() takes the
 * "fresh clock reading" path; beginTime is then copied from the endTime it
 * produced.
 */
static void
gp_set_monotonic_begin_time(GpMonotonicTime *time)
{
	time->beginTime.tv_sec = 0;
	time->beginTime.tv_usec = 0;
	time->endTime.tv_sec = 0;
	time->endTime.tv_usec = 0;
	gp_get_monotonic_time(time);
	time->beginTime.tv_sec = time->endTime.tv_sec;
	time->beginTime.tv_usec = time->endTime.tv_usec;
}
/*
 * gp_get_monotonic_time
 *	This function returns the time in the monotonic order.
 *
 * The new time is stored in time->endTime, which has a larger value than
 * the original value. The original endTime is lost.
 *
 * This function is intended for computing elapsed time between two
 * calls. It is not for getting the system time.
 */
static void
gp_get_monotonic_time(GpMonotonicTime *time)
{
	struct timeval newTime;
	int			status;
#if HAVE_LIBRT
	/* Use clock_gettime to return monotonic time value. */
	struct timespec ts;
	status = clock_gettime(CLOCK_MONOTONIC, &ts);
	newTime.tv_sec = ts.tv_sec;
	newTime.tv_usec = ts.tv_nsec / 1000;
#else
	/* Fall back to wall-clock time, which may jump backwards. */
	gettimeofday(&newTime, NULL);
	status = 0;					/* gettimeofday always succeeds. */
#endif
	if (status == 0 &&
		timeCmp(&time->endTime, &newTime) < 0)
	{
		/* Clock moved forward: accept the new reading. */
		time->endTime.tv_sec = newTime.tv_sec;
		time->endTime.tv_usec = newTime.tv_usec;
	}
	else
	{
		/*
		 * Clock read failed or went backwards: preserve monotonicity by
		 * advancing the stored endTime by one microsecond (with carry into
		 * seconds).
		 */
		time->endTime.tv_usec = time->endTime.tv_usec + 1;
		time->endTime.tv_sec = time->endTime.tv_sec +
			(time->endTime.tv_usec / USECS_PER_SECOND);
		time->endTime.tv_usec = time->endTime.tv_usec % USECS_PER_SECOND;
	}
}
/*
 * Three-way comparison of two struct timeval values.
 *
 * Returns 1 if t1 > t2, 0 if t1 == t2, -1 if t1 < t2.
 */
static inline int
timeCmp(struct timeval *t1, struct timeval *t2)
{
	/* Seconds dominate; fall back to microseconds only on a tie. */
	if (t1->tv_sec != t2->tv_sec)
		return (t1->tv_sec > t2->tv_sec) ? 1 : -1;
	if (t1->tv_usec != t2->tv_usec)
		return (t1->tv_usec > t2->tv_usec) ? 1 : -1;
	return 0;
}
/*
 * gp_get_elapsed_us -- return the elapsed time in microseconds
 * after the given time->beginTime.
 *
 * If time->beginTime is not set (0), then return 0.
 *
 * Note that the beginTime is not changed, but the endTime is set
 * to the current time.
 */
static inline uint64
gp_get_elapsed_us(GpMonotonicTime *time)
{
	/* beginTime never initialized: no interval to measure. */
	if (time->beginTime.tv_sec == 0 &&
		time->beginTime.tv_usec == 0)
		return 0;
	/* Refresh endTime, then report endTime - beginTime in microseconds. */
	gp_get_monotonic_time(time);
	return ((time->endTime.tv_sec - time->beginTime.tv_sec) * USECS_PER_SECOND +
			(time->endTime.tv_usec - time->beginTime.tv_usec));
}
/*
 * gp_get_elapsed_ms -- elapsed time since time->beginTime, in milliseconds.
 */
static inline uint64
gp_get_elapsed_ms(GpMonotonicTime *time)
{
	return gp_get_elapsed_us(time) / (USECS_PER_SECOND / MSECS_PER_SECOND);
}
|
995f68dfea650f1634314a2bced0708cb01ebc5d
|
e3acfc4f06840e23ef1185dcf367f40d3e3f59b4
|
/tests/regression/02-base/54-spawn-special-unknown.c
|
67d4b17ca60654f195b796deca78aa66275ba5b7
|
[
"MIT"
] |
permissive
|
goblint/analyzer
|
d62d3c610b86ed288849371b41c330c30678abc7
|
69ee7163eef0bfbfd6a4f3b9fda7cea5ce9ab79f
|
refs/heads/master
| 2023-08-16T21:58:53.013737
| 2023-08-16T08:49:18
| 2023-08-16T08:49:18
| 2,066,905
| 141
| 62
|
MIT
| 2023-09-14T18:48:34
| 2011-07-18T15:10:56
|
OCaml
|
UTF-8
|
C
| false
| false
| 237
|
c
|
54-spawn-special-unknown.c
|
#include <pthread.h>
#include <goblint.h>
// Global observed by the analyzer; no visible code ever writes it.
int g;
int main() {
  // Deliberately uninitialized function pointer: the spawned thread's body
  // is unknown to the analyzer.
  void (*unknown)(void*);
  pthread_t id;
  // NOTE(review): handler type void (*)(void*) does not match
  // pthread_create's expected void *(*)(void*) — presumably part of the
  // regression-test setup; confirm before "fixing".
  pthread_create(&id, NULL, unknown, NULL);
  __goblint_check(g == 0); // UNKNOWN! (unknown thread may invalidate)
  return 0;
}
|
0cb7691479d1e00b2a98990b54f4b1b0bb998efb
|
2c052c996d4267623276a681308bf87ea7388f60
|
/src/runtime/json.c
|
7e1567486e2d714cd5da269f919e3b4c02690f33
|
[
"Apache-2.0"
] |
permissive
|
nanovms/nanos
|
17d3ce113b63c4370e40d291b8fd8fb9d943c02d
|
9085e091e5250fc58bf036591c8959e05fd7957f
|
refs/heads/master
| 2023-08-25T16:49:14.521701
| 2023-08-25T14:00:54
| 2023-08-25T14:14:13
| 115,159,616
| 2,055
| 131
|
Apache-2.0
| 2023-09-14T17:16:19
| 2017-12-23T00:25:34
|
C
|
UTF-8
|
C
| false
| false
| 22,158
|
c
|
json.c
|
#include <runtime.h>
/* Internal continuation types: a sub-parser hands its result (or an error
 * string) back to its parent and receives the parser to continue with. */
typedef closure_type(parse_finish_internal, parser, void *);
typedef closure_type(parse_error_internal, parser, string);
/* State shared by every sub-parser: its heap plus completion/error hooks. */
struct parser_common {
    heap h;
    parse_finish_internal c;
    parse_error_internal e;
};
/* String parser: accumulates characters into s until the closing quote;
 * escape tracks a pending backslash. */
declare_closure_struct(0, 1, parser, json_string_parse,
                       character, in);
typedef struct json_string_p {
    struct parser_common p;
    closure_struct(json_string_parse, parse);
    string s;
    boolean escape;
} *json_string_p;
/* Number parser: only validates the shape (digits, one optional '.'). */
declare_closure_struct(0, 1, parser, json_number_parse,
                       character, in);
typedef struct json_number_p {
    struct parser_common p;
    closure_struct(json_number_parse, parse);
    boolean digit_found;
    boolean fractional;
} *json_number_p;
/* Boolean parser: matches the remainder of "true"/"false" literal. */
declare_closure_struct(0, 1, parser, json_boolean_parse,
                       character, in);
typedef struct json_boolean_p {
    struct parser_common p;
    closure_struct(json_boolean_parse, parse);
    boolean value;
    int char_count;
} *json_boolean_p;
/* Null parser: matches the remainder of the "null" literal. */
declare_closure_struct(0, 1, parser, json_null_parse,
                       character, in);
typedef struct json_null_p {
    struct parser_common p;
    closure_struct(json_null_parse, parse);
    int char_count;
} *json_null_p;
/* Value parser: dispatches on the first non-whitespace character to the
 * appropriate typed sub-parser. */
declare_closure_struct(0, 1, parser, json_value_parse,
                       character, in);
declare_closure_struct(0, 1, parser, json_value_complete,
                       void *, result);
declare_closure_struct(0, 1, parser, json_value_error,
                       string, err);
typedef struct json_value_p {
    struct parser_common p;
    closure_struct(json_value_parse, parse);
    closure_struct(json_value_complete, c);
    closure_struct(json_value_error, e);
} *json_value_p;
/* Attribute parser: parses one "name": value pair into parent_obj. */
declare_closure_struct(0, 1, parser, json_attr_parse,
                       character, in);
declare_closure_struct(0, 1, parser, json_attr_name_complete,
                       void *, result);
declare_closure_struct(0, 1, parser, json_attr_value_complete,
                       void *, result);
declare_closure_struct(0, 1, parser, json_attr_error,
                       string, err);
typedef struct json_attr_p {
    struct parser_common p;
    closure_struct(json_attr_parse, parse);
    closure_struct(json_attr_name_complete, name_c);
    closure_struct(json_attr_value_complete, value_c);
    closure_struct(json_attr_error, e);
    tuple parent_obj;
    string name;
    enum {
        JSON_ATTR_STATE_NAME,
        JSON_ATTR_STATE_VALUE,
    } state;
} *json_attr_p;
/* Object parser: builds a tuple from a sequence of attributes. */
declare_closure_struct(0, 1, parser, json_obj_parse,
                       character, in);
declare_closure_struct(0, 1, parser, json_obj_attr_complete,
                       void *, result);
declare_closure_struct(0, 1, parser, json_obj_attr_error,
                       string, err);
typedef struct json_obj_p {
    struct parser_common p;
    closure_struct(json_obj_parse, parse);
    closure_struct(json_obj_attr_complete, c);
    closure_struct(json_obj_attr_error, e);
    tuple obj;
    enum {
        JSON_OBJ_STATE_ATTR_BEGIN,
        JSON_OBJ_STATE_ATTR_END,
    } state;
} *json_obj_p;
/* Array parser: element values are parsed but discarded. */
declare_closure_struct(0, 1, parser, json_array_parse,
                       character, in);
declare_closure_struct(0, 1, parser, json_array_elem_complete,
                       void *, result);
declare_closure_struct(0, 1, parser, json_array_elem_error,
                       string, err);
typedef struct json_array_p {
    struct parser_common p;
    closure_struct(json_array_parse, parse);
    closure_struct(json_array_elem_complete, c);
    closure_struct(json_array_elem_error, e);
    enum {
        JSON_ARRAY_STATE_ELEM_BEGIN,
        JSON_ARRAY_STATE_ELEM_END,
    } state;
} *json_array_p;
/* Top-level parser state: bridges the public parse_finish/parse_error
 * callbacks to the internal continuation types. */
declare_closure_struct(0, 1, parser, json_parse,
                       character, in);
declare_closure_struct(0, 1, parser, json_complete,
                       void *, result);
declare_closure_struct(0, 1, parser, json_error,
                       string, err);
typedef struct json_p {
    heap h;
    parse_finish finish;
    parse_error err;
    closure_struct(json_parse, parse);
    closure_struct(json_complete, c);
    closure_struct(json_error, e);
} *json_p;
static parser json_obj_parser(heap h, parse_finish_internal c, parse_error_internal e);
static parser json_array_parser(heap h, parse_finish_internal c, parse_error_internal e);
/* Whitespace per the JSON grammar: space, LF, CR, horizontal tab. */
static boolean char_is_whitespace(character in)
{
    const char *ws = " \n\r\t";
    return runtime_strchr(ws, in) != 0;
}
/* Characters that may appear in the body of a (non-negative) number. */
static boolean char_is_numeric(character in)
{
    const char *digits = "1234567890.";
    return runtime_strchr(digits, in) != 0;
}
/*
 * Match one character of a fixed literal (e.g. "true", "null").
 *
 * On a match: returns p to keep consuming, or applies c (with a null
 * result — literal values are discarded) when the final character has been
 * seen. On a mismatch: applies e with an "unexpected character" message.
 */
static parser parse_literal(parser p, character in, int char_index, const char *literal,
                            int literal_len, parse_finish_internal c, parse_error_internal e)
{
    if (in == literal[char_index])
        return (char_index < literal_len - 1) ? p : apply(c, 0);
    string err_string = little_stack_buffer(32);
    bprintf(err_string, "unexpected character %c", in);
    return apply(e, err_string);
}
/*
 * Per-character handler for a JSON string (opening quote already consumed).
 * Accumulates into p->s; on the closing quote the string is handed to the
 * completion closure (which takes ownership of it).
 */
define_closure_function(0, 1, parser, json_string_parse,
                        character, in)
{
    json_string_p p = struct_from_field(closure_self(), json_string_p, parse);
    string s = p->s;
    if (in == CHARACTER_INVALID) {
        /* input ended mid-string: report and release our state */
        parser next = apply(p->p.e, alloca_wrap_cstring("unexpected end of input"));
        deallocate_buffer(s);
        deallocate(p->p.h, p, sizeof(*p));
        return next;
    }
    if (!p->escape) {
        if (in == '\"') {
            /* closing quote: deliver s; callee now owns the buffer */
            parser next = apply(p->p.c, s);
            deallocate(p->p.h, p, sizeof(*p));
            return next;
        }
        if (in == '\\')
            p->escape = true;
        else
            push_character(s, in);
    } else {
        /* translate the common escape sequences; anything else (including
         * '\\' and '\"') is passed through literally */
        switch (in) {
        case 'n':
            in = '\n';
            break;
        case 't':
            in = '\t';
            break;
        case 'r':
            in = '\r';
            break;
        case 'b':
            in = '\b';
            break;
        case 'f':
            in = '\f';
            break;
        }
        push_character(s, in);
        p->escape = false;
    }
    return (parser)closure_self();
}
/*
 * Allocate a parser for a JSON string; the opening quote must already have
 * been consumed by the caller. Returns INVALID_ADDRESS on allocation
 * failure.
 */
static parser json_string_parser(heap h, parse_finish_internal c, parse_error_internal e)
{
    json_string_p p = allocate(h, sizeof(*p));
    if (p == INVALID_ADDRESS)
        return INVALID_ADDRESS;
    p->p.h = h;
    p->p.c = c;
    p->p.e = e;
    p->escape = false;
    p->s = allocate_string(8);
    if (p->s == INVALID_ADDRESS) {
        deallocate(h, p, sizeof(*p));
        return INVALID_ADDRESS;
    }
    return (parser)init_closure(&p->parse, json_string_parse);
}
/* Actual parsing of JSON numbers is not implemented. */
/*
 * Per-character handler that validates a number's shape: at least one digit,
 * at most one '.', with at least one digit after it. The first
 * non-numeric character terminates the number and is re-fed to the
 * continuation parser; the numeric value itself is discarded.
 */
define_closure_function(0, 1, parser, json_number_parse,
                        character, in)
{
    json_number_p p = struct_from_field(closure_self(), json_number_p, parse);
    string err_string;
    parser next;
    if (!char_is_numeric(in)) {
        if (!p->digit_found) {
            err_string = alloca_wrap_cstring("no digits found");
            goto error;
        }
        /* number complete: pass the terminating character downstream */
        parser next = apply(p->p.c, 0);
        deallocate(p->p.h, p, sizeof(*p));
        return apply(next, in);
    }
    if (in == '.') {
        if (!p->fractional) {
            /* start of fractional part; require a digit after the point */
            p->fractional = true;
            p->digit_found = false;
        } else {
            err_string = alloca_wrap_cstring("unexpected decimal point");
            goto error;
        }
    } else {
        p->digit_found = true;
    }
    return (parser)closure_self();
  error:
    next = apply(p->p.e, err_string);
    deallocate(p->p.h, p, sizeof(*p));
    return next;
}
/*
 * Allocate a parser for a JSON number. `first` is the character that
 * triggered number parsing: a leading '-' is consumed here, any other
 * character is immediately fed to the new parser.
 */
static parser json_number_parser(heap h, character first, parse_finish_internal c,
                                 parse_error_internal e)
{
    json_number_p p = allocate(h, sizeof(*p));
    if (p == INVALID_ADDRESS)
        return INVALID_ADDRESS;
    p->p.h = h;
    p->p.c = c;
    p->p.e = e;
    p->digit_found = false;
    p->fractional = false;
    parser number_p = (parser)init_closure(&p->parse, json_number_parse);
    if (first == '-')
        return number_p;
    return apply(number_p, first);
}
/*
 * Per-character handler matching the tail of "true"/"false" (the first
 * letter was consumed by the dispatcher). Delegates to parse_literal and
 * frees itself once the literal is complete or fails.
 */
define_closure_function(0, 1, parser, json_boolean_parse,
                        character, in)
{
    json_boolean_p p = struct_from_field(closure_self(), json_boolean_p, parse);
    const char *literal = (p->value ? "true" : "false");
    int len = (p->value ? 4 : 5);
    parser self = (parser)closure_self();
    parser next = parse_literal(self, in, p->char_count, literal, len, p->p.c, p->p.e);
    /* parse_literal returns self only while the literal is still incomplete */
    if (next == self)
        p->char_count++;
    else
        deallocate(p->p.h, p, sizeof(*p));
    return next;
}
/*
 * Allocate a parser for a boolean literal; `value` selects "true" vs
 * "false". char_count starts at 1 because the first letter was already
 * consumed by the caller.
 */
static parser json_boolean_parser(heap h, boolean value, parse_finish_internal c,
                                  parse_error_internal e)
{
    json_boolean_p p = allocate(h, sizeof(*p));
    if (p == INVALID_ADDRESS)
        return INVALID_ADDRESS;
    p->p.h = h;
    p->p.c = c;
    p->p.e = e;
    p->char_count = 1;
    p->value = value;
    return (parser)init_closure(&p->parse, json_boolean_parse);
}
/*
 * Per-character handler matching the tail of "null" (the 'n' was consumed
 * by the dispatcher). Frees itself once the literal completes or fails.
 */
define_closure_function(0, 1, parser, json_null_parse,
                        character, in)
{
    json_null_p p = struct_from_field(closure_self(), json_null_p, parse);
    parser self = (parser)closure_self();
    parser next = parse_literal(self, in, p->char_count, "null", 4, p->p.c, p->p.e);
    /* parse_literal returns self only while the literal is still incomplete */
    if (next == self)
        p->char_count++;
    else
        deallocate(p->p.h, p, sizeof(*p));
    return next;
}
/*
 * Allocate a parser for the "null" literal. char_count starts at 1 since
 * the caller already consumed the leading 'n'.
 */
static parser json_null_parser(heap h, parse_finish_internal c, parse_error_internal e)
{
    json_null_p p = allocate(h, sizeof(*p));
    if (p == INVALID_ADDRESS)
        return INVALID_ADDRESS;
    p->p.h = h;
    p->p.c = c;
    p->p.e = e;
    p->char_count = 1;
    return (parser)init_closure(&p->parse, json_null_parse);
}
/*
 * Dispatcher for a JSON value: skips leading whitespace, then selects the
 * typed sub-parser from the first significant character. For strings,
 * objects and arrays the opening delimiter is consumed here; for numbers
 * and literals the first character is handed to the sub-parser.
 */
define_closure_function(0, 1, parser, json_value_parse,
                        character, in)
{
    if (char_is_whitespace(in))
        return (parser)closure_self();
    json_value_p p = struct_from_field(closure_self(), json_value_p, parse);
    parse_error_internal e = (parse_error_internal)&p->e;
    string err_string;
    if (in == CHARACTER_INVALID) {
        err_string = alloca_wrap_cstring("unexpected end of input");
        goto error;
    }
    heap h = p->p.h;
    parser value_parser;
    parse_finish_internal c = (parse_finish_internal)&p->c;
    if (char_is_numeric(in))
        value_parser = json_number_parser(h, in, c, e);
    else
        switch (in) {
        case '"':
            value_parser = json_string_parser(h, c, e);
            break;
        case '{':
            value_parser = json_obj_parser(h, c, e);
            break;
        case '[':
            value_parser = json_array_parser(h, c, e);
            break;
        case '-':
            value_parser = json_number_parser(h, in, c, e);
            break;
        case 't':
        case 'f':
            value_parser = json_boolean_parser(h, in == 't', c, e);
            break;
        case 'n':
            value_parser = json_null_parser(h, c, e);
            break;
        default:
            err_string = little_stack_buffer(32);
            bprintf(err_string, "unexpected character %c", in);
            goto error;
        }
    if (value_parser != INVALID_ADDRESS)
        return value_parser;
    err_string = alloca_wrap_cstring("failed to allocate value parser");
  error:
    /* p itself is freed later by json_value_complete/json_value_error */
    return apply(e, err_string);
}
/*
 * Completion trampoline: forwards the sub-parser's result to the value
 * parser's own completion closure, then frees the value-parser state.
 */
define_closure_function(0, 1, parser, json_value_complete,
                        void *, result)
{
    json_value_p p = struct_from_field(closure_self(), json_value_p, c);
    parser next = apply(p->p.c, result);
    deallocate(p->p.h, p, sizeof(*p));
    return next;
}
/*
 * Error trampoline: forwards the sub-parser's error upstream, then frees
 * the value-parser state.
 */
define_closure_function(0, 1, parser, json_value_error,
                        buffer, err)
{
    json_value_p p = struct_from_field(closure_self(), json_value_p, e);
    parser next = apply(p->p.e, err);
    deallocate(p->p.h, p, sizeof(*p));
    return next;
}
/*
 * Allocate a dispatcher for a single JSON value; c/e receive the result or
 * error once the value (of whatever type) has been parsed.
 */
static parser json_value_parser(heap h, parse_finish_internal c, parse_error_internal e)
{
    json_value_p p = allocate(h, sizeof(*p));
    if (p == INVALID_ADDRESS)
        return INVALID_ADDRESS;
    init_closure(&p->c, json_value_complete);
    init_closure(&p->e, json_value_error);
    p->p.h = h;
    p->p.c = c;
    p->p.e = e;
    return (parser)init_closure(&p->parse, json_value_parse);
}
/*
 * Per-character handler for one object attribute. In the NAME state the
 * opening quote has already been consumed, so the character is fed to a
 * fresh string parser; in the VALUE state it expects the ':' separator and
 * then hands off to a value parser.
 */
define_closure_function(0, 1, parser, json_attr_parse,
                        character, in)
{
    json_attr_p p = struct_from_field(closure_self(), json_attr_p, parse);
    parse_error_internal e = (parse_error_internal)&p->e;
    string err_string = 0;
    if (in == CHARACTER_INVALID) {
        err_string = alloca_wrap_cstring("unexpected end of input");
        goto error;
    }
    heap h = p->p.h;
    switch (p->state) {
    case JSON_ATTR_STATE_NAME: {
        parser name_p = json_string_parser(h, (parse_finish_internal)&p->name_c, e);
        if (name_p != INVALID_ADDRESS)
            return apply(name_p, in);
        err_string = alloca_wrap_cstring("failed to allocate attribute name parser");
        break;
    }
    case JSON_ATTR_STATE_VALUE: {
        /* whitespace is allowed between the name and the ':' */
        if (char_is_whitespace(in))
            return (parser)closure_self();
        if (in != ':') {
            err_string = little_stack_buffer(32);
            bprintf(err_string, "unexpected character %c", in);
            break;
        }
        parser value_p = json_value_parser(h, (parse_finish_internal)&p->value_c, e);
        if (value_p != INVALID_ADDRESS)
            return value_p;
        err_string = alloca_wrap_cstring("failed to allocate attribute value parser");
        break;
    }
    }
  error:
    /* p is freed by json_attr_error, which this closure points at */
    return apply(e, err_string);
}
/*
 * Receives the parsed attribute name. Rejects empty names; otherwise
 * stores the name and advances to the VALUE state.
 */
define_closure_function(0, 1, parser, json_attr_name_complete,
                        void *, result)
{
    json_attr_p p = struct_from_field(closure_self(), json_attr_p, name_c);
    p->name = result;
    if (buffer_length(p->name) == 0) {
        /* json_attr_error frees both p->name and p */
        parse_error_internal e = (parse_error_internal)&p->e;
        return apply(e, alloca_wrap_cstring("empty attribute name"));
    }
    p->state = JSON_ATTR_STATE_VALUE;
    return (parser)&p->parse;
}
/*
 * Receives the parsed attribute value: stores it in the parent tuple under
 * the interned name (a null result — discarded literal/array — is simply
 * dropped), then reports the parent object complete and frees itself.
 */
define_closure_function(0, 1, parser, json_attr_value_complete,
                        void *, result)
{
    json_attr_p p = struct_from_field(closure_self(), json_attr_p, value_c);
    if (result)
        set(p->parent_obj, intern(p->name), result);
    deallocate_buffer(p->name);
    parser next = apply(p->p.c, p->parent_obj);
    deallocate(p->p.h, p, sizeof(*p));
    return next;
}
/*
 * Error path for the attribute parser: propagates the error upstream and
 * releases the name buffer (if one was captured) and the parser state.
 */
define_closure_function(0, 1, parser, json_attr_error,
                        buffer, err)
{
    json_attr_p p = struct_from_field(closure_self(), json_attr_p, e);
    parser next = apply(p->p.e, err);
    if (p->name)
        deallocate_buffer(p->name);
    deallocate(p->p.h, p, sizeof(*p));
    return next;
}
/*
 * Allocate a parser for one "name": value attribute of parent_obj. The
 * completion closure c is applied with parent_obj once the value has been
 * parsed and stored.
 */
static parser json_attr_parser(heap h, tuple parent_obj, parse_finish_internal c,
                               parse_error_internal e)
{
    json_attr_p p = allocate(h, sizeof(*p));
    if (p == INVALID_ADDRESS)
        return INVALID_ADDRESS;
    init_closure(&p->name_c, json_attr_name_complete);
    init_closure(&p->value_c, json_attr_value_complete);
    init_closure(&p->e, json_attr_error);
    p->state = JSON_ATTR_STATE_NAME;
    p->name = 0;
    p->parent_obj = parent_obj;
    p->p.h = h;
    p->p.c = c;
    p->p.e = e;
    return (parser)init_closure(&p->parse, json_attr_parse);
}
/*
 * State machine for a JSON object (the opening '{' has already been
 * consumed). Alternates between expecting the start of an attribute
 * (JSON_OBJ_STATE_ATTR_BEGIN: '"' or '}') and the character after one
 * (JSON_OBJ_STATE_ATTR_END: ',' or '}'). On '}' the accumulated tuple is
 * handed to the completion closure; on any error the tuple is destroyed
 * and the error closure is applied. The parser state is freed on both
 * finish and error paths.
 */
define_closure_function(0, 1, parser, json_obj_parse,
                        character, in)
{
    if (char_is_whitespace(in))
        return (parser)closure_self();
    json_obj_p p = struct_from_field(closure_self(), json_obj_p, parse);
    string err_string;
    parser next;
    if (in == CHARACTER_INVALID) {
        err_string = alloca_wrap_cstring("unexpected end of input");
        goto error;
    }
    switch (p->state) {
    case JSON_OBJ_STATE_ATTR_BEGIN:
        switch (in) {
        case '"': {
            parser attr_p = json_attr_parser(p->p.h, p->obj, (parse_finish_internal)&p->c,
                                             (parse_error_internal)&p->e);
            if (attr_p != INVALID_ADDRESS)
                return attr_p;
            err_string = alloca_wrap_cstring("failed to allocate attribute parser");
            /* Bug fix: previously a bare `break` here fell through into the
             * ATTR_END case below, whose default branch overwrote err_string
             * with a misleading "unexpected character" message. Report the
             * allocation failure directly. */
            goto error;
        }
        case '}':
            goto finish;
        default:
            goto unexpected_in;
        }
    case JSON_OBJ_STATE_ATTR_END:
        switch (in) {
        case ',':
            /* another attribute (or a trailing '}') follows */
            p->state = JSON_OBJ_STATE_ATTR_BEGIN;
            break;
        case '}':
            goto finish;
        default:
            goto unexpected_in;
        }
        break;
    }
    return (parser)closure_self();
  unexpected_in:
    err_string = little_stack_buffer(32);
    bprintf(err_string, "unexpected character %c", in);
  error:
    destruct_value(p->obj, true);
    next = apply(p->p.e, err_string);
    deallocate(p->p.h, p, sizeof(*p));
    return next;
  finish:
    next = apply(p->p.c, p->obj);
    deallocate(p->p.h, p, sizeof(*p));
    return next;
}
/*
 * One attribute finished: switch to expecting ',' or '}' and resume the
 * object parser. The result (the parent tuple) is already stored in p->obj.
 */
define_closure_function(0, 1, parser, json_obj_attr_complete,
                        void *, result)
{
    json_obj_p p = struct_from_field(closure_self(), json_obj_p, c);
    p->state = JSON_OBJ_STATE_ATTR_END;
    return (parser)&p->parse;
}
/* Error from the attribute sub-parser: tear down the partially-built object
 * and forward the error to this parser's consumer. */
define_closure_function(0, 1, parser, json_obj_attr_error,
                        buffer, err)
{
    json_obj_p obj = struct_from_field(closure_self(), json_obj_p, e);
    destruct_value(obj->obj, true);
    parser cont = apply(obj->p.e, err);
    deallocate(obj->p.h, obj, sizeof(*obj));
    return cont;
}
/* Allocate a parser for a JSON object body (input following the opening
 * '{').  Attributes are collected into a freshly allocated tuple, which is
 * passed to c on completion; e receives an error string on failure.
 * Returns INVALID_ADDRESS if the parser state cannot be allocated.
 * NOTE(review): the allocate_tuple() result is not checked for failure
 * here — confirm whether allocate_tuple can fail in this environment. */
static parser json_obj_parser(heap h, parse_finish_internal c, parse_error_internal e)
{
    json_obj_p p = allocate(h, sizeof(*p));
    if (p == INVALID_ADDRESS)
        return INVALID_ADDRESS;
    p->obj = allocate_tuple();
    p->p.h = h;
    p->p.c = c;
    p->p.e = e;
    p->state = JSON_OBJ_STATE_ATTR_BEGIN;
    init_closure(&p->c, json_obj_attr_complete);
    init_closure(&p->e, json_obj_attr_error);
    return (parser)init_closure(&p->parse, json_obj_parse);
}
/* State machine for a JSON array body (input following the opening '[').
 * Element values are parsed for well-formedness and then discarded; the
 * completion callback receives a NULL result at the matching ']'.
 *
 * Fix over the original: the element-separator transition assigned
 * JSON_OBJ_STATE_ATTR_BEGIN (the object parser's enumerator) to the array
 * parser's state, which only worked because both enumerators presumably
 * share the value 0; use the array parser's own constant. */
define_closure_function(0, 1, parser, json_array_parse,
                        character, in)
{
    json_array_p p = struct_from_field(closure_self(), json_array_p, parse);
    string err_string;
    parser next;
    if (in == CHARACTER_INVALID) {
        err_string = alloca_wrap_cstring("unexpected end of input");
        goto error;
    }
    if (in == ']') {
        /* Array contents are discarded. */
        next = apply(p->p.c, 0);
        deallocate(p->p.h, p, sizeof(*p));
        return next;
    }
    switch (p->state) {
    case JSON_ARRAY_STATE_ELEM_BEGIN:
        next = json_value_parser(p->p.h, (parse_finish_internal)&p->c, (parse_error_internal)&p->e);
        if (next != INVALID_ADDRESS)
            return apply(next, in);
        err_string = alloca_wrap_cstring("failed to allocate array element parser");
        goto error;
    case JSON_ARRAY_STATE_ELEM_END:
        if (in == ',') {
            p->state = JSON_ARRAY_STATE_ELEM_BEGIN;
            break;
        }
        err_string = little_stack_buffer(32);
        bprintf(err_string, "unexpected character %c", in);
        goto error;
    }
    return (parser)closure_self();
  error:
    next = apply(p->p.e, err_string);
    deallocate(p->p.h, p, sizeof(*p));
    return next;
}
/* An array element finished parsing; drop its value (arrays are validated,
 * not stored) and wait for ',' or ']'. */
define_closure_function(0, 1, parser, json_array_elem_complete,
                        void *, result)
{
    json_array_p arr = struct_from_field(closure_self(), json_array_p, c);
    if (result)
        deallocate_value(result);
    arr->state = JSON_ARRAY_STATE_ELEM_END;
    return (parser)&arr->parse;
}
/* Element parse error: free the array parser state and pass the error on. */
define_closure_function(0, 1, parser, json_array_elem_error,
                        buffer, err)
{
    json_array_p arr = struct_from_field(closure_self(), json_array_p, e);
    parser cont = apply(arr->p.e, err);
    deallocate(arr->p.h, arr, sizeof(*arr));
    return cont;
}
/* Allocate a parser for a JSON array body.  Elements are validated and
 * discarded; c is applied with a NULL result at the closing ']', and e
 * receives an error string on failure.  Returns INVALID_ADDRESS if the
 * parser state cannot be allocated. */
static parser json_array_parser(heap h, parse_finish_internal c, parse_error_internal e)
{
    json_array_p p = allocate(h, sizeof(*p));
    if (p == INVALID_ADDRESS)
        return INVALID_ADDRESS;
    p->p.h = h;
    p->p.c = c;
    p->p.e = e;
    p->state = JSON_ARRAY_STATE_ELEM_BEGIN;
    init_closure(&p->c, json_array_elem_complete);
    init_closure(&p->e, json_array_elem_error);
    return (parser)init_closure(&p->parse, json_array_parse);
}
/* Top-level entry state: skips whitespace and expects a '{' starting a JSON
 * object (only objects are accepted at the top level).  On any other
 * character, or on failure to allocate the object parser, the user's error
 * handler is applied and this parser remains current, so parsing can
 * resume with subsequent input. */
define_closure_function(0, 1, parser, json_parse,
                        character, in)
{
    if (char_is_whitespace(in) || (in == CHARACTER_INVALID))
        return (parser)closure_self();
    json_p p = struct_from_field(closure_self(), json_p, parse);
    string err_string;
    if (in == '{') {
        parser obj_parser = json_obj_parser(p->h, (parse_finish_internal)&p->c,
                                            (parse_error_internal)&p->e);
        if (obj_parser != INVALID_ADDRESS)
            return obj_parser;
        err_string = alloca_wrap_cstring("failed to allocate object parser");
    } else {
        err_string = little_stack_buffer(32);
        bprintf(err_string, "unexpected character %c", in);
    }
    apply(p->err, err_string);
    return (parser)closure_self();
}
/* A top-level object finished: hand the result to the user callback and
 * return to the top-level state for any further input. */
define_closure_function(0, 1, parser, json_complete,
                        void *, result)
{
    json_p jp = struct_from_field(closure_self(), json_p, c);
    apply(jp->finish, result);
    return (parser)&jp->parse;
}
/* A parse error bubbled up: report it and return to the top-level state. */
define_closure_function(0, 1, parser, json_error,
                        buffer, err)
{
    json_p jp = struct_from_field(closure_self(), json_p, e);
    apply(jp->err, err);
    return (parser)&jp->parse;
}
/* Public constructor: allocate the top-level JSON parser.
 *   c:   applied with the parsed tuple on each complete top-level object
 *   err: applied with a (possibly stack-allocated) error string on failure
 * Returns INVALID_ADDRESS on allocation failure.
 * Free with json_parser_free(). */
parser json_parser(heap h, parse_finish c, parse_error err)
{
    json_p p = allocate(h, sizeof(*p));
    if (p == INVALID_ADDRESS)
        return INVALID_ADDRESS;
    p->h = h;
    p->finish = c;
    p->err = err;
    init_closure(&p->c, json_complete);
    init_closure(&p->e, json_error);
    return (parser)init_closure(&p->parse, json_parse);
}
/* Release a parser previously returned by json_parser(). */
void json_parser_free(parser p)
{
    json_p top = struct_from_field(p, json_p, parse);
    deallocate(top->h, top, sizeof(*top));
}
|
07849b4b57a8470032eb08626bbe4bab5a81fbdc
|
8a51a96f61699f0318315ccc89cef39f6866f2b5
|
/src/backend/utils/misc/pg_config.c
|
5ca8445a4cfd3b9a18b8c8ca122d668f88c0540f
|
[
"PostgreSQL"
] |
permissive
|
postgres/postgres
|
979febf2b41c00090d1256228f768f33e7ef3b6f
|
b5934bfd6071fed3a38cea0cfaa93afda63d9c0c
|
refs/heads/master
| 2023-08-31T00:10:01.373472
| 2023-08-30T23:07:48
| 2023-08-30T23:07:48
| 927,442
| 13,691
| 4,807
|
NOASSERTION
| 2023-09-09T13:59:15
| 2010-09-21T11:35:45
|
C
|
UTF-8
|
C
| false
| false
| 1,274
|
c
|
pg_config.c
|
/*-------------------------------------------------------------------------
*
* pg_config.c
* Expose same output as pg_config except as an SRF
*
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* src/backend/utils/misc/pg_config.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "catalog/pg_type.h"
#include "common/config_info.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "port.h"
#include "utils/builtins.h"
/*
 * pg_config: set-returning function exposing the same name/setting pairs as
 * the pg_config command-line utility, one (name, setting) text row each.
 *
 * Fix over the original: the loop index was a signed int compared against
 * the size_t configdata_len (signed/unsigned mismatch); use size_t.
 */
Datum
pg_config(PG_FUNCTION_ARGS)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	ConfigData *configdata;
	size_t		configdata_len;
	size_t		i;

	/* initialize our tuplestore */
	InitMaterializedSRF(fcinfo, 0);

	configdata = get_configdata(my_exec_path, &configdata_len);

	for (i = 0; i < configdata_len; i++)
	{
		Datum		values[2];
		bool		nulls[2];

		memset(values, 0, sizeof(values));
		memset(nulls, 0, sizeof(nulls));

		values[0] = CStringGetTextDatum(configdata[i].name);
		values[1] = CStringGetTextDatum(configdata[i].setting);

		/* shove the row into the result tuplestore */
		tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
	}

	return (Datum) 0;
}
|
22aa989e807134c5f3caaa55ebf04f11516c8d78
|
0fa1152e1e434ce9fe9e2db95f43f25675bf7d27
|
/src/drivers/distance_sensor/broadcom/afbrs50/API/Src/irq.c
|
d1787c0d25ca3c00f70e8974baec9eab63436cf6
|
[
"BSD-3-Clause"
] |
permissive
|
PX4/PX4-Autopilot
|
4cc90dccc9285ca4db7f595ac5a7547df02ca92e
|
3d61ab84c42ff8623bd48ff0ba74f9cf26bb402b
|
refs/heads/main
| 2023-08-30T23:58:35.398450
| 2022-03-26T01:29:03
| 2023-08-30T15:40:01
| 5,298,790
| 3,146
| 3,798
|
BSD-3-Clause
| 2023-09-14T17:22:04
| 2012-08-04T21:19:36
|
C++
|
UTF-8
|
C
| false
| false
| 775
|
c
|
irq.c
|
#include <nuttx/irq.h>
/* Interrupt state saved by the outermost IRQ_LOCK(); only meaningful while
 * _lock_count > 0. */
static volatile irqstate_t irqstate_flags;
/* Nesting depth of IRQ_LOCK()/IRQ_UNLOCK() pairs. */
static volatile size_t _lock_count = 0;
/*!***************************************************************************
 * @brief Enable IRQ Interrupts
 *
 * Decrements the lock nesting count; when the count drops back to zero,
 * the interrupt state saved by the outermost IRQ_LOCK() is restored.
 * A call with no matching IRQ_LOCK() is a no-op.
 *
 * @return -
 *****************************************************************************/
void IRQ_UNLOCK(void)
{
	if (_lock_count == 0) {
		return;
	}

	_lock_count--;

	if (_lock_count == 0) {
		leave_critical_section(irqstate_flags);
	}
}
/*!***************************************************************************
 * @brief Disable IRQ Interrupts
 *
 * Enters a critical section on the outermost call only and tracks nesting
 * so that interrupts are re-enabled by the matching outermost IRQ_UNLOCK().
 * (The count is incremented only after the critical section is entered.)
 *
 * @return -
 *****************************************************************************/
void IRQ_LOCK(void)
{
	if (0 == _lock_count) {
		irqstate_flags = enter_critical_section();
	}

	_lock_count++;
}
|
64b159ffb91e0f5940cc535e84b61d8fa7f9b3f3
|
a8194cf6ffd12f7551eaba53572744080a0bfef3
|
/module/bdev/malloc/bdev_malloc.h
|
044463419749aae30850096e57bb7a0ca39e5920
|
[
"Intel",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
spdk/spdk
|
51294f67104b8c3d18f19147d63a212e9486c687
|
d62a3810364cb87be352c66acf7c7f968508ca17
|
refs/heads/master
| 2023-08-08T16:07:41.263000
| 2023-08-02T09:06:56
| 2023-08-08T07:01:20
| 39,042,157
| 2,708
| 1,158
|
NOASSERTION
| 2023-08-11T09:50:50
| 2015-07-13T23:15:15
|
C
|
UTF-8
|
C
| false
| false
| 862
|
h
|
bdev_malloc.h
|
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2016 Intel Corporation.
* All rights reserved.
* Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*/
#ifndef SPDK_BDEV_MALLOC_H
#define SPDK_BDEV_MALLOC_H
#include "spdk/stdinc.h"
#include "spdk/bdev_module.h"
/* Completion callback for delete_malloc_disk(); bdeverrno reports the
 * outcome of the deletion. */
typedef void (*spdk_delete_malloc_complete)(void *cb_arg, int bdeverrno);

/* Creation parameters for a malloc (RAM-backed) bdev. */
struct malloc_bdev_opts {
	char *name;                   /* bdev name */
	struct spdk_uuid uuid;        /* bdev UUID */
	uint64_t num_blocks;          /* capacity in logical blocks */
	uint32_t block_size;          /* logical block size, bytes */
	uint32_t physical_block_size; /* physical block size, bytes */
	uint32_t optimal_io_boundary; /* optimal I/O boundary */
	uint32_t md_size;             /* per-block metadata size, bytes */
	bool md_interleave;           /* metadata interleaved with data? */
	enum spdk_dif_type dif_type;  /* end-to-end data protection type */
	bool dif_is_head_of_md;       /* DIF placed at start of metadata? */
};

/* Create a malloc bdev described by opts; on success *bdev receives the new
 * bdev.  (Presumably returns 0 on success / negative errno on failure, per
 * SPDK convention — confirm in bdev_malloc.c.) */
int create_malloc_disk(struct spdk_bdev **bdev, const struct malloc_bdev_opts *opts);

/* Asynchronously delete the named malloc bdev; cb_fn(cb_arg, bdeverrno) is
 * invoked on completion. */
void delete_malloc_disk(const char *name, spdk_delete_malloc_complete cb_fn, void *cb_arg);

#endif /* SPDK_BDEV_MALLOC_H */
|
9664a8d3619fcbce0642cde1f806d5a574881dd4
|
99bdb3251fecee538e0630f15f6574054dfc1468
|
/bsp/allwinner/libraries/sunxi-hal/hal/source/sound/component/aw-alsa-lib/control.c
|
46a7d44c429856197fbd2b205af3ea1b8791aadc
|
[
"Zlib",
"LicenseRef-scancode-proprietary-license",
"MIT",
"BSD-3-Clause",
"X11",
"BSD-4-Clause-UC",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
RT-Thread/rt-thread
|
03a7c52c2aeb1b06a544143b0e803d72f47d1ece
|
3602f891211904a27dcbd51e5ba72fefce7326b2
|
refs/heads/master
| 2023-09-01T04:10:20.295801
| 2023-08-31T16:20:55
| 2023-08-31T16:20:55
| 7,408,108
| 9,599
| 5,805
|
Apache-2.0
| 2023-09-14T13:37:26
| 2013-01-02T14:49:21
|
C
|
UTF-8
|
C
| false
| false
| 3,788
|
c
|
control.c
|
/*
* Copyright (c) 2019-2025 Allwinner Technology Co., Ltd. ALL rights reserved.
*
* Allwinner is a trademark of Allwinner Technology Co.,Ltd., registered in
* the the people's Republic of China and other countries.
* All Allwinner Technology Co.,Ltd. trademarks are used with permission.
*
* DISCLAIMER
* THIRD PARTY LICENCES MAY BE REQUIRED TO IMPLEMENT THE SOLUTION/PRODUCT.
* IF YOU NEED TO INTEGRATE THIRD PARTY’S TECHNOLOGY (SONY, DTS, DOLBY, AVS OR MPEGLA, ETC.)
* IN ALLWINNERS’SDK OR PRODUCTS, YOU SHALL BE SOLELY RESPONSIBLE TO OBTAIN
* ALL APPROPRIATELY REQUIRED THIRD PARTY LICENCES.
* ALLWINNER SHALL HAVE NO WARRANTY, INDEMNITY OR OTHER OBLIGATIONS WITH RESPECT TO MATTERS
* COVERED UNDER ANY REQUIRED THIRD PARTY LICENSE.
* YOU ARE SOLELY RESPONSIBLE FOR YOUR USAGE OF THIRD PARTY’S TECHNOLOGY.
*
*
* THIS SOFTWARE IS PROVIDED BY ALLWINNER"AS IS" AND TO THE MAXIMUM EXTENT
* PERMITTED BY LAW, ALLWINNER EXPRESSLY DISCLAIMS ALL WARRANTIES OF ANY KIND,
* WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING WITHOUT LIMITATION REGARDING
* THE TITLE, NON-INFRINGEMENT, ACCURACY, CONDITION, COMPLETENESS, PERFORMANCE
* OR MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL ALLWINNER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS, OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <string.h>
#include <aw-alsa-lib/pcm.h>
#include <aw-alsa-lib/control.h>
#include <aw-alsa-lib/pcm_plugin.h>
#include "pcm_local.h"
/* Map a PCM device name to the control device name configured for it.
 * Looks up the "ctl.!<name>" entry in the PCM configuration; if it exists
 * and is of a known type ("hw"), *name is redirected to the card name from
 * that configuration.  Otherwise *name is left untouched.
 *
 * Fix over the original: the unbounded strcat() into a 32-byte buffer could
 * overflow the stack for device names longer than 26 characters; use a
 * bounded snprintf() instead (over-long names are truncated). */
void snd_ctl_open(const char **name)
{
    const snd_pcm_config_t *pcm_config = NULL;
    char ctl_name[32];

    snprintf(ctl_name, sizeof(ctl_name), "ctl.!%s", *name);

    pcm_config = snd_pcm_config_get_config(ctl_name);
    if (!pcm_config)
        return ;
    struct {
        const char *type;
        const char *name;
    } snd_pcm_open_ctl_name_table[] = {
        { "hw", ((const snd_pcm_hw_config_t *)(pcm_config->config))->card_name},
    };
    int size = sizeof(snd_pcm_open_ctl_name_table) / sizeof(snd_pcm_open_ctl_name_table[0]);
    int i;

    for (i = 0; i < size; ++i) {
        if (0 == strcmp(pcm_config->type, snd_pcm_open_ctl_name_table[i].type)) {
            *name = snd_pcm_open_ctl_name_table[i].name;
            break;
        }
    }
}
/* Number of control elements on the card backing PCM device `name`. */
int snd_ctl_num(const char *name)
{
    snd_ctl_open(&name);

    return ksnd_ctl_num(name);
}
/* Fetch control element info by numeric id; info is zeroed first. */
int snd_ctl_get_bynum(const char *name, const unsigned int elem_num, snd_ctl_info_t *info)
{
    snd_ctl_open(&name);
    memset(info, 0, sizeof(*info));

    return ksnd_ctl_get_bynum(name, elem_num, info);
}
/* Fetch control element info by element name; info is zeroed first.
 * (Note: unlike the *_bynum variant, this one does not translate `name`
 * through snd_ctl_open().) */
int snd_ctl_get(const char *name, const char *elem, snd_ctl_info_t *info)
{
    memset(info, 0, sizeof(*info));

    int ret = ksnd_ctl_get(name, elem, info);
    awalsa_debug("ret=%d\n", ret);

    return ret;
}
/* Set a control element's value by numeric id. */
int snd_ctl_set_bynum(const char *name, const unsigned int elem_num, unsigned int val)
{
    return ksnd_ctl_set_bynum(name, elem_num, val);
}
/* Set a control element's value by element name. */
int snd_ctl_set(const char *name, const char *elem, unsigned int val)
{
    return ksnd_ctl_set(name, elem, val);
}
/* Add a new control element described by info. */
int snd_ctl_add(const char *name, snd_ctl_info_t *info)
{
    return ksnd_ctl_add_elem(name, (void *)info);
}
/* Remove the control element with the given numeric id. */
int snd_ctl_remove(const char *name, const unsigned int elem_num)
{
    return ksnd_ctl_remove_elem(name, elem_num);
}
|
461a7529fb397f72c8c7384260c3183b4aa838b7
|
e683a0b8dbb87c7ceb99e0d06896174a559d2c67
|
/Tools/esp/printsls.c
|
95fd00b7abe0822c3fc7c507a2136590f9b2ab06
|
[
"Apache-2.0"
] |
permissive
|
bluewaysw/pcgeos
|
f093d79567d977d992f47065056d14d5a04b9f14
|
c6ae4c8e77b54b9ff654c3916f2191f8b1a1b65d
|
refs/heads/master
| 2023-08-31T00:17:54.481175
| 2023-08-29T19:00:49
| 2023-08-29T19:00:49
| 157,968,410
| 603
| 88
|
Apache-2.0
| 2023-09-13T07:44:06
| 2018-11-17T09:09:55
|
Assembly
|
UTF-8
|
C
| false
| false
| 5,538
|
c
|
printsls.c
|
/***********************************************************************
*
* Copyright (c) Berkeley Softworks 1989 -- All Rights Reserved
*
* PROJECT: PCGEOS
* MODULE: printobj.c
* FILE: printobj.c
*
* AUTHOR: Adam de Boor: Aug 30, 1989
*
* ROUTINES:
* Name Description
* ---- -----------
*
* REVISION HISTORY:
* Date Name Description
* ---- ---- -----------
* 8/30/89 ardeb Initial version
*
* DESCRIPTION:
* Program to print out the contents of an object file.
*
***********************************************************************/
#ifndef lint
static char *rcsid =
"$Id: printobj.c,v 3.25 95/02/17 16:27:30 adam Exp $";
/* Fix: standard C allows no tokens after #endif; the bare `lint` must be
 * inside a comment. */
#endif /* lint */
#include <config.h>
#include <st.h>
#include <objfmt.h>
#include <objSwap.h>
#include <stdio.h>
#include <ctype.h>
int debug = 0;              /* -D flag: dump extra per-block info */
int obj_hash_chains;        /* used for symfile format compatibility */

VMHandle output;            /* the symbol file being read */

int geosRelease;            /* For VM functions */
int dbcsRelease = 0;        /* For VM functions */
int useDecimal = 0;         /* -d flag: print values in decimal */

/* Printable names indexed by segment type. */
const char *segtypes[] = {
    "private", "common", "stack", "library", "resource", "lmem", "public",
    "absolute", "global"
};

/* Printable register names, indexed by register encoding. */
const char *registers[] = {
    "ax", "bx", "cx", "dx", "sp", "bp", "si", "di",
    "es", "cs", "ss", "ds",
    "al", "bl", "cl", "dl", "ah", "bh", "ch", "dh"
};
/* Walk the chain of symbol blocks starting at `block`, printing one line
 * "<segment> <address> <name>" for each class and procedure symbol.
 *
 * NOTE(review): segName is printed via the GEOS-specific %i conversion and
 * callers pass seg->name, so it appears to be a name ID resolved through
 * UtilSetIDFile() rather than a C string, despite the declared type —
 * confirm against the GEOS printf implementation.
 *
 * Fix over the original: the block-advance sequence (next = hdr->next;
 * VMUnlock; block = next) sat INSIDE the per-symbol for loop, so the block
 * was unlocked after the first symbol (use-after-unlock for the remaining
 * symbols) and a block with zero symbols never advanced (infinite loop).
 * It now executes exactly once per block. */
void
DumpSyms(VMHandle file,
         VMBlockHandle block,
         const char* segName,
         int segOff)
{
    ObjSymHeader *hdr;
    ObjSym *sym;
    int n;
    word size;
    MemHandle mem;
    VMBlockHandle next;

    while (block != NULL) {
        hdr = (ObjSymHeader *)VMLock(file, block, &mem);
        MemInfo(mem, (genptr *)NULL, &size);
        if (hdr->seg != segOff) {
            printf("************** WARNING: hdr->seg (%d) != segOff (%d) ************\n",
                   hdr->seg, segOff);
        }
        n = hdr->num;
        if (debug) {
            printf("Block %04xh, %d symbols, types = %04xh\n", block, n,
                   hdr->types);
        }
        for (sym = (ObjSym *)(hdr+1); n > 0; sym++, n--) {
            switch(sym->type) {
            case OSYM_CLASS:
                printf("%i %u %i\n",
                       segName,
                       sym->u.class.address,
                       sym->name);
                break;
            case OSYM_PROC:
                printf(
                    "%i %u %i\n",
                    segName,
                    sym->u.proc.address,
                    sym->name);
                break;
            }
        }
        /* Advance to the next block only after all symbols in this one
         * have been printed, and unlock the block exactly once. */
        next = hdr->next;
        VMUnlock(file, block);
        block = next;
    }
}
/* Entry point: open the symbol file named on the command line and, for each
 * segment, print the class/procedure symbol lines produced by DumpSyms.
 *
 * usage: printsls [-d] [-D] <symfile>
 *
 * Declared "volatile void" (pre-ANSI idiom for a function that does not
 * return); every path ends in exit(). */
volatile void
main(int argc, char **argv)
{
    short status;
    VMBlockHandle map;
    ObjSegment *seg;
    ObjHeader *hdr;
    int i;
    extern volatile void exit(int);
    extern char *optarg;
    extern int optind;
    char optchar;

    if (argc < 2) {
        fprintf(stderr,
                "usage: printsls [-d] [-D] <symfile>\n"
                "\t-d\tprint values in decimal\n"
                "\t-D\tturn on debugging mode\n");
        exit(1);
    }

    /* Option flags feed the globals `debug' and `useDecimal'. */
    while ((optchar = getopt(argc, argv, "Dd")) != (char)EOF) {
        switch (optchar) {
        case 'D':
            debug = 1;
            break;
        case 'd':
            useDecimal = 1;
            break;
        }
    }

    /* Open the file read-only through the VM layer. */
    output = VMOpen(VMO_OPEN|FILE_DENY_W|FILE_ACCESS_R, 0,
                    argv[optind],
                    &status);
    if (output == NULL) {
        perror(argv[optind]);
        exit(1);
    }
    /* Let the custom %i printf conversion resolve name IDs in this file. */
    UtilSetIDFile(output);

    map = VMGetMapBlock(output);
    hdr = (ObjHeader *)VMLock(output, map, NULL);
    /* Accept both byte orders and both symfile format generations; install
     * relocation callbacks for byte-swapped files. */
    switch(hdr->magic)
    {
        case SWOBJMAGIC:
            ObjSwap_Header(hdr);
            VMSetReloc(output, ObjSwap_Reloc);
            /* FALLTHRU */
        case OBJMAGIC:
            obj_hash_chains = OBJ_HASH_CHAINS;
            break;
        case SWOBJMAGIC_NEW_FORMAT:
            ObjSwap_Header(hdr);
            VMSetReloc(output, ObjSwap_Reloc_NewFormat);
            /* FALLTHRU */
        case OBJMAGIC_NEW_FORMAT:
            obj_hash_chains = OBJ_HASH_CHAINS_NEW_FORMAT;
            break;
        default:
            printf("invalid magic number (is %04x, s/b %04x)\n",
                   hdr->magic, OBJMAGIC);
            exit(1);
    }

    printf("; protocol: %d.%d; revision: %d.%d.%d.%d\n",
           hdr->proto.major, hdr->proto.minor,
           hdr->rev.major, hdr->rev.minor, hdr->rev.change, hdr->rev.internal);

    /* Entry-point reporting is disabled (printfs commented out), but the
     * lookups are still performed. */
    if (hdr->entry.frame != NULL) {
        ID frame;
        ObjSym *sym;

        /* An offset beyond the segment table means the frame is a group. */
        if (hdr->entry.frame > (sizeof(ObjHeader) +
                                hdr->numSeg * sizeof(ObjSegment)))
        {
            frame = ((ObjGroup *)((genptr)hdr+hdr->entry.frame))->name;
        } else {
            frame = ((ObjSegment *)((genptr)hdr+hdr->entry.frame))->name;
        }
        if (hdr->entry.symBlock == 0) {
            //printf("no symbol");
        } else {
            sym = (ObjSym *)((genptr)VMLock(output, hdr->entry.symBlock, NULL) +
                             hdr->entry.symOff);
            //printf("target = %i", sym->name);
        }
        //printf (", frame = %i\n", frame);
    }

    /* Dump the symbols of every segment.  The last DumpSyms argument is the
     * segment's byte offset within the header, used there as a consistency
     * check against each symbol block's `seg' field. */
    for (i = hdr->numSeg, seg = (ObjSegment *)(hdr+1);
         i > 0;
         i--, seg++)
    {
        /*printf("%sSegment %d: name %i, class %i, type %s, alignment %#x, size %5d\n",
               i == hdr->numSeg ? "" : "\n=================\n",
               hdr->numSeg-i+1,
               seg->name, seg->class, segtypes[seg->type], seg->align,
               seg->size);
        if (seg->type == SEG_ABSOLUTE) {
            printf("\tlocated at %04x:0\n", seg->data);
        } else {
            printf("*** DATA:\n");
            DumpBlock(output, seg->data);
        }*/
        //printf("*** SYMBOLS:\n");
        DumpSyms(output, seg->syms, seg->name, (genptr)seg-(genptr)hdr);
        //printf("*** RELOCATIONS:\n");
        //DumpRel(output, seg->relHead, hdr);
        //printf("*** LINES:\n");
        //DumpLines(output, seg->lines);
    }
    VMClose(output);
    exit(0);
}
|
6741cdcdcccc669d8df2873b529e89588702a22e
|
35806cf316f378a8ca98c0e244d149e052b1248d
|
/src/runtime/lfstack.c
|
57e0af28299add03c54af1656dbd42e7a72e1e25
|
[
"LicenseRef-scancode-google-patent-license-golang",
"BSD-3-Clause"
] |
permissive
|
gomini/go-mips32
|
337000fe20d8c35e24245d3a9d205defddedad65
|
56b0efd6bad0c9554b5670fa6c3641282dd4d570
|
refs/heads/dev.github
| 2023-03-10T01:34:51.544213
| 2023-02-20T22:40:56
| 2023-02-20T22:40:56
| 43,116,287
| 155
| 52
|
BSD-3-Clause
| 2023-02-20T22:40:57
| 2015-09-25T07:30:35
|
Go
|
UTF-8
|
C
| false
| false
| 2,019
|
c
|
lfstack.c
|
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Lock-free stack.
// The following code runs only on g0 stack.
#include "runtime.h"
#include "arch_GOARCH.h"
#ifdef _64BIT
// Amd64 uses 48-bit virtual addresses, 47-th bit is used as kernel/user flag.
// So we use 17msb of pointers as ABA counter.
# define PTR_BITS 47
#else
# define PTR_BITS 32
#endif
#define PTR_MASK ((1ull<<PTR_BITS)-1)
#define CNT_MASK (0ull-1)
#ifdef _64BIT
#ifdef GOOS_solaris
// SPARC64 and Solaris on AMD64 uses all 64 bits of virtual addresses.
// Use low-order three bits as ABA counter.
// http://docs.oracle.com/cd/E19120-01/open.solaris/816-5138/6mba6ua5p/index.html
#undef PTR_BITS
#undef CNT_MASK
#undef PTR_MASK
#define PTR_BITS 0
#define CNT_MASK 7
#define PTR_MASK ((0ull-1)<<3)
#endif
#endif
/* Push node onto the lock-free stack at *head.
 * The head word packs the node pointer (low PTR_BITS) with an ABA counter
 * taken from the node's pushcnt (remaining high bits), so the CAS detects
 * a concurrent pop/re-push of the same node.  Throws if the node pointer
 * does not fit within PTR_BITS. */
void
runtime·lfstackpush(uint64 *head, LFNode *node)
{
	uint64 old, new;

	if((uintptr)node != ((uintptr)node&PTR_MASK)) {
		runtime·printf("p=%p\n", node);
		runtime·throw("runtime·lfstackpush: invalid pointer");
	}

	node->pushcnt++;
	new = (uint64)(uintptr)node|(((uint64)node->pushcnt&CNT_MASK)<<PTR_BITS);
	for(;;) {
		old = runtime·atomicload64(head);
		node->next = (LFNode*)(uintptr)(old&PTR_MASK);
		/* Publish the new head; retry if another thread raced us. */
		if(runtime·cas64(head, old, new))
			break;
	}
}
/* Pop the top node from the lock-free stack at *head; returns nil if the
 * stack is empty.  The packed ABA counter in the head word keeps the CAS
 * from succeeding when the same node was popped and re-pushed between the
 * load and the CAS. */
LFNode*
runtime·lfstackpop(uint64 *head)
{
	LFNode *node, *node2;
	uint64 old, new;

	for(;;) {
		old = runtime·atomicload64(head);
		if(old == 0)
			return nil;
		node = (LFNode*)(uintptr)(old&PTR_MASK);
		node2 = runtime·atomicloadp(&node->next);
		new = 0;
		if(node2 != nil)
			new = (uint64)(uintptr)node2|(((uint64)node2->pushcnt&CNT_MASK)<<PTR_BITS);
		if(runtime·cas64(head, old, new))
			return node;
	}
}
/* g0-stack wrapper for lfstackpush: arguments arrive via m->ptrarg and are
 * cleared afterwards so stale pointers are not kept live. */
void
runtime·lfstackpush_m(void)
{
	runtime·lfstackpush(g->m->ptrarg[0], g->m->ptrarg[1]);
	g->m->ptrarg[0] = nil;
	g->m->ptrarg[1] = nil;
}
/* g0-stack wrapper for lfstackpop: ptrarg[0] carries the head in and the
 * popped node out. */
void
runtime·lfstackpop_m(void)
{
	g->m->ptrarg[0] = runtime·lfstackpop(g->m->ptrarg[0]);
}
|
7cd8689de00ac8b1c9c04299b4b41169f2aeb7e2
|
83372fce92c9088ee5c1e1c13bc67385a3b02a36
|
/src/cc65/output.h
|
a3d2cca8a7cef38bc042a8cab0971161d754839b
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Zlib"
] |
permissive
|
cc65/cc65
|
2cd1b20b61ddd0691cd4601b4da37a9200a50afb
|
16e90187e8e80c6e2fcfcc04f068ac865d2290e5
|
refs/heads/master
| 2023-09-03T17:16:46.027934
| 2023-09-02T22:30:44
| 2023-09-02T22:30:44
| 8,932,489
| 2,103
| 526
|
Zlib
| 2023-09-13T06:19:01
| 2013-03-21T15:56:48
|
C
|
UTF-8
|
C
| false
| false
| 4,168
|
h
|
output.h
|
/*****************************************************************************/
/* */
/* output.h */
/* */
/* Output file handling */
/* */
/* */
/* */
/* (C) 2009-2012, Ullrich von Bassewitz */
/* Roemerstrasse 52 */
/* D-70794 Filderstadt */
/* EMail: uz@cc65.org */
/* */
/* */
/* This software is provided 'as-is', without any expressed or implied */
/* warranty. In no event will the authors be held liable for any damages */
/* arising from the use of this software. */
/* */
/* Permission is granted to anyone to use this software for any purpose, */
/* including commercial applications, and to alter it and redistribute it */
/* freely, subject to the following restrictions: */
/* */
/* 1. The origin of this software must not be misrepresented; you must not */
/* claim that you wrote the original software. If you use this software */
/* in a product, an acknowledgment in the product documentation would be */
/* appreciated but is not required. */
/* 2. Altered source versions must be plainly marked as such, and must not */
/* be misrepresented as being the original software. */
/* 3. This notice may not be removed or altered from any source */
/* distribution. */
/* */
/*****************************************************************************/
#ifndef OUTPUT_H
#define OUTPUT_H
#include <stdio.h>
/* common */
#include "attrib.h"
/*****************************************************************************/
/* Data */
/*****************************************************************************/
/* Name of the output file. Dynamically allocated and read only. */
extern const char* OutputFilename;
/* Output file handle. Use WriteOutput if possible. Read only. */
extern FILE* OutputFile;
/*****************************************************************************/
/* Code */
/*****************************************************************************/
void SetOutputName (const char* Name);
/* Sets the name of the output file. */

void MakeDefaultOutputName (const char* InputFilename);
/* If the name of the output file is empty or NULL, the name of the output
** file is derived from the input file by adjusting the file name extension.
*/

void OpenOutputFile (void);
/* Open the output file. Will call Fatal() in case of failures. */

void OpenDebugOutputFile (const char* Name);
/* Open an output file for debugging purposes. Will call Fatal() in case of
** failures.
*/

void CloseOutputFile (void);
/* Close the output file. Will call Fatal() in case of failures. */

int WriteOutput (const char* Format, ...) attribute ((format (printf, 1, 2)));
/* Write to the output file using printf like formatting. Returns the number
** of chars written.
*/
/* End of output.h */
#endif
|
2d00db915322c4edeb99951269dfbc45a9f2240a
|
ea9ac7578bc8a8fc83377f651b80e19920338fda
|
/src/gallium/drivers/radeonsi/si_shader.h
|
7734bfd2f5d3d8ea2d598e23ec700b82ef521424
|
[] |
no_license
|
yuq/mesa-lima
|
9d263945e23118c7aa997bb9fcb87ae6bf7ddf0d
|
2adeaa87e813644dcf70f903c0ac909d65ef2972
|
refs/heads/lima-18.1
| 2021-01-25T11:03:44.405200
| 2018-05-27T12:53:15
| 2018-05-27T12:53:15
| 93,909,976
| 186
| 21
| null | 2018-06-05T01:32:44
| 2017-06-10T02:07:43
|
C
|
UTF-8
|
C
| false
| false
| 23,577
|
h
|
si_shader.h
|
/*
* Copyright 2012 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/* The compiler middle-end architecture: Explaining (non-)monolithic shaders
* -------------------------------------------------------------------------
*
* Typically, there is one-to-one correspondence between API and HW shaders,
* that is, for every API shader, there is exactly one shader binary in
* the driver.
*
* The problem with that is that we also have to emulate some API states
* (e.g. alpha-test, and many others) in shaders too. The two obvious ways
* to deal with it are:
* - each shader has multiple variants for each combination of emulated states,
* and the variants are compiled on demand, possibly relying on a shader
* cache for good performance
* - patch shaders at the binary level
*
* This driver uses something completely different. The emulated states are
* usually implemented at the beginning or end of shaders. Therefore, we can
* split the shader into 3 parts:
* - prolog part (shader code dependent on states)
* - main part (the API shader)
* - epilog part (shader code dependent on states)
*
* Each part is compiled as a separate shader and the final binaries are
* concatenated. This type of shader is called non-monolithic, because it
* consists of multiple independent binaries. Creating a new shader variant
* is therefore only a concatenation of shader parts (binaries) and doesn't
* involve any compilation. The main shader parts are the only parts that are
* compiled when applications create shader objects. The prolog and epilog
* parts are compiled on the first use and saved, so that their binaries can
* be reused by many other shaders.
*
* One of the roles of the prolog part is to compute vertex buffer addresses
* for vertex shaders. A few of the roles of the epilog part are color buffer
* format conversions in pixel shaders that we have to do manually, and write
* tessellation factors in tessellation control shaders. The prolog and epilog
* have many other important responsibilities in various shader stages.
* They don't just "emulate legacy stuff".
*
* Monolithic shaders are shaders where the parts are combined before LLVM
* compilation, and the whole thing is compiled and optimized as one unit with
* one binary on the output. The result is the same as the non-monolithic
* shader, but the final code can be better, because LLVM can optimize across
* all shader parts. Monolithic shaders aren't usually used except for these
* special cases:
*
* 1) Some rarely-used states require modification of the main shader part
* itself, and in such cases, only the monolithic shader variant is
* compiled, and that's always done on the first use.
*
* 2) When we do cross-stage optimizations for separate shader objects and
* e.g. eliminate unused shader varyings, the resulting optimized shader
* variants are always compiled as monolithic shaders, and always
* asynchronously (i.e. not stalling ongoing rendering). We call them
* "optimized monolithic" shaders. The important property here is that
* the non-monolithic unoptimized shader variant is always available for use
* when the asynchronous compilation of the optimized shader is not done
* yet.
*
* Starting with GFX9 chips, some shader stages are merged, and the number of
* shader parts per shader increased. The complete new list of shader parts is:
* - 1st shader: prolog part
* - 1st shader: main part
* - 2nd shader: prolog part
* - 2nd shader: main part
* - 2nd shader: epilog part
*/
/* How linking shader inputs and outputs between vertex, tessellation, and
* geometry shaders works.
*
* Inputs and outputs between shaders are stored in a buffer. This buffer
* lives in LDS (typical case for tessellation), but it can also live
* in memory (ESGS). Each input or output has a fixed location within a vertex.
* The highest used input or output determines the stride between vertices.
*
* Since GS and tessellation are only possible in the OpenGL core profile,
* only these semantics are valid for per-vertex data:
*
* Name Location
*
* POSITION 0
* PSIZE 1
* CLIPDIST0..1 2..3
* CULLDIST0..1 (not implemented)
* GENERIC0..31 4..35
*
* For example, a shader only writing GENERIC0 has the output stride of 5.
*
* Only these semantics are valid for per-patch data:
*
* Name Location
*
* TESSOUTER 0
* TESSINNER 1
* PATCH0..29 2..31
*
* That's how independent shaders agree on input and output locations.
* The si_shader_io_get_unique_index function assigns the locations.
*
* For tessellation, other required information for calculating the input and
* output addresses like the vertex stride, the patch stride, and the offsets
* where per-vertex and per-patch data start, is passed to the shader via
* user data SGPRs. The offsets and strides are calculated at draw time and
* aren't available at compile time.
*/
#ifndef SI_SHADER_H
#define SI_SHADER_H
#include <llvm-c/Core.h> /* LLVMModuleRef */
#include <llvm-c/TargetMachine.h>
#include "tgsi/tgsi_scan.h"
#include "util/u_queue.h"
#include "ac_binary.h"
#include "ac_llvm_build.h"
#include "si_state.h"
struct nir_shader;
/* Maximum number of vertex shader output slots. */
#define SI_MAX_VS_OUTPUTS	40

/* Shader IO unique indices are supported for TGSI_SEMANTIC_GENERIC with an
 * index smaller than this.
 */
#define SI_MAX_IO_GENERIC	46
/* SGPR user data indices.
 * Without 32-bit pointers (HAVE_32BIT_POINTERS unset), each descriptor
 * pointer occupies two SGPRs (low/high halves), hence the *_HI entries.
 * Several stage-specific ranges alias each other starting at
 * SI_NUM_RESOURCE_SGPRS, since only one stage's layout is live at a time. */
enum {
	SI_SGPR_RW_BUFFERS,  /* rings (& stream-out, VS only) */
#if !HAVE_32BIT_POINTERS
	SI_SGPR_RW_BUFFERS_HI,
#endif
	SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES,
#if !HAVE_32BIT_POINTERS
	SI_SGPR_BINDLESS_SAMPLERS_AND_IMAGES_HI,
#endif
	SI_SGPR_CONST_AND_SHADER_BUFFERS, /* or just a constant buffer 0 pointer */
#if !HAVE_32BIT_POINTERS
	SI_SGPR_CONST_AND_SHADER_BUFFERS_HI,
#endif
	SI_SGPR_SAMPLERS_AND_IMAGES,
#if !HAVE_32BIT_POINTERS
	SI_SGPR_SAMPLERS_AND_IMAGES_HI,
#endif
	SI_NUM_RESOURCE_SGPRS,

	/* all VS variants */
	SI_SGPR_BASE_VERTEX = SI_NUM_RESOURCE_SGPRS,
	SI_SGPR_START_INSTANCE,
	SI_SGPR_DRAWID,
	SI_SGPR_VS_STATE_BITS,
	SI_VS_NUM_USER_SGPR,

	SI_SGPR_VS_BLIT_DATA = SI_SGPR_CONST_AND_SHADER_BUFFERS,

	/* TES */
	SI_SGPR_TES_OFFCHIP_LAYOUT = SI_NUM_RESOURCE_SGPRS,
	SI_SGPR_TES_OFFCHIP_ADDR,
	SI_TES_NUM_USER_SGPR,

	/* GFX6-8: TCS only */
	GFX6_SGPR_TCS_OFFCHIP_LAYOUT = SI_NUM_RESOURCE_SGPRS,
	GFX6_SGPR_TCS_OUT_OFFSETS,
	GFX6_SGPR_TCS_OUT_LAYOUT,
	GFX6_SGPR_TCS_IN_LAYOUT,
	GFX6_TCS_NUM_USER_SGPR,

	/* GFX9: Merged shaders. */
#if HAVE_32BIT_POINTERS
	/* 2ND_CONST_AND_SHADER_BUFFERS is set in USER_DATA_ADDR_LO (SGPR0). */
	/* 2ND_SAMPLERS_AND_IMAGES is set in USER_DATA_ADDR_HI (SGPR1). */
	GFX9_MERGED_NUM_USER_SGPR = SI_VS_NUM_USER_SGPR,
#else
	/* 2ND_CONST_AND_SHADER_BUFFERS is set in USER_DATA_ADDR_LO/HI (SGPR[0:1]). */
	GFX9_SGPR_2ND_SAMPLERS_AND_IMAGES = SI_VS_NUM_USER_SGPR,
	GFX9_SGPR_2ND_SAMPLERS_AND_IMAGES_HI,
	GFX9_MERGED_NUM_USER_SGPR,
#endif

	/* GFX9: Merged LS-HS (VS-TCS) only. */
	GFX9_SGPR_TCS_OFFCHIP_LAYOUT = GFX9_MERGED_NUM_USER_SGPR,
	GFX9_SGPR_TCS_OUT_OFFSETS,
	GFX9_SGPR_TCS_OUT_LAYOUT,
#if !HAVE_32BIT_POINTERS
	GFX9_SGPR_align_for_vb_pointer,
#endif
	GFX9_TCS_NUM_USER_SGPR,

	/* GS limits */
	GFX6_GS_NUM_USER_SGPR = SI_NUM_RESOURCE_SGPRS,
#if HAVE_32BIT_POINTERS
	GFX9_VSGS_NUM_USER_SGPR = SI_VS_NUM_USER_SGPR,
	GFX9_TESGS_NUM_USER_SGPR = SI_TES_NUM_USER_SGPR,
#else
	GFX9_VSGS_NUM_USER_SGPR = GFX9_MERGED_NUM_USER_SGPR,
	GFX9_TESGS_NUM_USER_SGPR = GFX9_MERGED_NUM_USER_SGPR,
#endif
	SI_GSCOPY_NUM_USER_SGPR = SI_SGPR_RW_BUFFERS + (HAVE_32BIT_POINTERS ? 1 : 2),

	/* PS only */
	SI_SGPR_ALPHA_REF = SI_NUM_RESOURCE_SGPRS,
	SI_PS_NUM_USER_SGPR,
};
/* LLVM function parameter indices */
enum {
SI_NUM_RESOURCE_PARAMS = 4,
/* PS only parameters */
SI_PARAM_ALPHA_REF = SI_NUM_RESOURCE_PARAMS,
SI_PARAM_PRIM_MASK,
SI_PARAM_PERSP_SAMPLE,
SI_PARAM_PERSP_CENTER,
SI_PARAM_PERSP_CENTROID,
SI_PARAM_PERSP_PULL_MODEL,
SI_PARAM_LINEAR_SAMPLE,
SI_PARAM_LINEAR_CENTER,
SI_PARAM_LINEAR_CENTROID,
SI_PARAM_LINE_STIPPLE_TEX,
SI_PARAM_POS_X_FLOAT,
SI_PARAM_POS_Y_FLOAT,
SI_PARAM_POS_Z_FLOAT,
SI_PARAM_POS_W_FLOAT,
SI_PARAM_FRONT_FACE,
SI_PARAM_ANCILLARY,
SI_PARAM_SAMPLE_COVERAGE,
SI_PARAM_POS_FIXED_PT,
SI_NUM_PARAMS = SI_PARAM_POS_FIXED_PT + 9, /* +8 for COLOR[0..1] */
};
/* Fields of driver-defined VS state SGPR. */
/* Clamp vertex color output (only used in VS as VS). */
#define S_VS_STATE_CLAMP_VERTEX_COLOR(x) (((unsigned)(x) & 0x1) << 0)
#define C_VS_STATE_CLAMP_VERTEX_COLOR 0xFFFFFFFE
#define S_VS_STATE_INDEXED(x) (((unsigned)(x) & 0x1) << 1)
#define C_VS_STATE_INDEXED 0xFFFFFFFD
#define S_VS_STATE_LS_OUT_PATCH_SIZE(x) (((unsigned)(x) & 0x1FFF) << 8)
#define C_VS_STATE_LS_OUT_PATCH_SIZE 0xFFE000FF
#define S_VS_STATE_LS_OUT_VERTEX_SIZE(x) (((unsigned)(x) & 0xFF) << 24)
#define C_VS_STATE_LS_OUT_VERTEX_SIZE 0x00FFFFFF
/* SI-specific system values. */
enum {
TGSI_SEMANTIC_DEFAULT_TESSOUTER_SI = TGSI_SEMANTIC_COUNT,
TGSI_SEMANTIC_DEFAULT_TESSINNER_SI,
};
enum {
/* Use a property enum that VS wouldn't use. */
TGSI_PROPERTY_VS_BLIT_SGPRS = TGSI_PROPERTY_FS_COORD_ORIGIN,
/* These represent the number of SGPRs the shader uses. */
SI_VS_BLIT_SGPRS_POS = 3,
SI_VS_BLIT_SGPRS_POS_COLOR = 7,
SI_VS_BLIT_SGPRS_POS_TEXCOORD = 9,
};
/* For VS shader key fix_fetch. */
enum {
SI_FIX_FETCH_NONE = 0,
SI_FIX_FETCH_A2_SNORM,
SI_FIX_FETCH_A2_SSCALED,
SI_FIX_FETCH_A2_SINT,
SI_FIX_FETCH_RGBA_32_UNORM,
SI_FIX_FETCH_RGBX_32_UNORM,
SI_FIX_FETCH_RGBA_32_SNORM,
SI_FIX_FETCH_RGBX_32_SNORM,
SI_FIX_FETCH_RGBA_32_USCALED,
SI_FIX_FETCH_RGBA_32_SSCALED,
SI_FIX_FETCH_RGBA_32_FIXED,
SI_FIX_FETCH_RGBX_32_FIXED,
SI_FIX_FETCH_RG_64_FLOAT,
SI_FIX_FETCH_RGB_64_FLOAT,
SI_FIX_FETCH_RGBA_64_FLOAT,
SI_FIX_FETCH_RGB_8, /* A = 1.0 */
SI_FIX_FETCH_RGB_8_INT, /* A = 1 */
SI_FIX_FETCH_RGB_16,
SI_FIX_FETCH_RGB_16_INT,
};
struct si_shader;
/* State of the context creating the shader object. */
struct si_compiler_ctx_state {
/* Should only be used by si_init_shader_selector_async and
* si_build_shader_variant if thread_index == -1 (non-threaded). */
LLVMTargetMachineRef tm;
/* Used if thread_index == -1 or if debug.async is true. */
struct pipe_debug_callback debug;
/* Used for creating the log string for gallium/ddebug. */
bool is_debug_context;
};
/* A shader selector is a gallium CSO and contains shader variants and
* binaries for one TGSI program. This can be shared by multiple contexts.
*/
struct si_shader_selector {
struct pipe_reference reference;
struct si_screen *screen;
struct util_queue_fence ready;
struct si_compiler_ctx_state compiler_ctx_state;
mtx_t mutex;
struct si_shader *first_variant; /* immutable after the first variant */
struct si_shader *last_variant; /* mutable */
/* The compiled TGSI shader expecting a prolog and/or epilog (not
* uploaded to a buffer).
*/
struct si_shader *main_shader_part;
struct si_shader *main_shader_part_ls; /* as_ls is set in the key */
struct si_shader *main_shader_part_es; /* as_es is set in the key */
struct si_shader *gs_copy_shader;
struct tgsi_token *tokens;
struct nir_shader *nir;
struct pipe_stream_output_info so;
struct tgsi_shader_info info;
struct tgsi_tessctrl_info tcs_info;
/* PIPE_SHADER_[VERTEX|FRAGMENT|...] */
unsigned type;
bool vs_needs_prolog;
bool force_correct_derivs_after_kill;
unsigned pa_cl_vs_out_cntl;
ubyte clipdist_mask;
ubyte culldist_mask;
/* ES parameters. */
unsigned esgs_itemsize;
/* GS parameters. */
unsigned gs_input_verts_per_prim;
unsigned gs_output_prim;
unsigned gs_max_out_vertices;
unsigned gs_num_invocations;
unsigned max_gs_stream; /* count - 1 */
unsigned gsvs_vertex_size;
unsigned max_gsvs_emit_size;
unsigned enabled_streamout_buffer_mask;
/* PS parameters. */
unsigned color_attr_index[2];
unsigned db_shader_control;
/* Set 0xf or 0x0 (4 bits) per each written output.
* ANDed with spi_shader_col_format.
*/
unsigned colors_written_4bit;
/* CS parameters */
unsigned local_size;
uint64_t outputs_written; /* "get_unique_index" bits */
uint32_t patch_outputs_written; /* "get_unique_index_patch" bits */
uint64_t inputs_read; /* "get_unique_index" bits */
/* bitmasks of used descriptor slots */
uint32_t active_const_and_shader_buffers;
uint64_t active_samplers_and_images;
};
/* Valid shader configurations:
*
* API shaders VS | TCS | TES | GS |pass| PS
* are compiled as: | | | |thru|
* | | | | |
* Only VS & PS: VS | | | | | PS
* GFX6 - with GS: ES | | | GS | VS | PS
* - with tess: LS | HS | VS | | | PS
* - with both: LS | HS | ES | GS | VS | PS
* GFX9 - with GS: -> | | | GS | VS | PS
* - with tess: -> | HS | VS | | | PS
* - with both: -> | HS | -> | GS | VS | PS
*
* -> = merged with the next stage
*/
/* Use the byte alignment for all following structure members for optimal
* shader key memory footprint.
*/
#pragma pack(push, 1)
/* Common VS bits between the shader key and the prolog key. */
struct si_vs_prolog_bits {
/* - If neither "is_one" nor "is_fetched" has a bit set, the instance
* divisor is 0.
* - If "is_one" has a bit set, the instance divisor is 1.
* - If "is_fetched" has a bit set, the instance divisor will be loaded
* from the constant buffer.
*/
uint16_t instance_divisor_is_one; /* bitmask of inputs */
uint16_t instance_divisor_is_fetched; /* bitmask of inputs */
unsigned ls_vgpr_fix:1;
};
/* Common TCS bits between the shader key and the epilog key. */
struct si_tcs_epilog_bits {
unsigned prim_mode:3;
unsigned invoc0_tess_factors_are_def:1;
unsigned tes_reads_tess_factors:1;
};
struct si_gs_prolog_bits {
unsigned tri_strip_adj_fix:1;
unsigned gfx9_prev_is_vs:1;
};
/* Common PS bits between the shader key and the prolog key. */
struct si_ps_prolog_bits {
unsigned color_two_side:1;
unsigned flatshade_colors:1;
unsigned poly_stipple:1;
unsigned force_persp_sample_interp:1;
unsigned force_linear_sample_interp:1;
unsigned force_persp_center_interp:1;
unsigned force_linear_center_interp:1;
unsigned bc_optimize_for_persp:1;
unsigned bc_optimize_for_linear:1;
unsigned samplemask_log_ps_iter:3;
};
/* Common PS bits between the shader key and the epilog key. */
struct si_ps_epilog_bits {
unsigned spi_shader_col_format;
unsigned color_is_int8:8;
unsigned color_is_int10:8;
unsigned last_cbuf:3;
unsigned alpha_func:3;
unsigned alpha_to_one:1;
unsigned poly_line_smoothing:1;
unsigned clamp_color:1;
};
union si_shader_part_key {
struct {
struct si_vs_prolog_bits states;
unsigned num_input_sgprs:6;
/* For merged stages such as LS-HS, HS input VGPRs are first. */
unsigned num_merged_next_stage_vgprs:3;
unsigned last_input:4;
unsigned as_ls:1;
unsigned as_es:1;
/* Prologs for monolithic shaders shouldn't set EXEC. */
unsigned is_monolithic:1;
} vs_prolog;
struct {
struct si_tcs_epilog_bits states;
} tcs_epilog;
struct {
struct si_gs_prolog_bits states;
/* Prologs of monolithic shaders shouldn't set EXEC. */
unsigned is_monolithic:1;
} gs_prolog;
struct {
struct si_ps_prolog_bits states;
unsigned num_input_sgprs:6;
unsigned num_input_vgprs:5;
/* Color interpolation and two-side color selection. */
unsigned colors_read:8; /* color input components read */
unsigned num_interp_inputs:5; /* BCOLOR is at this location */
unsigned face_vgpr_index:5;
unsigned ancillary_vgpr_index:5;
unsigned wqm:1;
char color_attr_index[2];
char color_interp_vgpr_index[2]; /* -1 == constant */
} ps_prolog;
struct {
struct si_ps_epilog_bits states;
unsigned colors_written:8;
unsigned writes_z:1;
unsigned writes_stencil:1;
unsigned writes_samplemask:1;
} ps_epilog;
};
struct si_shader_key {
/* Prolog and epilog flags. */
union {
struct {
struct si_vs_prolog_bits prolog;
} vs;
struct {
struct si_vs_prolog_bits ls_prolog; /* for merged LS-HS */
struct si_shader_selector *ls; /* for merged LS-HS */
struct si_tcs_epilog_bits epilog;
} tcs; /* tessellation control shader */
struct {
struct si_vs_prolog_bits vs_prolog; /* for merged ES-GS */
struct si_shader_selector *es; /* for merged ES-GS */
struct si_gs_prolog_bits prolog;
} gs;
struct {
struct si_ps_prolog_bits prolog;
struct si_ps_epilog_bits epilog;
} ps;
} part;
/* These two are initially set according to the NEXT_SHADER property,
* or guessed if the property doesn't seem correct.
*/
unsigned as_es:1; /* export shader, which precedes GS */
unsigned as_ls:1; /* local shader, which precedes TCS */
/* Flags for monolithic compilation only. */
struct {
/* One byte for every input: SI_FIX_FETCH_* enums. */
uint8_t vs_fix_fetch[SI_MAX_ATTRIBS];
union {
uint64_t ff_tcs_inputs_to_copy; /* for fixed-func TCS */
/* When PS needs PrimID and GS is disabled. */
unsigned vs_export_prim_id:1;
struct {
unsigned interpolate_at_sample_force_center:1;
unsigned fbfetch_msaa;
unsigned fbfetch_is_1D;
unsigned fbfetch_layered;
} ps;
} u;
} mono;
/* Optimization flags for asynchronous compilation only. */
struct {
/* For HW VS (it can be VS, TES, GS) */
uint64_t kill_outputs; /* "get_unique_index" bits */
unsigned clip_disable:1;
/* For shaders where monolithic variants have better code.
*
* This is a flag that has no effect on code generation,
* but forces monolithic shaders to be used as soon as
* possible, because it's in the "opt" group.
*/
unsigned prefer_mono:1;
} opt;
};
/* Restore the pack alignment to default. */
#pragma pack(pop)
struct si_shader_config {
unsigned num_sgprs;
unsigned num_vgprs;
unsigned spilled_sgprs;
unsigned spilled_vgprs;
unsigned private_mem_vgprs;
unsigned lds_size;
unsigned max_simd_waves;
unsigned spi_ps_input_ena;
unsigned spi_ps_input_addr;
unsigned float_mode;
unsigned scratch_bytes_per_wave;
unsigned rsrc1;
unsigned rsrc2;
};
/* GCN-specific shader info. */
struct si_shader_info {
ubyte vs_output_param_offset[SI_MAX_VS_OUTPUTS];
ubyte num_input_sgprs;
ubyte num_input_vgprs;
signed char face_vgpr_index;
signed char ancillary_vgpr_index;
bool uses_instanceid;
ubyte nr_pos_exports;
ubyte nr_param_exports;
};
struct si_shader {
struct si_compiler_ctx_state compiler_ctx_state;
struct si_shader_selector *selector;
struct si_shader_selector *previous_stage_sel; /* for refcounting */
struct si_shader *next_variant;
struct si_shader_part *prolog;
struct si_shader *previous_stage; /* for GFX9 */
struct si_shader_part *prolog2;
struct si_shader_part *epilog;
struct si_pm4_state *pm4;
struct r600_resource *bo;
struct r600_resource *scratch_bo;
struct si_shader_key key;
struct util_queue_fence ready;
bool compilation_failed;
bool is_monolithic;
bool is_optimized;
bool is_binary_shared;
bool is_gs_copy_shader;
/* The following data is all that's needed for binary shaders. */
struct ac_shader_binary binary;
struct si_shader_config config;
struct si_shader_info info;
/* Shader key + LLVM IR + disassembly + statistics.
* Generated for debug contexts only.
*/
char *shader_log;
size_t shader_log_size;
};
struct si_shader_part {
struct si_shader_part *next;
union si_shader_part_key key;
struct ac_shader_binary binary;
struct si_shader_config config;
};
/* si_shader.c */
struct si_shader *
si_generate_gs_copy_shader(struct si_screen *sscreen,
LLVMTargetMachineRef tm,
struct si_shader_selector *gs_selector,
struct pipe_debug_callback *debug);
int si_compile_tgsi_shader(struct si_screen *sscreen,
LLVMTargetMachineRef tm,
struct si_shader *shader,
bool is_monolithic,
struct pipe_debug_callback *debug);
int si_shader_create(struct si_screen *sscreen, LLVMTargetMachineRef tm,
struct si_shader *shader,
struct pipe_debug_callback *debug);
void si_shader_destroy(struct si_shader *shader);
unsigned si_shader_io_get_unique_index_patch(unsigned semantic_name, unsigned index);
unsigned si_shader_io_get_unique_index(unsigned semantic_name, unsigned index);
int si_shader_binary_upload(struct si_screen *sscreen, struct si_shader *shader);
void si_shader_dump(struct si_screen *sscreen, const struct si_shader *shader,
struct pipe_debug_callback *debug, unsigned processor,
FILE *f, bool check_debug_option);
void si_shader_dump_stats_for_shader_db(const struct si_shader *shader,
struct pipe_debug_callback *debug);
void si_multiwave_lds_size_workaround(struct si_screen *sscreen,
unsigned *lds_size);
void si_shader_apply_scratch_relocs(struct si_shader *shader,
uint64_t scratch_va);
void si_shader_binary_read_config(struct ac_shader_binary *binary,
struct si_shader_config *conf,
unsigned symbol_offset);
const char *si_get_shader_name(const struct si_shader *shader, unsigned processor);
/* si_shader_nir.c */
void si_nir_scan_shader(const struct nir_shader *nir,
struct tgsi_shader_info *info);
void si_nir_scan_tess_ctrl(const struct nir_shader *nir,
const struct tgsi_shader_info *info,
struct tgsi_tessctrl_info *out);
void si_lower_nir(struct si_shader_selector *sel);
/* Inline helpers. */
/* Return the address of the slot in *sel that stores the main shader
 * part selected by *key: the LS variant, the ES variant, or the plain
 * main part. as_ls takes precedence over as_es, matching the original
 * check order.
 */
static inline struct si_shader **
si_get_main_shader_part(struct si_shader_selector *sel,
			struct si_shader_key *key)
{
	return key->as_ls ? &sel->main_shader_part_ls :
	       key->as_es ? &sel->main_shader_part_es :
			    &sel->main_shader_part;
}
/* Whether the selector's shader uses bindless samplers.
 * A NULL selector (no shader bound) reports false. */
static inline bool
si_shader_uses_bindless_samplers(struct si_shader_selector *selector)
{
	if (!selector)
		return false;

	return selector->info.uses_bindless_samplers;
}
/* Whether the selector's shader uses bindless images.
 * A NULL selector (no shader bound) reports false. */
static inline bool
si_shader_uses_bindless_images(struct si_shader_selector *selector)
{
	if (!selector)
		return false;

	return selector->info.uses_bindless_images;
}
void si_destroy_shader_selector(struct si_context *sctx,
struct si_shader_selector *sel);
/* Point *dst at src, adjusting reference counts; destroys the
 * previously referenced selector when its count drops to zero.
 * (pipe_reference returns true when the old object must be freed.) */
static inline void
si_shader_selector_reference(struct si_context *sctx,
			     struct si_shader_selector **dst,
			     struct si_shader_selector *src)
{
	struct si_shader_selector *old_sel = *dst;

	if (pipe_reference(&old_sel->reference, &src->reference))
		si_destroy_shader_selector(sctx, old_sel);
	*dst = src;
}
#endif
|
1e913a3a9cf67cbfb27677b21d5dbf1ff6d1ffd8
|
fff6d13af91db925a94b3e3474108fafd519bba0
|
/src/Platform/D3D/std3D.c
|
a4a8b3636a4c65ae9e7ffad886abedef9fe819d5
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
shinyquagsire23/OpenJKDF2
|
94f1364628924ac944466107606b91c6faeec3df
|
b79b8c210878b6f276ed3d1a5dad91d9219e6ce1
|
refs/heads/master
| 2023-07-08T16:41:38.986577
| 2023-06-28T08:47:39
| 2023-06-28T08:47:39
| 139,542,792
| 381
| 34
|
NOASSERTION
| 2023-06-28T08:47:40
| 2018-07-03T07:09:45
|
C
|
UTF-8
|
C
| false
| false
| 1,947
|
c
|
std3D.c
|
#include "Platform/std3D.h"
// Added helpers
// Reports the current D3D device's hasAlpha capability flag.
int std3D_HasAlpha()
{
    return d3d_device_ptr->hasAlpha;
}
// Reports the current D3D device's hasModulateAlpha capability flag.
int std3D_HasModulateAlpha()
{
    return d3d_device_ptr->hasModulateAlpha;
}
// Reports the current D3D device's hasAlphaFlatStippled capability flag.
int std3D_HasAlphaFlatStippled()
{
    return d3d_device_ptr->hasAlphaFlatStippled;
}
// Caches the given view rectangle in std3D_rectViewIdk and rebuilds the
// two triangles (0-1-2 and 0-2-3 over four corner vertices) that cover it.
// std3D_aViewIdk is zeroed first; only the x/y slots of each vertex
// (float offsets 0/8/16/24 and 1/9/17/25 respectively) are then filled,
// so the layout appears to be 8 floats per vertex — TODO confirm.
void std3D_InitializeViewport(rdRect *viewRect)
{
    float xMin, yMin, xMax, yMax;

    // Keep a copy of the viewport rect.
    std3D_rectViewIdk.x = viewRect->x;
    std3D_rectViewIdk.y = viewRect->y;
    std3D_rectViewIdk.width = viewRect->width;
    std3D_rectViewIdk.height = viewRect->height;

    memset(std3D_aViewIdk, 0, sizeof(std3D_aViewIdk));

    xMin = (float)std3D_rectViewIdk.x;
    yMin = (float)std3D_rectViewIdk.y;
    xMax = (float)(std3D_rectViewIdk.width + std3D_rectViewIdk.x);
    yMax = (float)(std3D_rectViewIdk.height + std3D_rectViewIdk.y);

    // Four corner vertices of the rect.
    std3D_aViewIdk[0]  = xMin;
    std3D_aViewIdk[1]  = yMin;
    std3D_aViewIdk[8]  = xMax;
    std3D_aViewIdk[9]  = yMin;
    std3D_aViewIdk[16] = xMax;
    std3D_aViewIdk[17] = yMax;
    std3D_aViewIdk[24] = xMin;
    std3D_aViewIdk[25] = yMax;

    // Two triangles forming the quad; flags value 0x8200 is taken
    // verbatim from the original (meaning not visible here).
    std3D_aViewTris[0].v1 = 0;
    std3D_aViewTris[0].v2 = 1;
    std3D_aViewTris[0].v3 = 2;
    std3D_aViewTris[0].texture = 0;
    std3D_aViewTris[0].flags = 0x8200;

    std3D_aViewTris[1].v1 = 0;
    std3D_aViewTris[1].v2 = 2;
    std3D_aViewTris[1].v3 = 3;
    std3D_aViewTris[1].texture = 0;
    std3D_aViewTris[1].flags = 0x8200;
}
// Stores the four dimension parameters in module globals (the names
// suggest GPU texture-size limits — unverified here).
// Returns the last argument unchanged, a decompilation artifact kept
// for compatibility.
int std3D_GetValidDimensions(int a1, int a2, int a3, int a4)
{
    std3D_gpuMaxTexSizeMaybe = a1;
    std3D_dword_53D66C = a2;
    std3D_dword_53D670 = a3;
    std3D_dword_53D674 = a4;
    return a4;
}
// Records the active render list handle, then (re)creates the execute
// buffer; returns std3D_CreateExecuteBuffer()'s result.
int std3D_SetRenderList(intptr_t a1)
{
    std3D_renderList = a1;
    return std3D_CreateExecuteBuffer();
}
// Returns the render list handle last set via std3D_SetRenderList().
intptr_t std3D_GetRenderList()
{
    return std3D_renderList;
}
|
04e10ef99ce5ce7ca5a0eabf888bc328edf73215
|
1af43c4ba32d78c60f007a4d068136ce575d917f
|
/tools/rtl433/rtltest/devices/elro_db286a.c
|
5a3db14363816c9871dd68147cd005375ab17a65
|
[
"MIT"
] |
permissive
|
gabonator/LA104
|
a4f1cdf2b3e513300d61c50fff091c5717abda9e
|
27d0eece7302c479da2cf86e881b6a51a535f93d
|
refs/heads/master
| 2023-08-31T22:09:36.272616
| 2023-08-27T20:08:08
| 2023-08-27T20:08:08
| 155,659,451
| 500
| 69
|
MIT
| 2023-08-17T08:44:32
| 2018-11-01T03:54:21
|
C
|
UTF-8
|
C
| false
| false
| 1,731
|
c
|
elro_db286a.c
|
/* Generic doorbell implementation for Elro DB286A devices
*
* Note that each device seems to have two codes, which alternate
* for every other button press.
*
* short is 456 us pulse, 1540 us gap
* long is 1448 us pulse, 544 us gap
* packet gap is 7016 us
*
* Example code: 37f62a6c80
*
* Copyright (C) 2016 Fabian Zaremba <fabian@youremail.eu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include "decoder.h"
/* Decode one Elro DB286A doorbell transmission.
 *
 * Looks for a row of exactly 33 bits repeated at least 5 times (a full
 * button press transmits ~14 repetitions), formats the first 32 bits as
 * an 8-hex-digit ID (the trailing 33rd bit is discarded), and emits a
 * decoded-data record.
 *
 * Returns 1 on success, 0 when no matching row is present.
 */
static int elro_db286a_callback(r_device *decoder, bitbuffer_t *bitbuffer)
{
    int row;
    uint8_t *bytes;
    char id_str[9]; /* 8 hex digits + NUL */
    data_t *data;

    row = bitbuffer_find_repeated_row(bitbuffer, 5, 33);
    if (row < 0)
        return 0;
    if (bitbuffer->bits_per_row[row] != 33)
        return 0;

    bytes = bitbuffer->bb[row];
    sprintf(id_str, "%02x%02x%02x%02x", bytes[0], bytes[1], bytes[2], bytes[3]);

    data = data_make(
            "model", "", DATA_STRING, "Elro-DB286A",
            "id", "ID", DATA_STRING, id_str,
            NULL);
    decoder_output_data(decoder, data);
    return 1;
}
// NULL-terminated list of the data keys this decoder can emit
// (must match the keys passed to data_make above).
static char *output_fields[] = {
    "model",
    "id",
    NULL
};
// Device registration. Timings (in microseconds) follow the signal
// description at the top of this file: 456 us short pulse, 1448 us long
// pulse, 7016 us packet gap.
r_device elro_db286a = {
    .name = "Elro DB286A Doorbell",
    .modulation = OOK_PULSE_PWM,
    .short_width = 456,   // us
    .long_width = 1448,   // us
    .gap_limit = 2000,    // us, above the longest in-packet gap (1540 us)
    .reset_limit = 8000,  // us, above the 7016 us packet gap
    .decode_fn = &elro_db286a_callback,
    .disabled = 0,
    .fields = output_fields
};
|
553ad1d6a2589e7bca6e30e52c5e37f851e2b4bf
|
2d05050d0ada29f7680b4df20c10bb85b0530e45
|
/src/runtime/hexagon/rpc/simulator/hexagon_sim_proto.h
|
fec9d7329dbed3c40bb474232e15c4bbd850c8ef
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
apache/tvm
|
87cb617f9a131fa44e1693303aaddf70e7a4c403
|
d75083cd97ede706338ab413dbc964009456d01b
|
refs/heads/main
| 2023-09-04T11:24:26.263032
| 2023-09-04T07:26:00
| 2023-09-04T07:26:00
| 70,746,484
| 4,575
| 1,903
|
Apache-2.0
| 2023-09-14T19:06:33
| 2016-10-12T22:20:28
|
Python
|
UTF-8
|
C
| false
| false
| 2,585
|
h
|
hexagon_sim_proto.h
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_RUNTIME_HEXAGON_RPC_SIMULATOR_HEXAGON_SIM_PROTO_H_
#define TVM_RUNTIME_HEXAGON_RPC_SIMULATOR_HEXAGON_SIM_PROTO_H_
// Fixed-size message exchanged between host and remote simulator client;
// packed so the byte layout matches on both ends of the link.
struct Message {
  // Message codes; their use is described in the protocol comment below.
  enum : uint32_t {
    kNone = 0,
    kAck,
    kTerminate,
    kReceiveStart,
    kReceiveEnd,
    kSendStart,
    kSendEnd,
  };
  enum : uint32_t {
    null_va = 0,  // placeholder when no buffer address accompanies the message
  };
  uint32_t code;  // one of the message codes above
  uint32_t len;   // byte count; exact meaning depends on `code` (see below)
  uint32_t va;    // buffer address, or null_va when none
} __attribute__((packed));
// Protocol:
//
// Copying data from host to remote:
//
// Host >-- [ kReceiveStart, len, null_va ] --> Remote
// * Remote client prepares a buffer with at least `len` bytes.
// Host <-- [ kAck, buf_size, buf_ptr ] <-- Remote
// * Host writes `nbytes` into buffer, `nbytes` <= `len`.
// Host >-- [ kReceiveEnd, nbytes, buf_ptr ] --> Remote
// * Remote client processes the data.
// Host <-- [ kAck, ___, ___ ] <-- Remote
//
// Copying data from remote to host:
//
// Host >-- [ kSendStart, len, null_va ] --> Remote
// * Remote client returns pointer to the buffer with the data to be read.
// * There should be at least `len` bytes ready in the buffer.
// Host <-- [ kAck, buf_size, buf_ptr ] <-- Remote
// * Host reads `nbytes` from buffer, `nbytes` <= `buf_size`.
// Host >-- [ kSendEnd , nbytes, buf_ptr ] --> Remote
// * Remote client processes the data.
// Host <-- [ kAck, ___, ___ ] <-- Remote
//
// Teminating server:
//
// Host >-- [ kTerminate, ___, ___ ] --> Remote
// Host <-- [ kAck, ___, ___ ] <-- Remote
// * Host continues execution of the client.
// * Client terminates.
#define DISPATCH_FUNCTION_NAME dispatch_875b2e3a28186123
#define MESSAGE_BUFFER_NAME message_buffer_71d6a7b93c318d7e
#endif // TVM_RUNTIME_HEXAGON_RPC_SIMULATOR_HEXAGON_SIM_PROTO_H_
|
8251036979dadaa135b44bf9c3b63eb04be315f3
|
42bde3cb96d1ec95a00805f00f6c96d97bc2fe34
|
/ext/event_loop.c
|
cd8d42fddb07d5fc49ccefa24cd1957683a3365a
|
[
"Apache-2.0"
] |
permissive
|
awslabs/aws-crt-php
|
69603d4c1502ea676ecacfbb8ac3f74b0449dba2
|
ed1a708b48b1ca586a916f9203b3420ee42c2ef7
|
refs/heads/main
| 2023-09-02T20:20:28.627140
| 2023-08-25T07:45:52
| 2023-08-25T07:45:52
| 298,111,548
| 243
| 14
|
Apache-2.0
| 2023-08-25T07:45:53
| 2020-09-23T22:43:33
|
PHP
|
UTF-8
|
C
| false
| false
| 1,503
|
c
|
event_loop.c
|
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include "php_aws_crt.h"
PHP_FUNCTION(aws_crt_event_loop_group_options_new) {
aws_php_parse_parameters_none();
aws_crt_event_loop_group_options *options = aws_crt_event_loop_group_options_new();
RETURN_LONG((zend_ulong)options);
}
PHP_FUNCTION(aws_crt_event_loop_group_options_release) {
zend_ulong php_options = 0;
aws_php_parse_parameters("l", &php_options);
aws_crt_event_loop_group_options *options = (void *)php_options;
aws_crt_event_loop_group_options_release(options);
}
PHP_FUNCTION(aws_crt_event_loop_group_options_set_max_threads) {
zend_ulong php_options = 0;
zend_ulong num_threads = 0;
aws_php_parse_parameters("ll", &php_options, &num_threads);
aws_crt_event_loop_group_options *options = (void *)php_options;
aws_crt_event_loop_group_options_set_max_threads(options, num_threads);
}
PHP_FUNCTION(aws_crt_event_loop_group_new) {
zend_ulong php_options = 0;
aws_php_parse_parameters("l", &php_options);
aws_crt_event_loop_group_options *options = (void *)php_options;
aws_crt_event_loop_group *elg = aws_crt_event_loop_group_new(options);
RETURN_LONG((zend_ulong)elg);
}
PHP_FUNCTION(aws_crt_event_loop_group_release) {
zend_ulong php_elg = 0;
aws_php_parse_parameters("l", &php_elg);
aws_crt_event_loop_group *elg = (void *)php_elg;
aws_crt_event_loop_group_release(elg);
}
|
e7dd1359c689d8b805484d14b7d5480d362105ec
|
c8b39acfd4a857dc15ed3375e0d93e75fa3f1f64
|
/Engine/Source/ThirdParty/coremod/coremod-4.2.6/src/loaders/mod_load.c
|
17a32a8ea1dd79081b36fae80c751703845b4b98
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
windystrife/UnrealEngine_NVIDIAGameWorks
|
c3c7863083653caf1bc67d3ef104fb4b9f302e2a
|
b50e6338a7c5b26374d66306ebc7807541ff815e
|
refs/heads/4.18-GameWorks
| 2023-03-11T02:50:08.471040
| 2022-01-13T20:50:29
| 2022-01-13T20:50:29
| 124,100,479
| 262
| 179
|
MIT
| 2022-12-16T05:36:38
| 2018-03-06T15:44:09
|
C++
|
UTF-8
|
C
| false
| false
| 6,105
|
c
|
mod_load.c
|
/* Extended Module Player format loaders
* Copyright (C) 1996-2014 Claudio Matsuoka and Hipolito Carraro Jr
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/* This loader recognizes the following variants of the Protracker
* module format:
*
* - Protracker M.K.
* - Fasttracker ?CHN and ??CH
*/
#include <ctype.h>
#include <limits.h>
#include "loader.h"
#include "mod.h"
static int mod_test (HIO_HANDLE *, char *, const int);
static int mod_load (struct module_data *, HIO_HANDLE *, const int);
/* Loader registration: human-readable format name plus the probe
 * (mod_test) and loader (mod_load) entry points. */
const struct format_loader mod_loader = {
    "Protracker",
    mod_test,
    mod_load
};
/* Probe for the Protracker/Fasttracker module magic at offset 1080:
 * "xxCH" (two digits, 1..32 channels), "xCHN" (one digit), or "M.K.".
 * On a match, rewinds and reads the 20-byte title into t.
 * Returns 0 when the file looks like a MOD, -1 otherwise.
 */
static int mod_test(HIO_HANDLE *f, char *t, const int start)
{
	char buf[4];
	int match = 0;

	hio_seek(f, start + 1080, SEEK_SET);
	if (hio_read(buf, 1, 4, f) < 4)
		return -1;

	/* "xxCH": two decimal digits giving the channel count (1..32). */
	if (!strncmp(buf + 2, "CH", 2) &&
	    isdigit((int)buf[0]) && isdigit((int)buf[1])) {
		int chn = (buf[0] - '0') * 10 + buf[1] - '0';
		if (chn > 0 && chn <= 32)
			match = 1;
	}

	/* "xCHN": a single decimal digit giving the channel count. */
	if (!match && !strncmp(buf + 1, "CHN", 3) && isdigit((int)*buf))
		match = 1;

	/* Classic 4-channel Protracker magic. */
	if (!match && memcmp(buf, "M.K.", 4) != 0)
		return -1;

	hio_seek(f, start, SEEK_SET);
	read_title(f, t, 20);

	return 0;
}
/* Load a Protracker/Fasttracker module.
 *
 * File layout (already validated by mod_test): 20-byte title, 31
 * instrument headers, song length byte, restart byte, 128-entry order
 * table, the 4-byte channel-count magic, then pattern data and raw
 * sample data.
 *
 * Returns 0 on success, -1 on read/format/allocation error.
 */
static int mod_load(struct module_data *m, HIO_HANDLE *f, const int start)
{
	struct xmp_module *mod = &m->mod;
	int i, j;
	struct xmp_event *event;
	struct mod_header mh;
	uint8 mod_event[4];
	char magic[8];
	int ptkloop = 0;	/* Protracker loop; never set in this
				 * function, so the FULLREP flag and the
				 * '!' marker below are effectively dead */

	LOAD_INIT();

	mod->ins = 31;		/* fixed instrument count for this format */
	mod->smp = mod->ins;	/* one sample per instrument */
	mod->chn = 0;		/* determined from the magic below */
	m->quirk |= QUIRK_MODRNG;

	hio_read(&mh.name, 20, 1, f);
	for (i = 0; i < 31; i++) {
		hio_read(&mh.ins[i].name, 22, 1, f);	/* Instrument name */
		mh.ins[i].size = hio_read16b(f);	/* Length in 16-bit words */
		mh.ins[i].finetune = hio_read8(f);	/* Finetune (signed nibble) */
		mh.ins[i].volume = hio_read8(f);	/* Linear playback volume */
		mh.ins[i].loop_start = hio_read16b(f);	/* Loop start in 16-bit words */
		mh.ins[i].loop_size = hio_read16b(f);	/* Loop size in 16-bit words */
	}
	mh.len = hio_read8(f);
	mh.restart = hio_read8(f);
	hio_read(&mh.order, 128, 1, f);
	memset(magic, 0, 8);
	hio_read(magic, 4, 1, f);

	/* Channel count from the magic (same variants as mod_test):
	 * "M.K." = 4, "xxCH" = xx, "xCHN" = x. */
	if (!memcmp(magic, "M.K.", 4)) {
		mod->chn = 4;
	} else if (!strncmp(magic + 2, "CH", 2) &&
			isdigit((int)magic[0]) && isdigit((int)magic[1])) {
		mod->chn = (*magic - '0') * 10 + magic[1] - '0';
	} else if (!strncmp(magic + 1, "CHN", 3) && isdigit((int)*magic)) {
		mod->chn = *magic - '0';
	} else {
		return -1;
	}

	/* NOTE(review): strncpy does not NUL-terminate when the source
	 * fills all 20 bytes; presumably mod->name is larger than 20 or
	 * pre-zeroed -- confirm against xmp_module. */
	strncpy(mod->name, (char *) mh.name, 20);

	mod->len = mh.len;
	/* mod->rst = mh.restart; */
	/* NOTE(review): the assignment above is commented out, so the
	 * clamp below operates on whatever mod->rst already holds. */
	if (mod->rst >= mod->len)
		mod->rst = 0;
	memcpy(mod->xxo, mh.order, 128);

	/* Highest pattern referenced by the order list. */
	for (i = 0; i < 128; i++) {
		/* This fixes dragnet.mod (garbage in the order list) */
		if (mod->xxo[i] > 0x7f)
			break;
		if (mod->xxo[i] > mod->pat)
			mod->pat = mod->xxo[i];
	}
	mod->pat++;

	if (instrument_init(mod) < 0)
		return -1;

	for (i = 0; i < mod->ins; i++) {
		if (subinstrument_alloc(mod, i, 1) < 0)
			return -1;

		/* Header sizes are in 16-bit words; convert to bytes. */
		mod->xxs[i].len = 2 * mh.ins[i].size;
		mod->xxs[i].lps = 2 * mh.ins[i].loop_start;
		mod->xxs[i].lpe = mod->xxs[i].lps + 2 * mh.ins[i].loop_size;
		if (mod->xxs[i].lpe > mod->xxs[i].len)
			mod->xxs[i].lpe = mod->xxs[i].len;
		mod->xxs[i].flg = (mh.ins[i].loop_size > 1 && mod->xxs[i].lpe >= 4) ?
			XMP_SAMPLE_LOOP : 0;
		mod->xxi[i].sub[0].fin = (int8)(mh.ins[i].finetune << 4);
		mod->xxi[i].sub[0].vol = mh.ins[i].volume;
		mod->xxi[i].sub[0].pan = 0x80;	/* center pan */
		mod->xxi[i].sub[0].sid = i;
		instrument_name(mod, i, mh.ins[i].name, 22);

		if (mod->xxs[i].len > 0)
			mod->xxi[i].nsm = 1;
	}

	mod->trk = mod->chn * mod->pat;

	set_type(m, mod->chn == 4 ? "Protracker" : "Fasttracker");

	MODULE_INFO();

	for (i = 0; i < mod->ins; i++) {
		D_(D_INFO "[%2X] %-22.22s %04x %04x %04x %c V%02x %+d %c\n",
				i, mod->xxi[i].name,
				mod->xxs[i].len, mod->xxs[i].lps, mod->xxs[i].lpe,
				(mh.ins[i].loop_size > 1 && mod->xxs[i].lpe > 8) ?
				'L' : ' ', mod->xxi[i].sub[0].vol,
				mod->xxi[i].sub[0].fin >> 4,
				ptkloop && mod->xxs[i].lps == 0 && mh.ins[i].loop_size > 1 &&
				mod->xxs[i].len > mod->xxs[i].lpe ? '!' : ' ');
	}

	if (pattern_init(mod) < 0)
		return -1;

	/* Load and convert patterns */
	D_(D_INFO "Stored patterns: %d", mod->pat);

	for (i = 0; i < mod->pat; i++) {
		if (pattern_tracks_alloc(mod, i, 64) < 0)
			return -1;

		/* 64 rows per pattern, 4 bytes per channel event. */
		for (j = 0; j < (64 * mod->chn); j++) {
			event = &EVENT (i, j % mod->chn, j / mod->chn);
			hio_read (mod_event, 1, 4, f);
			decode_protracker_event(event, mod_event);
		}
	}

	/* Load samples */
	D_(D_INFO "Stored samples: %d", mod->smp);

	for (i = 0; i < mod->smp; i++) {
		int flags;

		if (!mod->xxs[i].len)
			continue;

		flags = ptkloop ? SAMPLE_FLAG_FULLREP : 0;

		if (load_sample(m, f, flags, &mod->xxs[i], NULL) < 0)
			return -1;
	}

	/* More than 4 channels implies Fasttracker: swap the Protracker
	 * range quirk for the FT2 quirk set. */
	if (mod->chn > 4) {
		m->quirk &= ~QUIRK_MODRNG;
		m->quirk |= QUIRKS_FT2;
		m->read_event_type = READ_EVENT_FT2;
		m->quirk |= QUIRK_INVLOOP;
	}

	return 0;
}
|
a91f99fd21b0fd98965d146507c8e745a33749f7
|
2305bef48afa810d966bc48afdc37f15daf512e8
|
/unix/xdq.c
|
8ef28c8958ddbcde6b18e9adc8f373e547edba40
|
[] |
no_license
|
kentonv/dvorak-qwerty
|
1fd09b34dee5d3e0a2744c06874d36458fea213d
|
1819148d8497a1989e87349d083f723659d0b9e6
|
refs/heads/master
| 2022-05-08T08:41:24.764386
| 2022-04-28T18:42:35
| 2022-04-28T18:42:35
| 32,957,784
| 174
| 29
| null | 2022-04-06T05:01:29
| 2015-03-26T23:42:46
|
C++
|
UTF-8
|
C
| false
| false
| 13,774
|
c
|
xdq.c
|
// "Dvorak-Qwerty" keyboard layout for the X-Window system
// Copyright 2010 Google Inc. All rights reserved.
// http://dvorak-qwerty.googlecode.com
// Author: Kenton Varda (temporal@gmail.com; formerly kenton@google.com)
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// HOW TO USE
//
// Compile with:
//
// gcc xdq.c -o xdq -std=gnu99 -O2 -lX11
//
// If you don't have GCC, and the C compiler you do have is not C99-compliant,
// try compiling with a C++ compiler instead.
//
// Once compiled, make sure your keyboard layout is set to Dvorak and then run
// the "xdq" binary. While running, keys you press while holding control or
// alt (but not both together) will be remapped to Qwerty. To stop, just kill
// xdq.
// BACKGROUND
//
// This file implements the "Dvorak-Qwerty" keyboard layout, in which the layout
// is normally Dvorak but switches to Qwerty when control or alt is held. There
// are two reasons why I prefer this layout over straight Dvorak:
// - The common copy/paste hotkeys X, C, and V remain on the left hand, and so
// can be used while the right hand is on the mouse.
// - Holding the control key with my pinky tends to make it hard for me to
// remember where many keys are located, because my hands are no longer
// positioned as they would be when touch-typing. Meanwhile, the labels on
// my keyboard are Qwerty, because I no longer bother reconfiguring them
// physically. With the Dvorak-Qwerty layout, I can look at the keyboard to
// find the key I want.
//
// The layout is available by default on Mac OSX. Unfortunately, it is not
// typically shipped with Linux distributions. Even more unfortunately,
// although it is possible to define an XKB layout which implements
// Dvorak-Qwerty, doing so exposes a depressing number of bugs across the board
// in X apps. Since it is the responsibility of each X app to interpret the
// keyboard layout itself, rather than having the X server do the work,
// different GUI frameworks actually tend to have different bugs that kick in
// when using such a layout. Fixing them all would be infeasible.
//
// This program instead works by passively grabbing (with XGrabKey()) all
// relevant combinations, rewriting the event, and then using XSendEvent() to
// send it to the focused window.
//
// xdq can only remap program-level hotkeys, not system-level hotkeys, as
// system-level hotkeys are typically themselves implemented using XGrabKey().
// To avoid conflicts with system-level hotkeys, xdq only grabs key combinations
// involving holding control *or* alt, not both together. xdq also does NOT
// try to grab anything involving the "windows" key. If you would like xdq to
// grab all these keys as well, system hotkeys be damned, then compile with
// -DXDQ_GREEDY.
// IF YOU LIKE IT
//
// If you find this useful, consider sending me a note at temporal@gmail.com to
// say so. Otherwise people only contact me when things don't work and that's
// depressing. :)
#include <X11/Xlib.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <unistd.h>
// Number of elements in a statically-sized array.
#define arraysize(array) (sizeof(array) / sizeof((array)[0]))
// Test bit i in a packed bit vector such as the 32-byte keymap returned by
// XQueryKeymap() (one bit per keycode, LSB-first within each byte).
#define bit_is_set(arr, i) ((arr[i/8] & (1 << (i % 8))) != 0)
// X keycodes of the modifier keys we may need to re-send (see the NX client
// workaround in main()).
unsigned int kModifierKeycodes[] = {
  37, 105,  // ctrl (L, R)
  64, 108,  // alt (L, R)
  50, 62,   // shift (L, R)
};
// X keycodes corresponding to keys, regardless of layout.
// These are the physical positions whose Qwerty/Dvorak letters are listed in
// kQwerty/kDvorak below, in the same order.
const int kKeycodes[] = {
  20, 21,
  24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
  38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
  52, 53, 54, 55, 56, 57, 58, 59, 60, 61
};
// Letter produced by each keycode above under the Qwerty layout...
const char kQwerty[] =
  "-="
  "qwertyuiop[]"
  "asdfghjkl;'"
  "zxcvbnm,./";
// ...and under the Dvorak layout.
const char kDvorak[] =
  "[]"
  "',.pyfgcrl/="
  "aoeuidhtns-"
  ";qjkxbmwvz";
// The user has their keyboard layout set to Dvorak.  When we get a keycode, we
// map it to a letter acconding to Qwerty, then figure out which keycode would
// map to the same letter in Dvorak.  This tells us what keycode to send to the
// focus window.  For efficiency, we build a lookup table in keycode_mapping.
//
// keycode --qwerty--> letter --reverse-dvorak--> new keycode
//
// Entries that have no remapping are left 0 (filled in by InitKeycodeMapping).
int keycode_mapping[256];
// Build keycode_mapping: for each physical key, take the letter Qwerty would
// produce there and find the keycode that produces the same letter under
// Dvorak.  Keycodes with no mapping stay 0.
void InitKeycodeMapping() {
  const int count = arraysize(kKeycodes);

  // First invert the Dvorak table: letter -> keycode that emits it.
  int letter_to_dvorak_keycode[128];
  memset(letter_to_dvorak_keycode, 0, sizeof(letter_to_dvorak_keycode));
  for (int k = 0; k < count; k++) {
    letter_to_dvorak_keycode[(int) kDvorak[k]] = kKeycodes[k];
  }

  memset(keycode_mapping, 0, sizeof(keycode_mapping));
  for (int k = 0; k < count; k++) {
    int mapped = letter_to_dvorak_keycode[(int) kQwerty[k]];
    assert(mapped != 0);  // every Qwerty letter must also exist in Dvorak
    keycode_mapping[kKeycodes[k]] = mapped;
  }
}
// We receive X errors if we grab keys that are already grabbed. This is not
// really fatal so we catch them.
// Count of XGrabKey() calls rejected because another client holds the grab.
int failed_grab_count = 0;
// The error handler that was installed before ours; non-BadAccess errors are
// forwarded to it unchanged.
int (*original_error_handler)(Display* display, XErrorEvent* error);

int HandleError(Display* display, XErrorEvent* error) {
  // Anything other than BadAccess is not ours to swallow.
  if (error->error_code != BadAccess) {
    return original_error_handler(display, error);
  }
  // BadAccess: the key combination was already grabbed; just tally it.
  failed_grab_count++;
  return 0;
}
// Entry point: grab the relevant key combinations on the root window, then
// loop forever remapping grabbed KeyPress/KeyRelease pairs to Qwerty and
// re-sending them to the focused window.  Never returns except on
// display-open failure (exit code 1).
int main(int argc, char* argv[]) {
  InitKeycodeMapping();
  // Open the display and get the root window.
  Display* display = XOpenDisplay(NULL);
  if (display == NULL) {
    fprintf(stderr, "Couldn't open display.\n");
    return 1;
  }
  Window window = DefaultRootWindow(display);
  // Establish grabs to intercept the events we want.
  if (0) {
    // Method 1: Grab the actual modifier keys.
    //
    // The keycodes here are for left control, right control, left alt, and
    // command ("windows"), not necessarily in that order (I forget).  Right
    // alt doesn't seem to produce any event for me; maybe my keyboard is
    // broken.
    //
    // In any case, the way XGrabKey() works, while the grabbed key is held
    // down, *all* keyboard events are redirected to the grabber (us).  This
    // may seem perfect, but has some down-sides:
    // * While the grab is in effect, the focused window is apparently notified
    //   that it is no longer receiving keystrokes directly.  Many text editors
    //   respond by hiding the cursor.  This is particularly annoying when using
    //   ctrl+arrows to move the cursor a word at a time, because you cannot
    //   actually see what you're doing.
    // * I'm not sure how this interacts with other grabs in effect on specific
    //   key combinations.  E.g. what happens if the system is configured to
    //   open some particular program when ctrl+alt+shift+z is pressed?  Will
    //   it get the event, or will we?  If we get the event, we'll forward it
    //   to the focused window, which means whatever the system wanted to do
    //   with it won't happen, which would be bad.
    int keycodes[] = {37, 64, 109, 115};
    for (int i = 0; i < arraysize(keycodes); i++) {
      XGrabKey(display, keycodes[i], 0, window, True,
               GrabModeAsync, GrabModeAsync);
    }
  } else {
    // Method 2: Grab each individual key combination.
    //
    // This solves the cursor-disappearing problem with method 1.  We can also
    // avoid interfering with system hotkeys by only grabbing ctrl and alt
    // individually but not when used together.  Compile with -DXDQ_GREEDY if
    // you really want to grab everything.
    // We will try to grab all of these modifier combinations.
    unsigned int modifiers[] = {
      // Control.
      ControlMask,
      ControlMask | ShiftMask,
      // Alt.
      Mod1Mask,
      Mod1Mask | ShiftMask,
// The file header documents -DXDQ_GREEDY, but this check historically read
// XQD_GREEDY (transposed).  Accept both spellings so the documented flag works
// while builds relying on the old misspelling keep behaving the same.
#if defined(XDQ_GREEDY) || defined(XQD_GREEDY)
      // Command/"Windows" key.  This is usually used for system-level hotkeys,
      // so only grab it in greedy mode.
      Mod4Mask,
      Mod4Mask | ShiftMask,
      // Control + Alt.  Also typically used for system-level hotkeys.
      ControlMask | Mod1Mask,
      ControlMask | Mod1Mask | ShiftMask,
#endif
      // TODO(kenton):  Other combinations?
    };
    // Often, some keys are already grabbed, e.g. by the desktop environment.
    // Set an error handler so that we can ignore those.
    original_error_handler = XSetErrorHandler(&HandleError);
    for (int i = 0; i < arraysize(kKeycodes); i++) {
      for (int j = 0; j < arraysize(modifiers); j++) {
        XGrabKey(display, kKeycodes[i], modifiers[j], window, True,
                 GrabModeAsync, GrabModeAsync);
      }
    }
  }
  // Make sure all errors have been reported, then print how many errors we saw.
  XSync(display, False);
  if (failed_grab_count != 0) {
    fprintf(stderr, "Failed to grab %d key combinations.\n", failed_grab_count);
    fprintf(stderr,
      "This is probably because some hotkeys are already grabbed by the system.\n"
      "Unfortunately, these system-wide hotkeys cannot be automatically remapped by\n"
      "this tool.  However, you can usually configure them manually.\n");
  }
  // Event loop.
  // state tracks the press/release pair: 0 = idle, 1 = saw KeyPress,
  // 2 = saw the matching KeyRelease (ready to forward both).
  XEvent down, up;
  int state = 0;
  char keysAtPress[32];
  for (;;) {
    XEvent event;
    XNextEvent(display, &event);
    switch (event.type) {
      case KeyPress:
      case KeyRelease: {
        if (event.xkey.send_event) {
          // An event we (or someone) sent with XSendEvent came back; don't
          // remap it again or we'd loop forever.
          fprintf(stderr, "SendEvent loop?\n");
          break;
        }
        // Interpret the key event, remap it, and save for later.
        // NOTE(kenton):  I think this conditional is always true but better
        // safe than sorry.
        if (event.xkey.keycode >= 0 &&
            event.xkey.keycode < arraysize(keycode_mapping)) {
          int new_keycode = keycode_mapping[event.xkey.keycode];
          if (new_keycode != 0) {
            event.xkey.keycode = new_keycode;
          }
          // We can't actually send the event yet because during a grab the
          // active window loses keyboard focus.  Many apps are fine with
          // receiving key events when not focused, but some get very confused.
          // The grab (and loss of focus) extends from the KeyPress to the
          // KeyRelease event, so we'll send both remapped events immediately
          // after KeyRelease.  Sadly this makes hotkeys feel laggy, but oh
          // well.
          if (event.type == KeyPress) {
            down = event;
            state = 1;
            XQueryKeymap(display, keysAtPress);
          } else if (state == 1 && event.type == KeyRelease &&
                     event.xkey.keycode == down.xkey.keycode) {
            up = event;
            state = 2;
          }
        }
        // If we have received both a KeyPress and a KeyRelease, send the
        // remapped events to the currently-focused window.
        if (state == 2) {
          // Find the focused window.
          int junk;
          XGetInputFocus(display, &down.xkey.window, &junk);
          up.xkey.window = down.xkey.window;
          // NX client forgets which modifier keys are down whenever it loses
          // focus, so check which are down and re-send keydown events for them.
          for (unsigned int i = 0; i < arraysize(kModifierKeycodes); i++) {
            unsigned int keycode = kModifierKeycodes[i];
            if (bit_is_set(keysAtPress, keycode)) {
              XEvent modifier = down;
              modifier.xkey.keycode = keycode;
              XSendEvent(display, modifier.xkey.window, True, 0, &modifier);
            }
          }
          // Send our remapped KeyPress followed by KeyRelease.
          XSendEvent(display, down.xkey.window, True, 0, &down);
          XSendEvent(display, up.xkey.window, True, 0, &up);
          state = 0;
          // If some modifier keys were released between when the keypress
          // happened and now, we should send release events.
          char keys[32];
          XQueryKeymap(display, keys);
          for (unsigned int i = 0; i < arraysize(kModifierKeycodes); i++) {
            unsigned int keycode = kModifierKeycodes[i];
            if (bit_is_set(keysAtPress, keycode) &&
                !bit_is_set(keys, keycode)) {
              XEvent modifier = up;
              modifier.xkey.keycode = keycode;
              XSendEvent(display, modifier.xkey.window, True, 0, &modifier);
            }
          }
        }
        break;
      }
      default:
        fprintf(stderr, "Unknown event: %d\n", event.type);
        break;
    }
  }
}
|
623bfa17bba7a685510d27f31871ea8849575ca7
|
79d343002bb63a44f8ab0dbac0c9f4ec54078c3a
|
/lib/libc/musl/src/math/fmaf.c
|
7c65acf1fc5e07eabcad635b31af3ee747319106
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-musl-exception"
] |
permissive
|
ziglang/zig
|
4aa75d8d3bcc9e39bf61d265fd84b7f005623fc5
|
f4c9e19bc3213c2bc7e03d7b06d7129882f39f6c
|
refs/heads/master
| 2023-08-31T13:16:45.980913
| 2023-08-31T05:50:29
| 2023-08-31T05:50:29
| 40,276,274
| 25,560
| 2,399
|
MIT
| 2023-09-14T21:09:50
| 2015-08-06T00:51:28
|
Zig
|
UTF-8
|
C
| false
| false
| 3,044
|
c
|
fmaf.c
|
/* origin: FreeBSD /usr/src/lib/msun/src/s_fmaf.c */
/*-
* Copyright (c) 2005-2011 David Schultz <das@FreeBSD.ORG>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <fenv.h>
#include <math.h>
#include <stdint.h>
/*
 * Fused multiply-add: Compute x * y + z with a single rounding error.
 *
 * A double has more than twice as much precision than a float, so
 * direct double-precision arithmetic suffices, except where double
 * rounding occurs.
 */
float fmaf(float x, float y, float z)
{
	#pragma STDC FENV_ACCESS ON
	double xy, result;
	union {double f; uint64_t i;} u;
	int e;

	/* x*y is exact in double (float has <= 24 significand bits, double 53),
	   so the only rounding happens in the addition below. */
	xy = (double)x * y;
	result = xy + z;
	u.f = result;
	e = u.i>>52 & 0x7ff;
	/* Common case: The double precision result is fine. */
	/* "Halfway" means the double result lies exactly between two adjacent
	   floats: low 29 bits == 0x10000000 (guard bit set, sticky bits clear). */
	if ((u.i & 0x1fffffff) != 0x10000000 || /* not a halfway case */
		e == 0x7ff ||                       /* NaN */
		(result - xy == z && result - z == xy) || /* exact */
		fegetround() != FE_TONEAREST)       /* not round-to-nearest */
	{
		/*
		underflow may not be raised correctly, example:
		fmaf(0x1p-120f, 0x1p-120f, 0x1p-149f)
		*/
#if defined(FE_INEXACT) && defined(FE_UNDERFLOW)
		/* e in the subnormal-float exponent range: redo the add with a
		   volatile operand so the compiler cannot fold it, then fix up the
		   inexact/underflow flags by hand. */
		if (e < 0x3ff-126 && e >= 0x3ff-149 && fetestexcept(FE_INEXACT)) {
			feclearexcept(FE_INEXACT);
			/* TODO: gcc and clang bug workaround */
			volatile float vz = z;
			result = xy + vz;
			if (fetestexcept(FE_INEXACT))
				feraiseexcept(FE_UNDERFLOW);
			else
				feraiseexcept(FE_INEXACT);
		}
#endif
		z = result;
		return z;
	}

	/*
	 * If result is inexact, and exactly halfway between two float values,
	 * we need to adjust the low-order bit in the direction of the error.
	 */
	double err;
	int neg = u.i >> 63;
	/* err is computed exactly: it is the part of xy+z lost to rounding. */
	if (neg == (z > xy))
		err = xy - result + z;
	else
		err = z - result + xy;
	/* Nudge the double's last bit toward the true result so the final
	   double->float conversion rounds the halfway case correctly. */
	if (neg == (err < 0))
		u.i++;
	else
		u.i--;
	z = u.f;
	return z;
}
|
3ce01b54c9e10223ff778209701373f6e4c78d93
|
fc89df5d65edcfada41a5bcc3b5c52d7b00d9d13
|
/components/display/core/gds_text.c
|
b2ef8903889d5579472691d2fe9f27509ca8d5a6
|
[] |
no_license
|
sle118/squeezelite-esp32
|
b83f580b4de4a517be87ca1db8fead2223694068
|
149c9d8142d7a3f8679efc73985609eb6dee73a5
|
refs/heads/master-v4.3
| 2023-08-23T05:06:18.461579
| 2023-08-22T23:30:18
| 2023-08-22T23:30:18
| 187,874,998
| 698
| 84
| null | 2023-09-09T22:58:50
| 2019-05-21T16:25:56
|
C
|
UTF-8
|
C
| false
| false
| 6,118
|
c
|
gds_text.c
|
/*
* (c) Philippe G. 2019, philippe_44@outlook.com
*
* This software is released under the MIT License.
* https://opensource.org/licenses/MIT
*
*/
#include <string.h>
#include <ctype.h>
#include <stdint.h>
#include <arpa/inet.h>
#include "esp_log.h"
#include "gds_private.h"
#include "gds.h"
#include "gds_draw.h"
#include "gds_text.h"
/* classic unsafe max macro -- arguments are evaluated twice, so avoid side effects */
#define max(a,b) (((a) > (b)) ? (a) : (b))
/* tag used for all ESP_LOGx output from this file */
static char TAG[] = "gds";
/****************************************************************************************
 * Map a symbolic GDS_FONT_* id to a concrete font definition for this device
 */
static const struct GDS_FontDef *GuessFont( struct GDS_Device *Device, int FontType) {
	switch(FontType) {
	case GDS_FONT_DEFAULT:
		return Device->Font;
	case GDS_FONT_LINE_1:
		return &Font_line_1;
	case GDS_FONT_LINE_2:
		return &Font_line_2;
	case GDS_FONT_MEDIUM:
		//return &Font_droid_sans_fallback_15x17;
		// deliberate fall-through: MEDIUM currently shares the SMALL font
	case GDS_FONT_SMALL:
	default:
		return &Font_droid_sans_fallback_11x13;
#ifdef USE_LARGE_FONTS
	case GDS_FONT_LARGE:
		return &Font_droid_sans_fallback_24x28;
	case GDS_FONT_SEGMENT:
		// 7-segment font sized to the panel height
		if (Device->Height == 32) return &Font_Tarable7Seg_16x32;
		else return &Font_Tarable7Seg_32x64;
#else
	case GDS_FONT_LARGE:
	case GDS_FONT_SEGMENT:
		// large fonts compiled out: warn once per call and degrade to small
		ESP_LOGW(TAG, "large fonts disabled");
		//return &Font_droid_sans_fallback_15x17;
		return &Font_droid_sans_fallback_11x13;
#endif
	}
}
/****************************************************************************************
 * Set the font of text line N from a symbolic GDS_FONT_* id
 */
bool GDS_TextSetFontAuto(struct GDS_Device* Device, int N, int FontType, int Space) {
	// resolve the symbolic id, then delegate to the explicit-font variant
	return GDS_TextSetFont( Device, N, GuessFont( Device, FontType ), Space );
}
/****************************************************************************************
 * Set the font (and leading Space) of text line N, with N counted 1..MAX_LINES.
 * Recomputes the absolute Y position of lines 1..N, so lines are expected to be
 * configured in increasing order.  Returns false when N is out of range or when
 * the line no longer fits on the display.
 */
bool GDS_TextSetFont(struct GDS_Device* Device, int N, const struct GDS_FontDef *Font, int Space) {
	// reject out-of-range line numbers; the original check (--N >= MAX_LINES)
	// caught only N too large, while N < 1 would have produced a negative
	// Lines[] subscript
	if (N < 1 || N > MAX_LINES) return false;
	N--;

	Device->Lines[N].Font = Font;

	// re-calculate lines absolute position
	Device->Lines[N].Space = Space;
	Device->Lines[0].Y = Device->Lines[0].Space;
	for (int i = 1; i <= N; i++) Device->Lines[i].Y = Device->Lines[i-1].Y + Device->Lines[i-1].Font->Height + Device->Lines[i].Space;

	ESP_LOGI(TAG, "Adding line %u at %d (height:%u)", N + 1, Device->Lines[N].Y, Device->Lines[N].Font->Height);

	if (Device->Lines[N].Y + Device->Lines[N].Font->Height > Device->Height) {
		ESP_LOGW(TAG, "line does not fit display");
		return false;
	}

	return true;
}
/****************************************************************************************
 * Draw Text on line N (1-based) at position Pos (pixel column, or the special
 * GDS_TEXT_RIGHT / GDS_TEXT_CENTER values).  Attr is a bitmask of GDS_TEXT_*
 * options (monospace, clear line / clear to end-of-line, immediate update).
 * Returns true when the rendered text fits within the display width.
 */
bool GDS_TextLine(struct GDS_Device* Device, int N, int Pos, int Attr, char *Text) {
	int Width, X = Pos;

	// counting 1..n
	N--;

	GDS_SetFont( Device, Device->Lines[N].Font );
	if (Attr & GDS_TEXT_MONOSPACE) GDS_FontForceMonospace( Device, true );

	Width = GDS_FontMeasureString( Device, Text );

	// adjusting position, erase only EoL for right-justified
	if (Pos == GDS_TEXT_RIGHT) X = Device->TextWidth - Width - 1;
	else if (Pos == GDS_TEXT_CENTER) X = (Device->TextWidth - Width) / 2;

	// erase if requested
	if (Attr & GDS_TEXT_CLEAR) {
		// clamp to the panel; a line's Y can be off-screen if fonts overflow
		int Y_min = max(0, Device->Lines[N].Y), Y_max = max(0, Device->Lines[N].Y + Device->Lines[N].Font->Height);
		// with CLEAR_EOL only the region from X rightwards is blanked
		for (int c = (Attr & GDS_TEXT_CLEAR_EOL) ? X : 0; c < Device->TextWidth; c++)
			for (int y = Y_min; y < Y_max; y++)
				DrawPixelFast( Device, c, y, GDS_COLOR_BLACK );
	}

	GDS_FontDrawString( Device, X, Device->Lines[N].Y, Text, GDS_COLOR_WHITE );

	ESP_LOGD(TAG, "displaying %s line %u (x:%d, attr:%u)", Text, N+1, X, Attr);

	// update whole display if requested
	Device->Dirty = true;
	if (Attr & GDS_TEXT_UPDATE) GDS_Update( Device );

	return Width + X < Device->TextWidth;
}
/****************************************************************************************
 * Measure the pixel width of Text as it would render on line N (1-based),
 * honoring the GDS_TEXT_MONOSPACE attribute; the active font is restored
 * before returning.
 */
int GDS_GetTextWidth(struct GDS_Device* Device, int N, int Attr, char *Text) {
	// swap in line N's font, remembering whatever was active
	const struct GDS_FontDef *Previous = GDS_SetFont( Device, Device->Lines[N-1].Font );

	if (Attr & GDS_TEXT_MONOSPACE) {
		GDS_FontForceMonospace( Device, true );
	}

	int Measured = GDS_FontMeasureString( Device, Text );

	// put the previous font back so callers see no side effect
	GDS_SetFont( Device, Previous );
	return Measured;
}
/****************************************************************************************
 * Try to align string for better scrolling visual. there is probably much better to do
 *
 * If String already fits the display, returns 0.  Otherwise pads String with
 * spaces and then repeats its own beginning until the extension spans one full
 * display width, writing in place (String must have room for Max+1 bytes).
 * Returns the pixel width of the padded-but-not-repeated string (the scroll
 * wrap boundary).
 */
int GDS_TextStretch(struct GDS_Device* Device, int N, char *String, int Max) {
	char Space[] = " ";
	int Len = strlen(String), Extra = 0, Boundary;

	// counting 1..n
	N--;

	// we might already fit
	GDS_SetFont( Device, Device->Lines[N].Font );
	if (GDS_FontMeasureString( Device, String ) <= Device->TextWidth) return 0;

	// add some space for better visual
	// NOTE(review): if strlen(String) > Max on entry, Max-Len underflows to a
	// huge size_t in strncat -- callers presumably guarantee Len <= Max; verify
	strncat(String, Space, Max-Len);
	String[Max] = '\0';
	Len = strlen(String);

	// mark the end of the extended string
	Boundary = GDS_FontMeasureString( Device, String );

	// add a full display width by cycling characters from the start
	while (Len < Max && GDS_FontMeasureString( Device, String ) - Boundary < Device->TextWidth) {
		String[Len++] = String[Extra++];
		String[Len] = '\0';
	}

	return Boundary;
}
/****************************************************************************************
 * Draw Text anchored at a screen position (top-left, middle-left, bottom-left
 * or centered) using the font selected by FontType.  Attr may request a full
 * clear before drawing and/or an immediate display update.
 *
 * NOTE(review): the signature is variadic and va_start/va_end are called, but
 * the va_list is never consumed -- Text is drawn literally, not formatted.
 * Confirm whether printf-style formatting was intended before relying on it.
 */
void GDS_TextPos(struct GDS_Device* Device, int FontType, int Where, int Attr, char *Text, ...) {
	va_list args;
	TextAnchor Anchor = TextAnchor_Center;

	if (Attr & GDS_TEXT_CLEAR) GDS_Clear( Device, GDS_COLOR_BLACK );

	// a NULL Text still allows the clear above to take effect
	if (!Text) return;

	va_start(args, Text);

	switch(Where) {
	case GDS_TEXT_TOP_LEFT:
	default:
		Anchor = TextAnchor_NorthWest;
		break;
	case GDS_TEXT_MIDDLE_LEFT:
		Anchor = TextAnchor_West;
		break;
	case GDS_TEXT_BOTTOM_LEFT:
		Anchor = TextAnchor_SouthWest;
		break;
	case GDS_TEXT_CENTERED:
		Anchor = TextAnchor_Center;
		break;
	}

	ESP_LOGD(TAG, "Displaying %s at %u with attribute %u", Text, Anchor, Attr);

	GDS_SetFont( Device, GuessFont( Device, FontType ) );
	GDS_FontDrawAnchoredString( Device, Anchor, Text, GDS_COLOR_WHITE );

	Device->Dirty = true;
	if (Attr & GDS_TEXT_UPDATE) GDS_Update( Device );

	va_end(args);
}
|
9a7cf6576ba3ba38b9e9319fb9ab0dea0858737f
|
37a06095ec3cbb1891a29e9959fc3d500bb3a6dc
|
/coe.c
|
23879eb038e45b649df5d6755200bcb3b5dd36bb
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
notqmail/notqmail
|
bb5e9b2c6b2e8b5d38e87ebc6dc6c59663720ca0
|
1fd3f91cf93e96be9bc9b8c6e7babc769fda359d
|
refs/heads/master
| 2023-08-30T21:17:49.742353
| 2020-06-17T17:37:59
| 2022-11-15T13:11:47
| 195,577,352
| 294
| 31
|
NOASSERTION
| 2022-11-16T16:28:19
| 2019-07-06T19:46:51
|
C
|
UTF-8
|
C
| false
| false
| 92
|
c
|
coe.c
|
#include "coe.h"
#include <fcntl.h>
/*
 * coe - set the close-on-exec flag on file descriptor fd.
 *
 * Returns fcntl()'s result: 0 on success, -1 on error with errno set.
 * Modernized from the original K&R definition to an ANSI prototype and from
 * the magic constant 1 to the symbolic FD_CLOEXEC (which POSIX defines as 1),
 * so behavior and ABI are unchanged.
 */
int coe(int fd)
{
	return fcntl(fd, F_SETFD, FD_CLOEXEC);
}
|
b49ff67848dec5c4f210087f96af7898db376da8
|
ae31542273a142210a1ff30fb76ed9d45d38eba9
|
/src/backend/fts/test/ftsprobe_test.c
|
147125c3666a29a6f4b2b4071cddde40e43caea1
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"PostgreSQL",
"OpenSSL",
"LicenseRef-scancode-stream-benchmark",
"ISC",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-ssleay-windows",
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
greenplum-db/gpdb
|
8334837bceb2d5d51a684500793d11b190117c6a
|
2c0f8f0fb24a2d7a7da114dc80f5f5a2712fca50
|
refs/heads/main
| 2023-08-22T02:03:03.806269
| 2023-08-21T22:59:53
| 2023-08-22T01:17:10
| 44,781,140
| 6,417
| 2,082
|
Apache-2.0
| 2023-09-14T20:33:42
| 2015-10-23T00:25:17
|
C
|
UTF-8
|
C
| false
| false
| 48,377
|
c
|
ftsprobe_test.c
|
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include "cmockery.h"
#include "postgres.h"
#include "utils/memutils.h"
#include <poll.h>
/* Knobs controlling the poll(2) mock below. */
static int poll_expected_return_value;
static int poll_expected_revents;
#define poll poll_mock
static struct pollfd *PollFds;
/*
 * Mock replacement for poll(2): ignores its arguments, marks the first
 * poll_expected_return_value entries of the global PollFds array with
 * poll_expected_revents, and returns that same count.
 */
static int
poll_mock (struct pollfd * p1, nfds_t p2, int p3)
{
	int ready = poll_expected_return_value;
	int slot;

	for (slot = 0; slot < ready; slot++)
		PollFds[slot].revents = poll_expected_revents;

	return ready;
}
#include "postgres.h"
/* Actual function body */
#include "../ftsprobe.c"
/*
 * Point the module-global ftsProbeInfo at function-local static storage so the
 * tests need no shared-memory segment.
 */
static void
InitFtsProbeInfo(void)
{
	static FtsProbeInfo probe_info_storage;

	ftsProbeInfo = &probe_info_storage;
}
/* Program the poll() mock: its next return value and the revents it reports. */
static void poll_will_return(int expected_return_value, int revents)
{
	poll_expected_revents = revents;
	poll_expected_return_value = expected_return_value;
}
/*
 * Function to help create representation of gp_segment_configuration, for
 * ease of testing different scenarios. Using the same can mock out different
 * configuration layouts.
 *
 * --------------------------------
 * Inputs:
 *    segCnt - number of primary segments the configuration should mock
 *    has_mirrors - controls if mirrors corresponding to primary are created
 * --------------------------------
 *
 * Function always adds coordinator to the configuration. Also, all the segments
 * are by default marked up. Tests leverage to create initial configuration
 * using this and then modify the same as per needs to mock different
 * scenarios like mirror down, primary down, etc...
 */
static CdbComponentDatabases *
InitTestCdb(int segCnt, bool has_mirrors, char default_mode)
{
	int i = 0;
	int mirror_multiplier = 1;

	if (has_mirrors)
		mirror_multiplier = 2;

	CdbComponentDatabases *cdb =
		(CdbComponentDatabases *) palloc(sizeof(CdbComponentDatabases));

	cdb->total_entry_dbs = 1;
	cdb->total_segment_dbs = segCnt * mirror_multiplier;	/* with mirror? */
	cdb->total_segments = segCnt;
	cdb->entry_db_info = palloc(
		sizeof(CdbComponentDatabaseInfo) * cdb->total_entry_dbs);
	cdb->segment_db_info = palloc(
		sizeof(CdbComponentDatabaseInfo) * cdb->total_segment_dbs);

	/* create the coordinator entry_db_info */
	CdbComponentDatabaseInfo *cdbinfo = &cdb->entry_db_info[0];

	cdbinfo->config = (GpSegConfigEntry*)palloc(sizeof(GpSegConfigEntry));
	cdbinfo->config->dbid = 1;
	cdbinfo->config->segindex = -1;
	cdbinfo->config->role = GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY;
	cdbinfo->config->preferred_role = GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY;
	cdbinfo->config->status = GP_SEGMENT_CONFIGURATION_STATUS_UP;

	/* create the segment_db_info entries */
	for (i = 0; i < cdb->total_segment_dbs; i++)
	{
		CdbComponentDatabaseInfo *cdbinfo = &cdb->segment_db_info[i];

		cdbinfo->config = (GpSegConfigEntry*)palloc(sizeof(GpSegConfigEntry));
		cdbinfo->config->dbid = i + 2;

		/*
		 * With mirrors, entries come in primary/mirror pairs sharing one
		 * content id (i / 2).  Without mirrors every entry is its own
		 * content; the previous unconditional i / 2 assigned duplicate
		 * content ids in that case.
		 */
		cdbinfo->config->segindex = has_mirrors ? i / 2 : i;

		/* hostip holds the dbid rendered as a string (fits dbid <= 999) */
		cdbinfo->config->hostip = palloc(4);
		snprintf(cdbinfo->config->hostip, 4, "%d", cdbinfo->config->dbid);
		cdbinfo->config->port = cdbinfo->config->dbid;

		if (has_mirrors)
		{
			/* even index -> primary, odd index -> its mirror */
			cdbinfo->config->role = i % 2 ?
				GP_SEGMENT_CONFIGURATION_ROLE_MIRROR :
				GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY;

			cdbinfo->config->preferred_role = i % 2 ?
				GP_SEGMENT_CONFIGURATION_ROLE_MIRROR :
				GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY;
		}
		else
		{
			cdbinfo->config->role = GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY;
			cdbinfo->config->preferred_role = GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY;
		}
		cdbinfo->config->status = GP_SEGMENT_CONFIGURATION_STATUS_UP;
		cdbinfo->config->mode = default_mode;
	}

	return cdb;
}
/*
 * Put every per-segment probe slot into the given state, stamping the current
 * time as startTime and attaching a freshly zeroed PGconn to each pair.
 */
static void
init_fts_context(fts_context *context, FtsMessageState state)
{
	pg_time_t probe_start = (pg_time_t) time(NULL);
	int idx;

	for (idx = 0; idx < context->num_pairs; idx++)
	{
		fts_segment_info *info = &context->perSegInfos[idx];

		info->state = state;
		info->startTime = probe_start;
		info->conn = (PGconn *) palloc0(sizeof(PGconn));
	}
}
/*
 * Linear scan of the mocked configuration for the entry with the given
 * content id acting in the given role; NULL when no such entry exists.
 */
static CdbComponentDatabaseInfo *
GetSegmentFromCdbComponentDatabases(CdbComponentDatabases *dbs,
									int16 segindex, char role)
{
	int pos = 0;

	while (pos < dbs->total_segment_dbs)
	{
		CdbComponentDatabaseInfo *candidate = &dbs->segment_db_info[pos];

		if (candidate->config->segindex == segindex &&
			candidate->config->role == role)
			return candidate;
		pos++;
	}
	return NULL;
}
/*
 * Register cmockery expectations describing the probeWalRepUpdateConfig()
 * calls the test anticipates for a primary/mirror pair: which dbid/segindex
 * is updated, to which role, and whether it is recorded alive and in sync.
 * Flags willUpdatePrimary/willUpdateMirror select which of the two calls are
 * expected at all.
 */
static void
ExpectedPrimaryAndMirrorConfiguration(CdbComponentDatabaseInfo *primary,
									  CdbComponentDatabaseInfo *mirror,
									  char primaryStatus,
									  char mirrorStatus,
									  char mode,
									  char newPrimaryRole,
									  char newMirrorRole,
									  bool willUpdatePrimary,
									  bool willUpdateMirror)
{
	/* mock probeWalRepUpdateConfig */
	if (willUpdatePrimary)
	{
		expect_value(probeWalRepUpdateConfig, dbid, primary->config->dbid);
		expect_value(probeWalRepUpdateConfig, segindex, primary->config->segindex);
		expect_value(probeWalRepUpdateConfig, role, newPrimaryRole);
		expect_value(probeWalRepUpdateConfig, IsSegmentAlive,
					 primaryStatus == GP_SEGMENT_CONFIGURATION_STATUS_UP ? true : false);
		expect_value(probeWalRepUpdateConfig, IsInSync,
					 mode == GP_SEGMENT_CONFIGURATION_MODE_INSYNC ? true : false);
		will_be_called(probeWalRepUpdateConfig);
	}

	if (willUpdateMirror)
	{
		expect_value(probeWalRepUpdateConfig, dbid, mirror->config->dbid);
		expect_value(probeWalRepUpdateConfig, segindex, mirror->config->segindex);
		expect_value(probeWalRepUpdateConfig, role, newMirrorRole);
		expect_value(probeWalRepUpdateConfig, IsSegmentAlive,
					 mirrorStatus == GP_SEGMENT_CONFIGURATION_STATUS_UP ? true : false);
		expect_value(probeWalRepUpdateConfig, IsInSync,
					 mode == GP_SEGMENT_CONFIGURATION_MODE_INSYNC ? true : false);
		will_be_called(probeWalRepUpdateConfig);
	}
}
/*
 * Expect `count` configuration-update transactions: each update wraps
 * probeWalRepUpdateConfig() in a start/snapshot/commit trio, so all three
 * are mocked with the same call count.
 */
static void
PrimaryOrMirrorWillBeUpdated(int count)
{
	will_be_called_count(StartTransactionCommand, count);
	will_be_called_count(GetTransactionSnapshot, count);
	will_be_called_count(CommitTransactionCommand, count);
}
/*
 * One primary segment, connection starts successfully from initial state.
 */
static void
test_ftsConnect_FTS_PROBE_SEGMENT(void **state)
{
	CdbComponentDatabases *cdbs = InitTestCdb(
		1, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
	fts_context context;

	FtsWalRepInitProbeContext(cdbs, &context);
	char primary_conninfo[1024];
	fts_segment_info *ftsInfo = &context.perSegInfos[0];

	/* no connection yet: ftsConnect must start one from scratch */
	ftsInfo->conn = NULL;
	ftsInfo->startTime = 0;

	/* the mocked PQconnectStart hands back a connection mid-establishment */
	PGconn *pgconn = palloc(sizeof(PGconn));

	pgconn->status = CONNECTION_STARTED;
	pgconn->sock = 11;
	snprintf(primary_conninfo, 1024, "host=%s port=%d gpconntype=%s",
			 ftsInfo->primary_cdbinfo->config->hostip, ftsInfo->primary_cdbinfo->config->port,
			 GPCONN_TYPE_FTS);
	expect_string(PQconnectStart, conninfo, primary_conninfo);
	will_return(PQconnectStart, pgconn);

	ftsConnect(&context);

	assert_true(ftsInfo->state == FTS_PROBE_SEGMENT);
	/* Successful ftsConnect must set the socket to be polled for writing. */
	assert_true(ftsInfo->poll_events & POLLOUT);
	/* Successful connections must have their startTime recorded. */
	assert_true(ftsInfo->startTime > 0);
}
/*
 * Two primary segments, connection for one segment fails due to libpq
 * returning CONNECTION_BAD.  Connection for the other is in FTS_PROBE_SEGMENT
 * and advances to the next libpq state.
 */
static void
test_ftsConnect_one_failure_one_success(void **state)
{
	CdbComponentDatabases *cdbs = InitTestCdb(
		2, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
	fts_context context;

	FtsWalRepInitProbeContext(cdbs, &context);
	init_fts_context(&context, FTS_PROBE_SEGMENT);

	/* segment 0: connection already in progress, polling should advance it */
	fts_segment_info *success_resp = &context.perSegInfos[0];

	success_resp->conn->status = CONNECTION_STARTED;
	success_resp->conn->sock = 11;
	/* Assume that the successful socket is ready for writing. */
	success_resp->poll_revents = POLLOUT;
	expect_value(PQconnectPoll, conn, success_resp->conn);
	will_return(PQconnectPoll, PGRES_POLLING_READING);
	/* Ensure that PQstatus doesn't report that this connection is established. */
	expect_value(PQstatus, conn, success_resp->conn);
	will_return(PQstatus, CONNECTION_STARTED);

	/* segment 1: no connection yet, and the fresh connect attempt fails */
	fts_segment_info *failure_resp = &context.perSegInfos[1];

	pfree(failure_resp->conn);
	failure_resp->conn = NULL;
	char primary_conninfo_failure[1024];

	snprintf(primary_conninfo_failure, 1024, "host=%s port=%d gpconntype=%s",
			 failure_resp->primary_cdbinfo->config->hostip,
			 failure_resp->primary_cdbinfo->config->port,
			 GPCONN_TYPE_FTS);
	expect_string(PQconnectStart, conninfo, primary_conninfo_failure);
	PGconn *failure_pgconn = palloc(sizeof(PGconn));

	failure_pgconn->status = CONNECTION_BAD;
	will_return(PQconnectStart, failure_pgconn);
	expect_value(PQerrorMessage, conn, failure_pgconn);
	will_return(PQerrorMessage, "");

	ftsConnect(&context);

	assert_true(success_resp->state == FTS_PROBE_SEGMENT);
	/*
	 * Successful segment's socket must be set to be polled for reading because
	 * we simulated PQconnectPoll() to return PGRES_POLLING_READING.
	 */
	assert_true(success_resp->poll_events & POLLIN);
	assert_true(failure_resp->state == FTS_PROBE_FAILED);
}
/*
 * Starting with one content (primary-mirrror pair) in FTS_PROBE_SEGMENT, test
 * ftsConnect() followed by ftsPoll().
 */
static void
test_ftsConnect_ftsPoll(void **state)
{
	CdbComponentDatabases *cdbs = InitTestCdb(
		1, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
	fts_context context;

	FtsWalRepInitProbeContext(cdbs, &context);
	context.perSegInfos[0].state = FTS_PROBE_SEGMENT;
	/* allocate the global PollFds array consumed by the poll() mock */
	InitPollFds(1);

	char primary_conninfo[1024];
	fts_segment_info *ftsInfo = &context.perSegInfos[0];
	PGconn *pgconn = palloc(sizeof(PGconn));

	pgconn->status = CONNECTION_STARTED;
	pgconn->sock = 11;
	snprintf(primary_conninfo, 1024, "host=%s port=%d gpconntype=%s",
			 ftsInfo->primary_cdbinfo->config->hostip, ftsInfo->primary_cdbinfo->config->port,
			 GPCONN_TYPE_FTS);
	expect_string(PQconnectStart, conninfo, primary_conninfo);
	will_return(PQconnectStart, pgconn);

	ftsConnect(&context);

	assert_true(ftsInfo->state == FTS_PROBE_SEGMENT);
	assert_true(ftsInfo->startTime > 0);
	/* Successful ftsConnect must set the socket to be polled for writing. */
	assert_true(ftsInfo->poll_events & POLLOUT);

	/* ftsPoll asks libpq for the descriptor (twice under assert builds) */
	expect_value(PQsocket, conn, ftsInfo->conn);
	will_return(PQsocket, ftsInfo->conn->sock);
#ifdef USE_ASSERT_CHECKING
	expect_value(PQsocket, conn, ftsInfo->conn);
	will_return(PQsocket, ftsInfo->conn->sock);
#endif
	/*
	 * Simulate poll() returns write-ready for the only descriptor in
	 * fts_context.
	 */
	poll_will_return(1, POLLOUT);

	ftsPoll(&context);

	assert_true(ftsInfo->poll_revents & POLLOUT);
	/* poll_events must be consumed/cleared once revents is populated */
	assert_true(ftsInfo->poll_events == 0);
}
/*
 * 1 primary-mirror pair, send successful.
 */
static void
test_ftsSend_success(void **state)
{
    char message[FTS_MSG_MAX_LEN];
    CdbComponentDatabases *cdbs = InitTestCdb(
        1, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SEGMENT);
    fts_segment_info *ftsInfo = &context.perSegInfos[0];
    /* The connection is idle and poll() reported the socket write-ready. */
    ftsInfo->conn->asyncStatus = PGASYNC_IDLE;
    ftsInfo->poll_revents = POLLOUT;
    /* ftsSend must send exactly this probe message to the primary. */
    snprintf(message, FTS_MSG_MAX_LEN, FTS_MSG_FORMAT,
             FTS_MSG_PROBE,
             ftsInfo->primary_cdbinfo->config->dbid,
             ftsInfo->primary_cdbinfo->config->segindex);
    expect_value(PQstatus, conn, ftsInfo->conn);
    will_return(PQstatus, CONNECTION_OK);
    expect_value(PQsendQuery, conn, ftsInfo->conn);
    expect_string(PQsendQuery, query, message);
    will_return(PQsendQuery, 1);
    ftsSend(&context);
    /* After a successful send, the socket must be polled for the response. */
    assert_true(ftsInfo->poll_events & POLLIN);
}
/*
 * Receive a response to probe message from one primary segment.
 */
static void
test_ftsReceive_success(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        1, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SEGMENT);
    /* Backing storage for the mocked PQgetvalue() return values. */
    static int true_value = 1;
    static int false_value = 0;
    fts_segment_info *ftsInfo = &context.perSegInfos[0];
    ftsInfo->state = FTS_PROBE_SEGMENT;
    ftsInfo->conn = palloc(sizeof(PGconn));
    ftsInfo->conn->status = CONNECTION_OK;
    /* Simulate the case that data has arrived on this socket. */
    ftsInfo->poll_revents = POLLIN;
    /* PQstatus is called twice. */
    expect_value(PQstatus, conn, ftsInfo->conn);
    will_return(PQstatus, CONNECTION_OK);
    expect_value(PQstatus, conn, ftsInfo->conn);
    will_return(PQstatus, CONNECTION_OK);
    /* Expect async libpq interface to receive is called */
    expect_value(PQconsumeInput, conn, ftsInfo->conn);
    will_return(PQconsumeInput, 1);
    expect_value(PQisBusy, conn, ftsInfo->conn);
    will_return(PQisBusy, 0);
    ftsInfo->conn->result = palloc(sizeof(PGresult));
    expect_value(PQgetResult, conn, ftsInfo->conn);
    will_return(PQgetResult, ftsInfo->conn->result);
    expect_value(PQresultStatus, res, ftsInfo->conn->result);
    will_return(PQresultStatus, PGRES_TUPLES_OK);
    expect_value(PQntuples, res, ftsInfo->conn->result);
    will_return(PQntuples, FTS_MESSAGE_RESPONSE_NTUPLES);
    expect_value(PQnfields, res, ftsInfo->conn->result);
    will_return(PQnfields, Natts_fts_message_response);
    /*
     * Mock the response fields: mirror up, in sync, syncrep enabled,
     * responder is not a mirror, and no retry requested.
     */
    expect_value(PQgetvalue, res, ftsInfo->conn->result);
    expect_value(PQgetvalue, tup_num, 0);
    expect_value(PQgetvalue, field_num, Anum_fts_message_response_is_mirror_up);
    will_return(PQgetvalue, &true_value);
    expect_value(PQgetvalue, res, ftsInfo->conn->result);
    expect_value(PQgetvalue, tup_num, 0);
    expect_value(PQgetvalue, field_num, Anum_fts_message_response_is_in_sync);
    will_return(PQgetvalue, &true_value);
    expect_value(PQgetvalue, res, ftsInfo->conn->result);
    expect_value(PQgetvalue, tup_num, 0);
    expect_value(PQgetvalue, field_num, Anum_fts_message_response_is_syncrep_enabled);
    will_return(PQgetvalue, &true_value);
    expect_value(PQgetvalue, res, ftsInfo->conn->result);
    expect_value(PQgetvalue, tup_num, 0);
    expect_value(PQgetvalue, field_num, Anum_fts_message_response_is_role_mirror);
    will_return(PQgetvalue, &false_value);
    expect_value(PQgetvalue, res, ftsInfo->conn->result);
    expect_value(PQgetvalue, tup_num, 0);
    expect_value(PQgetvalue, field_num, Anum_fts_message_response_request_retry);
    will_return(PQgetvalue, &false_value);
    ftsReceive(&context);
    /* The parsed response must be recorded in the per-segment result. */
    assert_true(ftsInfo->result.isPrimaryAlive);
    assert_true(ftsInfo->result.isMirrorAlive);
    assert_false(ftsInfo->result.retryRequested);
    /*
     * No further polling on this socket, until it's time to send the next
     * message.
     */
    assert_true(ftsInfo->poll_events == 0);
}
/*
 * Scenario: if primary responds FATAL to FTS probe, ftsReceive on coordinator
 * should fail due to PQconsumeInput() failed.
 */
static void
test_ftsReceive_when_fts_handler_FATAL(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        1, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SEGMENT);
    fts_segment_info *ftsInfo = &context.perSegInfos[0];
    /* Simulate that data is available for reading from the socket. */
    ftsInfo->poll_revents = POLLIN;
    expect_value(PQstatus, conn, ftsInfo->conn);
    will_return(PQstatus, CONNECTION_OK);
    /* PQconsumeInput() returning 0 indicates a broken connection. */
    expect_value(PQconsumeInput, conn, ftsInfo->conn);
    will_return(PQconsumeInput, 0);
    expect_value(PQerrorMessage, conn, ftsInfo->conn);
    will_return(PQerrorMessage, "");
    /*
     * TEST
     */
    ftsReceive(&context);
    /* The failed receive must transition the segment to the failed state. */
    assert_true(ftsInfo->state == FTS_PROBE_FAILED);
}
/*
 * Scenario: if primary responds ERROR to FTS probe, ftsReceive on coordinator
 * should fail due to PQresultStatus(lastResult) returning PGRES_FATAL_ERROR.
 */
static void
test_ftsReceive_when_fts_handler_ERROR(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        1, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    /*
     * As long as it is one of the states in which an FTS message can be sent
     * and a response be received, the state doesn't matter. Here we chose
     * FTS_PROMOTE_SEGMENT, to simulate a response being received for a PROMOTE
     * message.
     */
    init_fts_context(&context, FTS_PROMOTE_SEGMENT);
    fts_segment_info *ftsInfo = &context.perSegInfos[0];
    /* Simulate that data is available for reading from the socket. */
    ftsInfo->poll_revents = POLLIN;
    /*
     * PQstatus is called once before consuming input and once more, after
     * parsing results.
     */
    expect_value_count(PQstatus, conn, ftsInfo->conn, 2);
    will_return_count(PQstatus, CONNECTION_OK, 2);
    expect_value(PQconsumeInput, conn, ftsInfo->conn);
    will_return(PQconsumeInput, 1);
    expect_value(PQisBusy, conn, ftsInfo->conn);
    will_return(PQisBusy, 0);
    /* The result carries an ERROR raised by the segment's FTS handler. */
    PGresult *result = palloc(sizeof(PGresult));
    expect_value(PQgetResult, conn, ftsInfo->conn);
    will_return(PQgetResult, result);
    expect_value(PQresultStatus, res, result);
    will_return(PQresultStatus, PGRES_FATAL_ERROR);
    expect_value(PQresultErrorMessage, res, result);
    will_return(PQresultErrorMessage, "");
    expect_value(PQclear, res, result);
    will_be_called(PQclear);
    /*
     * TEST
     */
    ftsReceive(&context);
    /* The PROMOTE message must be marked as failed. */
    assert_true(ftsInfo->state == FTS_PROMOTE_FAILED);
}
/*
 * 2 primary-mirror pairs: one got a "request retry" response from primary,
 * syncrep_off message failed to get a response from the other primary.
 * Another attempt must be made in both cases after waiting for 1 second.
 */
static void
test_processRetry_wait_before_retry(void **state)
{
    /* Start with a failure state and retry_count = 0. */
    CdbComponentDatabases *cdbs = InitTestCdb(
        2, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_SYNCREP_OFF_FAILED);
    /* First primary sent a response with requestRetry set. */
    fts_segment_info *ftsInfo1 = &context.perSegInfos[0];
    ftsInfo1->state = FTS_PROBE_SUCCESS;
    ftsInfo1->result.isPrimaryAlive = true;
    ftsInfo1->result.retryRequested = true;
    /* Second primary didn't respond to syncrep_off message. */
    fts_segment_info *ftsInfo2 = &context.perSegInfos[1];
    /* Both connections must be closed before entering the retry wait. */
    expect_value(PQfinish, conn, ftsInfo1->conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, ftsInfo2->conn);
    will_be_called(PQfinish);
    processRetry(&context);
    /* We must wait in a retry_wait state with retryStartTime set. */
    assert_true(ftsInfo1->state == FTS_PROBE_RETRY_WAIT);
    assert_true(ftsInfo2->state == FTS_SYNCREP_OFF_RETRY_WAIT);
    assert_true(ftsInfo1->retry_count == 1);
    assert_true(ftsInfo1->poll_events == 0);
    assert_true(ftsInfo1->poll_revents == 0);
    pg_time_t retryStartTime1 = ftsInfo1->retryStartTime;
    assert_true(retryStartTime1 > 0);
    assert_true(ftsInfo2->retry_count == 1);
    assert_true(ftsInfo2->poll_events == 0);
    assert_true(ftsInfo2->poll_revents == 0);
    pg_time_t retryStartTime2 = ftsInfo2->retryStartTime;
    assert_true(retryStartTime2 > 0);
    /*
     * We must continue to wait because 1 second hasn't elapsed since the
     * failure.
     */
    processRetry(&context);
    assert_true(ftsInfo1->state == FTS_PROBE_RETRY_WAIT);
    assert_true(ftsInfo2->state == FTS_SYNCREP_OFF_RETRY_WAIT);
    /*
     * Adjust retryStartTime to 1 second in past so that next processRetry()
     * should make a retry attempt.
     */
    ftsInfo1->retryStartTime = retryStartTime1 - 1;
    ftsInfo2->retryStartTime = retryStartTime2 - 1;
    processRetry(&context);
    /* This time, we must be ready to make another retry. */
    assert_true(ftsInfo1->state == FTS_PROBE_SEGMENT);
    assert_true(ftsInfo2->state == FTS_SYNCREP_OFF_SEGMENT);
}
/*
 * With an empty context (no primary-mirror pairs), processResponse() has
 * nothing to inspect, so it must report that no configuration was updated.
 */
static void
test_processResponse_for_zero_segment(void **state)
{
    fts_context ctx;

    ctx.num_pairs = 0;
    assert_false(processResponse(&ctx));
}
/*
 * 1 segment, is_updated is false, because FtsIsActive failed.
 */
static void
test_processResponse_for_FtsIsActive_false(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        1, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    /* A healthy response that would normally be processed. */
    context.perSegInfos[0].result.isPrimaryAlive = true;
    context.perSegInfos[0].result.isMirrorAlive = true;
    context.perSegInfos[0].result.isSyncRepEnabled = true;
    /* mock FtsIsActive false */
    will_return(FtsIsActive, false);
    bool is_updated = processResponse(&context);
    /* With FTS inactive, the response must be left unprocessed. */
    assert_false(is_updated);
    assert_true(context.perSegInfos[0].state == FTS_PROBE_SUCCESS);
}
/*
 * 2 segments, is_updated is false, because neither primary nor mirror
 * state changed.
 */
static void
test_PrimayUpMirrorUpNotInSync_to_PrimayUpMirrorUpNotInSync(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        2, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return_count(FtsIsActive, true, 2);
    /* Primary must block commits as long as it and its mirror are alive. */
    context.perSegInfos[0].result.isPrimaryAlive = true;
    context.perSegInfos[0].result.isMirrorAlive = true;
    context.perSegInfos[0].result.isSyncRepEnabled = true;
    context.perSegInfos[1].result.isPrimaryAlive = true;
    context.perSegInfos[1].result.isMirrorAlive = true;
    context.perSegInfos[1].result.isSyncRepEnabled = true;
    /* Both probe connections must be closed. */
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, context.perSegInfos[1].conn);
    will_be_called(PQfinish);
    /* processResponse should not update a probe state */
    bool is_updated = processResponse(&context);
    /* Active connections must be closed after processing response. */
    assert_true(context.perSegInfos[0].conn == NULL);
    assert_true(context.perSegInfos[1].conn == NULL);
    assert_false(is_updated);
}
/*
 * 2 segments, is_updated is false, because it is a double fault scenario:
 * primary and mirror are not in sync, hence the mirror cannot be promoted,
 * hence the current primary needs to stay marked as up.
 */
static void
test_PrimayUpMirrorUpNotInSync_to_PrimaryDown(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        2, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return_count(FtsIsActive, true, 2);
    /* No response received from first segment, retries exhausted. */
    context.perSegInfos[0].state = FTS_PROBE_FAILED;
    context.perSegInfos[0].retry_count = gp_fts_probe_retries;
    /* Response received from second segment */
    context.perSegInfos[1].result.isPrimaryAlive = true;
    context.perSegInfos[1].result.isMirrorAlive = true;
    context.perSegInfos[1].result.isSyncRepEnabled = true;
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, context.perSegInfos[1].conn);
    will_be_called(PQfinish);
    /* No update must happen */
    bool is_updated = processResponse(&context);
    /* Active connections must be closed after processing response. */
    assert_true(context.perSegInfos[0].conn == NULL);
    assert_true(context.perSegInfos[1].conn == NULL);
    assert_false(is_updated);
}
/*
 * 2 segments, is_updated is true, because content 0 mirror is updated.
 */
static void
test_PrimayUpMirrorUpNotInSync_to_PrimaryUpMirrorDownNotInSync(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        2, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return_count(FtsIsActive, true, 2);
    /*
     * Test response received from both segments. First primary's mirror is
     * reported as DOWN.
     */
    context.perSegInfos[0].result.isPrimaryAlive = true;
    context.perSegInfos[0].result.isMirrorAlive = false;
    /* Syncrep must be enabled because mirror is up. */
    context.perSegInfos[1].result.isPrimaryAlive = true;
    context.perSegInfos[1].result.isMirrorAlive = true;
    context.perSegInfos[1].result.isSyncRepEnabled = true;
    /* the mirror will be updated */
    PrimaryOrMirrorWillBeUpdated(1);
    ExpectedPrimaryAndMirrorConfiguration(
        context.perSegInfos[0].primary_cdbinfo, /* primary */
        context.perSegInfos[0].mirror_cdbinfo, /* mirror */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* primary status */
        GP_SEGMENT_CONFIGURATION_STATUS_DOWN, /* mirror status */
        GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC, /* mode */
        GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY, /* newPrimaryRole */
        GP_SEGMENT_CONFIGURATION_ROLE_MIRROR, /* newMirrorRole */
        false, /* willUpdatePrimary */
        true /* willUpdateMirror */);
    /* Active connections must be closed after processing response. */
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, context.perSegInfos[1].conn);
    will_be_called(PQfinish);
    bool is_updated = processResponse(&context);
    assert_true(is_updated);
    /* Active connections must be closed after processing response. */
    assert_true(context.perSegInfos[0].conn == NULL);
    assert_true(context.perSegInfos[1].conn == NULL);
}
/*
 * 3 segments, is_updated is true, because content 0 mirror is down in the
 * configuration and the probe response reports it as up.
 */
static void
test_PrimaryUpMirrorDownNotInSync_to_PrimayUpMirrorUpNotInSync(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        3, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return_count(FtsIsActive, true, 3);
    /* set the mirror down in config */
    CdbComponentDatabaseInfo *cdbinfo =
        GetSegmentFromCdbComponentDatabases(
            cdbs, 0, GP_SEGMENT_CONFIGURATION_ROLE_MIRROR);
    cdbinfo->config->status = GP_SEGMENT_CONFIGURATION_STATUS_DOWN;
    /*
     * Response received from all three segments, DOWN mirror is reported UP
     * for first primary.
     */
    context.perSegInfos[0].result.isPrimaryAlive = true;
    context.perSegInfos[0].result.isMirrorAlive = true;
    context.perSegInfos[0].result.isSyncRepEnabled = true;
    /* no change */
    context.perSegInfos[1].result.isPrimaryAlive = true;
    context.perSegInfos[1].result.isMirrorAlive = true;
    context.perSegInfos[1].result.isSyncRepEnabled = true;
    context.perSegInfos[2].result.isPrimaryAlive = true;
    context.perSegInfos[2].result.isMirrorAlive = true;
    context.perSegInfos[2].result.isSyncRepEnabled = true;
    /* the mirror will be updated */
    PrimaryOrMirrorWillBeUpdated(1);
    ExpectedPrimaryAndMirrorConfiguration(
        context.perSegInfos[0].primary_cdbinfo, /* primary */
        context.perSegInfos[0].mirror_cdbinfo, /* mirror */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* primary status */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* mirror status */
        GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC, /* mode */
        GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY, /* newPrimaryRole */
        GP_SEGMENT_CONFIGURATION_ROLE_MIRROR, /* newMirrorRole */
        false, /* willUpdatePrimary */
        true /* willUpdateMirror */);
    /* Active connections must be closed after processing response. */
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, context.perSegInfos[1].conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, context.perSegInfos[2].conn);
    will_be_called(PQfinish);
    bool is_updated = processResponse(&context);
    assert_true(is_updated);
    /*
     * Assert that connections are closed and the state of the segments is not
     * changed (no further messages needed from FTS).
     */
    assert_true(context.perSegInfos[0].conn == NULL);
    assert_true(context.perSegInfos[0].state == FTS_RESPONSE_PROCESSED);
    assert_true(context.perSegInfos[1].conn == NULL);
    assert_true(context.perSegInfos[1].state == FTS_RESPONSE_PROCESSED);
    assert_true(context.perSegInfos[2].conn == NULL);
    assert_true(context.perSegInfos[2].state == FTS_RESPONSE_PROCESSED);
}
/*
 * 5 segments, is_updated is true, as we are changing the state of several
 * segment pairs. This test also validates that sync-rep off and promotion
 * messages are not blocked by primary retry requests.
 */
static void
test_processResponse_multiple_segments(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        5, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return_count(FtsIsActive, true, 5);
    /*
     * Mark the mirror for content 0 down in configuration, probe response
     * indicates it's up.
     */
    CdbComponentDatabaseInfo *cdbinfo =
        GetSegmentFromCdbComponentDatabases(
            cdbs, 0, GP_SEGMENT_CONFIGURATION_ROLE_MIRROR);
    cdbinfo->config->status = GP_SEGMENT_CONFIGURATION_STATUS_DOWN;
    /* First segment DOWN mirror, now reported UP */
    context.perSegInfos[0].result.isPrimaryAlive = true;
    context.perSegInfos[0].result.isMirrorAlive = true;
    context.perSegInfos[0].result.isSyncRepEnabled = true;
    /*
     * Mark the primary-mirror pair for content 1 as in-sync in configuration
     * so that the mirror can be promoted.
     */
    cdbinfo = GetSegmentFromCdbComponentDatabases(
        cdbs, 1, GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY);
    cdbinfo->config->mode = GP_SEGMENT_CONFIGURATION_MODE_INSYNC;
    cdbinfo = GetSegmentFromCdbComponentDatabases(
        cdbs, 1, GP_SEGMENT_CONFIGURATION_ROLE_MIRROR);
    cdbinfo->config->mode = GP_SEGMENT_CONFIGURATION_MODE_INSYNC;
    /* Second segment no response received, mirror will be promoted */
    context.perSegInfos[1].state = FTS_PROBE_FAILED;
    context.perSegInfos[1].retry_count = gp_fts_probe_retries;
    /* Third segment UP mirror, now reported DOWN */
    context.perSegInfos[2].result.isPrimaryAlive = true;
    context.perSegInfos[2].result.isSyncRepEnabled = true;
    context.perSegInfos[2].result.isMirrorAlive = false;
    /* Fourth segment, response received no change */
    context.perSegInfos[3].result.isPrimaryAlive = true;
    context.perSegInfos[3].result.isSyncRepEnabled = true;
    context.perSegInfos[3].result.isMirrorAlive = true;
    /* Fifth segment, probe failed but retries not exhausted */
    context.perSegInfos[4].result.isPrimaryAlive = false;
    context.perSegInfos[4].result.isSyncRepEnabled = false;
    context.perSegInfos[4].result.isMirrorAlive = false;
    context.perSegInfos[4].state = FTS_PROBE_RETRY_WAIT;
    /* we are updating three of the five segments */
    PrimaryOrMirrorWillBeUpdated(3);
    /* First segment: mark its mirror back up. */
    ExpectedPrimaryAndMirrorConfiguration(
        context.perSegInfos[0].primary_cdbinfo, /* primary */
        context.perSegInfos[0].mirror_cdbinfo, /* mirror */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* primary status */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* mirror status */
        GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC, /* mode */
        GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY, /* newPrimaryRole */
        GP_SEGMENT_CONFIGURATION_ROLE_MIRROR, /* newMirrorRole */
        false, /* willUpdatePrimary */
        true /* willUpdateMirror */);
    /*
     * Second segment should go through mirror promotion (roles swap).
     */
    ExpectedPrimaryAndMirrorConfiguration(
        context.perSegInfos[1].primary_cdbinfo, /* primary */
        context.perSegInfos[1].mirror_cdbinfo, /* mirror */
        GP_SEGMENT_CONFIGURATION_STATUS_DOWN, /* primary status */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* mirror status */
        GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC, /* mode */
        GP_SEGMENT_CONFIGURATION_ROLE_MIRROR, /* newPrimaryRole */
        GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY, /* newMirrorRole */
        true, /* willUpdatePrimary */
        true /* willUpdateMirror */);
    /* Third segment: mark its mirror down. */
    ExpectedPrimaryAndMirrorConfiguration(
        context.perSegInfos[2].primary_cdbinfo, /* primary */
        context.perSegInfos[2].mirror_cdbinfo, /* mirror */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* primary status */
        GP_SEGMENT_CONFIGURATION_STATUS_DOWN, /* mirror status */
        GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC, /* mode */
        GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY, /* newPrimaryRole */
        GP_SEGMENT_CONFIGURATION_ROLE_MIRROR, /* newMirrorRole */
        false, /* willUpdatePrimary */
        true /* willUpdateMirror */);
    /* Fourth segment will not change status */
    /* Active connections must be closed after processing response. */
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, context.perSegInfos[1].conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, context.perSegInfos[2].conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, context.perSegInfos[3].conn);
    will_be_called(PQfinish);
    bool is_updated = processResponse(&context);
    assert_true(is_updated);
    /* mirror found up */
    assert_true(context.perSegInfos[0].state == FTS_RESPONSE_PROCESSED);
    /* mirror promotion should be triggered */
    assert_true(context.perSegInfos[1].state == FTS_PROMOTE_SEGMENT);
    /* mirror found down, must turn off syncrep on primary */
    assert_true(context.perSegInfos[2].state == FTS_SYNCREP_OFF_SEGMENT);
    /* no change in configuration */
    assert_true(context.perSegInfos[3].state == FTS_RESPONSE_PROCESSED);
    /* retry possible, final state is not yet reached */
    assert_true(context.perSegInfos[4].state == FTS_PROBE_RETRY_WAIT);
}
/*
 * 1 segment, is_updated is true, because primary and mirror will be
 * marked not in sync.
 */
static void
test_PrimayUpMirrorUpSync_to_PrimaryUpMirrorUpNotInSync(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        1, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return(FtsIsActive, true);
    /* Probe responded with Mirror Up and Not In SYNC with syncrep enabled */
    context.perSegInfos[0].result.isPrimaryAlive = true;
    context.perSegInfos[0].result.isMirrorAlive = true;
    context.perSegInfos[0].result.isInSync = false;
    context.perSegInfos[0].result.isSyncRepEnabled = true;
    /* we are updating one segment pair */
    PrimaryOrMirrorWillBeUpdated(1);
    ExpectedPrimaryAndMirrorConfiguration(
        context.perSegInfos[0].primary_cdbinfo, /* primary */
        context.perSegInfos[0].mirror_cdbinfo, /* mirror */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* primary status */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* mirror status */
        GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC, /* mode */
        GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY, /* newPrimaryRole */
        GP_SEGMENT_CONFIGURATION_ROLE_MIRROR, /* newMirrorRole */
        true, /* willUpdatePrimary */
        true /* willUpdateMirror */);
    /* Active connections must be closed after processing response. */
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    bool is_updated = processResponse(&context);
    assert_true(is_updated);
    assert_true(context.perSegInfos[0].state == FTS_RESPONSE_PROCESSED);
    assert_true(context.perSegInfos[0].conn == NULL);
}
/*
 * 2 segments, is_updated is true, because the mirror will be marked down and
 * both will be marked not in sync for the first primary-mirror pair.
 */
static void
test_PrimayUpMirrorUpSync_to_PrimaryUpMirrorDownNotInSync(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        2, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return_count(FtsIsActive, true, 2);
    /*
     * Probe responded with one mirror down and not in-sync, and syncrep
     * enabled on both primaries.
     */
    context.perSegInfos[0].result.isPrimaryAlive = true;
    context.perSegInfos[0].result.isMirrorAlive = false;
    context.perSegInfos[0].result.isInSync = false;
    context.perSegInfos[0].result.isSyncRepEnabled = true;
    context.perSegInfos[1].result.isPrimaryAlive = true;
    context.perSegInfos[1].result.isMirrorAlive = true;
    context.perSegInfos[1].result.isInSync = true;
    context.perSegInfos[1].result.isSyncRepEnabled = true;
    /* we are updating one segment pair */
    PrimaryOrMirrorWillBeUpdated(1);
    ExpectedPrimaryAndMirrorConfiguration(
        context.perSegInfos[0].primary_cdbinfo, /* primary */
        context.perSegInfos[0].mirror_cdbinfo, /* mirror */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* primary status */
        GP_SEGMENT_CONFIGURATION_STATUS_DOWN, /* mirror status */
        GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC, /* mode */
        GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY, /* newPrimaryRole */
        GP_SEGMENT_CONFIGURATION_ROLE_MIRROR, /* newMirrorRole */
        true, /* willUpdatePrimary */
        true /* willUpdateMirror */);
    /* Active connections must be closed after processing response. */
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, context.perSegInfos[1].conn);
    will_be_called(PQfinish);
    bool is_updated = processResponse(&context);
    assert_true(is_updated);
    /* mirror is down but syncrep is enabled, so it must be turned off */
    assert_true(context.perSegInfos[0].state == FTS_SYNCREP_OFF_SEGMENT);
    /* no change in config */
    assert_true(context.perSegInfos[1].state == FTS_RESPONSE_PROCESSED);
    assert_true(context.perSegInfos[0].conn == NULL);
    assert_true(context.perSegInfos[1].conn == NULL);
}
/*
 * 1 segment, is_updated is true, because FTS found the primary down; both
 * will be marked not in sync, and then FTS promotes the mirror.
 */
static void
test_PrimayUpMirrorUpSync_to_PrimaryDown_to_MirrorPromote(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        1, true, GP_SEGMENT_CONFIGURATION_MODE_INSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return(FtsIsActive, true);
    /* Probe response was not received, retries exhausted. */
    context.perSegInfos[0].state = FTS_PROBE_FAILED;
    context.perSegInfos[0].retry_count = gp_fts_probe_retries;
    /* Store reference to mirror object for validation later. */
    CdbComponentDatabaseInfo *mirror = context.perSegInfos[0].mirror_cdbinfo;
    /* we are updating one segment pair */
    PrimaryOrMirrorWillBeUpdated(1);
    ExpectedPrimaryAndMirrorConfiguration(
        context.perSegInfos[0].primary_cdbinfo, /* primary */
        context.perSegInfos[0].mirror_cdbinfo, /* mirror */
        GP_SEGMENT_CONFIGURATION_STATUS_DOWN, /* primary status */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* mirror status */
        GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC, /* mode */
        GP_SEGMENT_CONFIGURATION_ROLE_MIRROR, /* newPrimaryRole */
        GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY, /* newMirrorRole */
        true, /* willUpdatePrimary */
        true /* willUpdateMirror */);
    /* Active connections must be closed after processing response. */
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    bool is_updated = processResponse(&context);
    assert_true(is_updated);
    /* the mirror must be marked for promotion */
    assert_true(context.perSegInfos[0].state == FTS_PROMOTE_SEGMENT);
    /* After the role swap, primary_cdbinfo must refer to the old mirror. */
    assert_int_equal(context.perSegInfos[0].primary_cdbinfo->config->dbid, mirror->config->dbid);
    assert_true(context.perSegInfos[0].conn == NULL);
}
/*
 * 1 segment, is_updated is true, because primary and mirror will be
 * marked in sync.
 */
static void
test_PrimayUpMirrorUpNotInSync_to_PrimayUpMirrorUpSync(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        1, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return(FtsIsActive, true);
    /* Probe responded with Mirror Up and SYNC */
    context.perSegInfos[0].result.isPrimaryAlive = true;
    context.perSegInfos[0].result.isMirrorAlive = true;
    context.perSegInfos[0].result.isInSync = true;
    context.perSegInfos[0].result.isSyncRepEnabled = true;
    /* we are updating one segment pair */
    PrimaryOrMirrorWillBeUpdated(1);
    ExpectedPrimaryAndMirrorConfiguration(
        context.perSegInfos[0].primary_cdbinfo, /* primary */
        context.perSegInfos[0].mirror_cdbinfo, /* mirror */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* primary status */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* mirror status */
        GP_SEGMENT_CONFIGURATION_MODE_INSYNC, /* mode */
        GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY, /* newPrimaryRole */
        GP_SEGMENT_CONFIGURATION_ROLE_MIRROR, /* newMirrorRole */
        true, /* willUpdatePrimary */
        true /* willUpdateMirror */);
    /* Active connections must be closed after processing response. */
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    bool is_updated = processResponse(&context);
    assert_true(is_updated);
    assert_true(context.perSegInfos[0].state == FTS_RESPONSE_PROCESSED);
    assert_true(context.perSegInfos[0].conn == NULL);
}
/*
 * 1 segment, is_updated is true, because the mirror will be marked UP and
 * both primary and mirror should get updated to SYNC.
 */
static void
test_PrimaryUpMirrorDownNotInSync_to_PrimayUpMirrorUpSync(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        1, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return(FtsIsActive, true);
    /* set the mirror down in config */
    CdbComponentDatabaseInfo *cdbinfo =
        GetSegmentFromCdbComponentDatabases(
            cdbs, 0, GP_SEGMENT_CONFIGURATION_ROLE_MIRROR);
    cdbinfo->config->status = GP_SEGMENT_CONFIGURATION_STATUS_DOWN;
    /* Probe responded with Mirror Up and SYNC */
    context.perSegInfos[0].result.isPrimaryAlive = true;
    context.perSegInfos[0].result.isMirrorAlive = true;
    context.perSegInfos[0].result.isInSync = true;
    context.perSegInfos[0].result.isSyncRepEnabled = true;
    /* we are updating one segment pair */
    PrimaryOrMirrorWillBeUpdated(1);
    ExpectedPrimaryAndMirrorConfiguration(
        context.perSegInfos[0].primary_cdbinfo, /* primary */
        context.perSegInfos[0].mirror_cdbinfo, /* mirror */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* primary status */
        GP_SEGMENT_CONFIGURATION_STATUS_UP, /* mirror status */
        GP_SEGMENT_CONFIGURATION_MODE_INSYNC, /* mode */
        GP_SEGMENT_CONFIGURATION_ROLE_PRIMARY, /* newPrimaryRole */
        GP_SEGMENT_CONFIGURATION_ROLE_MIRROR, /* newMirrorRole */
        true, /* willUpdatePrimary */
        true /* willUpdateMirror */);
    /* Active connections must be closed after processing response. */
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    bool is_updated = processResponse(&context);
    assert_true(is_updated);
    assert_true(context.perSegInfos[0].conn == NULL);
    assert_true(context.perSegInfos[0].state == FTS_RESPONSE_PROCESSED);
}
/*
 * 1 segment, is_updated is false, because there is no status or mode change.
 */
static void
test_PrimaryUpMirrorDownNotInSync_to_PrimayUpMirrorDownNotInSync(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        1, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return(FtsIsActive, true);
    /* set the mirror down in config */
    CdbComponentDatabaseInfo *cdbinfo =
        GetSegmentFromCdbComponentDatabases(
            cdbs, 0, GP_SEGMENT_CONFIGURATION_ROLE_MIRROR);
    cdbinfo->config->status = GP_SEGMENT_CONFIGURATION_STATUS_DOWN;
    /* Probe responded with mirror DOWN and not in sync, same as config. */
    context.perSegInfos[0].result.isPrimaryAlive = true;
    context.perSegInfos[0].result.isMirrorAlive = false;
    context.perSegInfos[0].result.isInSync = false;
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    bool is_updated = processResponse(&context);
    assert_false(is_updated);
    assert_true(context.perSegInfos[0].conn == NULL);
    assert_true(context.perSegInfos[0].state == FTS_RESPONSE_PROCESSED);
}
/*
 * 2 segments, is_updated is false, because content 0 mirror is already
 * down and probe response fails. Means double fault scenario.
 */
static void
test_PrimaryUpMirrorDownNotInSync_to_PrimaryDown(void **state)
{
    CdbComponentDatabases *cdbs = InitTestCdb(
        2, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);
    fts_context context;
    FtsWalRepInitProbeContext(cdbs, &context);
    init_fts_context(&context, FTS_PROBE_SUCCESS);
    will_return_count(FtsIsActive, true, 2);
    /* set the mirror down in config */
    CdbComponentDatabaseInfo *cdbinfo =
        GetSegmentFromCdbComponentDatabases(
            cdbs, 0, GP_SEGMENT_CONFIGURATION_ROLE_MIRROR);
    cdbinfo->config->status = GP_SEGMENT_CONFIGURATION_STATUS_DOWN;
    /* No response received from segment 1 (content 0 primary) */
    context.perSegInfos[0].state = FTS_PROBE_FAILED;
    context.perSegInfos[0].retry_count = gp_fts_probe_retries;
    /* No change for segment 2, probe successful */
    context.perSegInfos[1].result.isPrimaryAlive = true;
    context.perSegInfos[1].result.isSyncRepEnabled = true;
    context.perSegInfos[1].result.isMirrorAlive = true;
    /* Active connections must be closed after processing response. */
    expect_value(PQfinish, conn, context.perSegInfos[0].conn);
    will_be_called(PQfinish);
    expect_value(PQfinish, conn, context.perSegInfos[1].conn);
    will_be_called(PQfinish);
    bool is_updated = processResponse(&context);
    assert_false(is_updated);
    assert_true(context.perSegInfos[0].conn == NULL);
    assert_true(context.perSegInfos[1].conn == NULL);
    assert_true(context.perSegInfos[0].state == FTS_RESPONSE_PROCESSED);
    assert_true(context.perSegInfos[1].state == FTS_RESPONSE_PROCESSED);
}
/*
 * 1 segment: a probe that exceeds gp_fts_probe_timeout is marked failed
 * and stays eligible for retry like any other failure.
 */
static void
test_probeTimeout(void **state)
{
	CdbComponentDatabases *test_cdbs = InitTestCdb(
		1, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);
	fts_context context;

	FtsWalRepInitProbeContext(test_cdbs, &context);
	init_fts_context(&context, FTS_PROBE_SEGMENT);

	/* Pretend the probe started longer ago than the timeout allows. */
	pg_time_t current_time = (pg_time_t) time(NULL);
	context.perSegInfos[0].startTime = current_time - gp_fts_probe_timeout - 1;

	ftsCheckTimeout(&context.perSegInfos[0], current_time);

	assert_true(context.perSegInfos[0].state == FTS_PROBE_FAILED);
	/*
	 * A timeout is treated as just another failure: the retry budget is
	 * left untouched so the probe will be retried.
	 */
	assert_true(context.perSegInfos[0].retry_count == 0);
}
/*
 * FtsWalRepInitProbeContext() must leave every per-segment slot in a
 * well-defined initial state: ready to probe, zero retries, no
 * connection, no recorded error, and an all-false probe result tied to
 * the primary's dbid.
 */
static void
test_FtsWalRepInitProbeContext_initial_state(void **state)
{
	fts_context context;
	CdbComponentDatabases *test_cdbs = InitTestCdb(
		2, true, GP_SEGMENT_CONFIGURATION_MODE_NOTINSYNC);

	FtsWalRepInitProbeContext(test_cdbs, &context);

	int idx;
	for (idx = 0; idx < context.num_pairs; idx++)
	{
		assert_true(context.perSegInfos[idx].state == FTS_PROBE_SEGMENT);
		assert_true(context.perSegInfos[idx].retry_count == 0);
		assert_true(context.perSegInfos[idx].conn == NULL);
		assert_true(context.perSegInfos[idx].probe_errno == 0);
		assert_true(context.perSegInfos[idx].result.dbid ==
					context.perSegInfos[idx].primary_cdbinfo->config->dbid);
		assert_false(context.perSegInfos[idx].result.isPrimaryAlive);
		assert_false(context.perSegInfos[idx].result.isMirrorAlive);
		assert_false(context.perSegInfos[idx].result.isInSync);
	}
}
/*
 * Test driver: parses cmockery command-line options, registers the test
 * list, and runs it. MemoryContextInit() and InitFtsProbeInfo() set up
 * the backend state the FTS code under test expects before any test runs.
 */
int
main(int argc, char* argv[])
{
	cmockery_parse_arguments(argc, argv);

	const UnitTest tests[] = {
		unit_test(test_ftsConnect_FTS_PROBE_SEGMENT),
		unit_test(test_ftsConnect_one_failure_one_success),
		unit_test(test_ftsConnect_ftsPoll),
		unit_test(test_ftsSend_success),
		unit_test(test_ftsReceive_success),
		unit_test(test_ftsReceive_when_fts_handler_FATAL),
		unit_test(test_ftsReceive_when_fts_handler_ERROR),
		unit_test(test_processRetry_wait_before_retry),
		/* -----------------------------------------------------------------------
		 * Group of tests for processResponse()
		 * -----------------------------------------------------------------------
		 */
		unit_test(test_processResponse_for_zero_segment),
		unit_test(test_processResponse_for_FtsIsActive_false),
		unit_test(test_processResponse_multiple_segments),
		unit_test(test_PrimayUpMirrorUpSync_to_PrimaryUpMirrorUpNotInSync),
		unit_test(test_PrimayUpMirrorUpSync_to_PrimaryUpMirrorDownNotInSync),
		unit_test(test_PrimayUpMirrorUpSync_to_PrimaryDown_to_MirrorPromote),
		unit_test(test_PrimayUpMirrorUpNotInSync_to_PrimayUpMirrorUpSync),
		unit_test(test_PrimayUpMirrorUpNotInSync_to_PrimayUpMirrorUpNotInSync),
		unit_test(test_PrimayUpMirrorUpNotInSync_to_PrimaryUpMirrorDownNotInSync),
		unit_test(test_PrimayUpMirrorUpNotInSync_to_PrimaryDown),
		unit_test(test_PrimaryUpMirrorDownNotInSync_to_PrimayUpMirrorUpSync),
		unit_test(test_PrimaryUpMirrorDownNotInSync_to_PrimayUpMirrorUpNotInSync),
		unit_test(test_PrimaryUpMirrorDownNotInSync_to_PrimayUpMirrorDownNotInSync),
		unit_test(test_PrimaryUpMirrorDownNotInSync_to_PrimaryDown),
		unit_test(test_probeTimeout),
		/*-----------------------------------------------------------------------*/
		unit_test(test_FtsWalRepInitProbeContext_initial_state)
	};

	MemoryContextInit();
	InitFtsProbeInfo();

	return run_tests(tests);
}
|
ee2b39d27372f547ec74d486c894db6e7bbd96ca
|
e814383d36a10839104efaa4df277996ab220fa3
|
/ompi/mca/pml/ob1/pml_ob1_accelerator.h
|
993790aaaa3dbe7ef67d30139f1050955a678fad
|
[
"mpich2",
"BSD-3-Clause-Open-MPI"
] |
permissive
|
open-mpi/ompi
|
a1d7483ae1d83dd8fd8ae3ee95e832e0a0ee04e3
|
1edfdb025c4450f694600083ad871cf06c8d45cd
|
refs/heads/main
| 2023-09-01T01:30:02.040705
| 2023-08-29T17:32:18
| 2023-08-29T17:32:18
| 24,107,001
| 2,008
| 973
|
NOASSERTION
| 2023-09-14T20:59:26
| 2014-09-16T16:08:30
|
C
|
UTF-8
|
C
| false
| false
| 1,745
|
h
|
pml_ob1_accelerator.h
|
/*
* Copyright (c) 2004-2006 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2013 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2006 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2015 NVIDIA Corporation. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2022 Amazon.com, Inc. or its affiliates. All Rights
* reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
/* Implements a progress engine based accelerator asynchronous copy implementation */
#ifndef OMPI_PML_OB1_ACCELERATOR_H
#define OMPI_PML_OB1_ACCELERATOR_H

#include "opal/mca/accelerator/accelerator.h"
#include "opal/mca/btl/btl.h"

/*
 * Prototypes only — semantics live in the corresponding .c file. Per the
 * names, "htod"/"dtoh" denote host-to-device / device-to-host transfer
 * directions (presumably; verify against the implementation).
 */

/* Record an htod copy event associated with the given BTL fragment. */
OPAL_DECLSPEC int mca_pml_ob1_record_htod_event(char *msg, struct mca_btl_base_descriptor_t *frag);
/* Accessor for the shared device-to-host accelerator stream. */
OPAL_DECLSPEC opal_accelerator_stream_t *mca_pml_ob1_get_dtoh_stream(void);
/* Accessor for the shared host-to-device accelerator stream. */
OPAL_DECLSPEC opal_accelerator_stream_t *mca_pml_ob1_get_htod_stream(void);
/* Progress one pending htod event; the fragment is returned via the out
 * parameter. */
OPAL_DECLSPEC int mca_pml_ob1_progress_one_htod_event(struct mca_btl_base_descriptor_t **);
/* One-time setup / teardown of the accelerator copy machinery. */
OPAL_DECLSPEC int mca_pml_ob1_accelerator_init(void);
OPAL_DECLSPEC void mca_pml_ob1_accelerator_fini(void);

#endif /* OMPI_PML_OB1_ACCELERATOR_H */
|
b088456a20def813bab094a9e50d9786df4a74dc
|
bcb62f36caa6ab1f92715cffae29cf4353d247e8
|
/src/os/osSpTaskYield.c
|
92546a8fe381d9ccfe02623ae27b050cb805c3f9
|
[] |
no_license
|
n64decomp/mk64
|
4b74ffb8c896e739b908534b74bc1cb79a21ae60
|
c4c00f159845fddd1eced906c14cc25a0cb7a10b
|
refs/heads/master
| 2023-09-01T02:48:30.111868
| 2023-09-01T02:28:49
| 2023-09-01T02:28:49
| 304,205,548
| 476
| 88
| null | 2023-09-14T04:36:28
| 2020-10-15T04:01:57
|
C
|
UTF-8
|
C
| false
| false
| 104
|
c
|
osSpTaskYield.c
|
#include "libultra_internal.h"
/*
 * Requests that the currently running SP task yield by raising signal 0
 * in the SP status register. NOTE(review): the yield handshake itself is
 * presumably implemented by the task microcode polling this signal — not
 * visible from this file.
 */
void osSpTaskYield(void) {
    __osSpSetStatus(SPSTATUS_SET_SIGNAL0);
}
|
1c3098c2f0fae531f4a08ebd283af2eba08de915
|
a9a591fc3964117db3b16583c3bfa5a24cfc0114
|
/lib/liblfds7.2.0/src/liblfds720/src/lfds720_hash_nodelete/lfds720_hash_n_iterate.c
|
672b9039d694dc53eeb097eef184929be1c32d76
|
[
"BSD-2-Clause"
] |
permissive
|
grz0zrg/fas
|
9cd0a55c7e86fcf5cafffd44ebdf633f3ee5fefa
|
07b08a77b78781dd5ed7984117f294fe4fcbe3fd
|
refs/heads/master
| 2022-09-18T05:29:27.983761
| 2022-08-30T19:53:19
| 2022-08-30T19:53:19
| 78,288,048
| 142
| 10
|
BSD-2-Clause
| 2021-05-25T01:43:22
| 2017-01-07T15:53:24
|
C
|
UTF-8
|
C
| false
| false
| 1,493
|
c
|
lfds720_hash_n_iterate.c
|
/***** includes *****/
#include "lfds720_hash_n_internal.h"
/****************************************************************************/
/*
 * Primes an iterator for a full walk of the hash: positions it at the
 * first bucket (btree) of the state's bucket array, with no current
 * element selected yet.
 */
void lfds720_hash_n_iterate_init( struct lfds720_hash_n_state *has,
                                  struct lfds720_hash_n_iterate *hai )
{
  LFDS720_PAL_ASSERT( has != NULL );
  LFDS720_PAL_ASSERT( hai != NULL );

  hai->baue = NULL;
  hai->baus = has->baus_array;
  hai->baus_end = has->baus_array + has->array_size;
}
/****************************************************************************/
/*
 * Advances the iterator to the next element of the hash, walking each
 * bucket's btree in ascending key order before moving to the next bucket.
 * On success *hae receives the element and 1 is returned; when the walk
 * is exhausted *hae is set to NULL and 0 is returned.
 */
int lfds720_hash_n_iterate( struct lfds720_hash_n_iterate *hai,
                            struct lfds720_hash_n_element **hae )
{
  int
    found = 0;

  LFDS720_PAL_ASSERT( hai != NULL );
  LFDS720_PAL_ASSERT( hae != NULL );

  for( ;; )
  {
    // NULL baue means "start of this tree"; otherwise step to the next larger element
    lfds720_btree_nu_get_by_absolute_position_and_then_by_relative_position( hai->baus, &hai->baue, LFDS720_BTREE_NU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS720_BTREE_NU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE );

    if( hai->baue != NULL )
    {
      *hae = LFDS720_BTREE_NU_GET_VALUE_FROM_ELEMENT( *hai->baue );
      found = 1;
      break;
    }

    // current tree exhausted; advance to the next bucket, stopping at the array end
    if( ++hai->baus == hai->baus_end )
    {
      *hae = NULL;
      break;
    }
  }

  return found;
}
|
0f7049ff5b012c9c3f403ed9b8e71caadc67bdc8
|
22ad0b50c643bd20bd2268523e752733e3fd89c4
|
/AppleSources/libmalloc-166.251.2/src/magazine_tiny.c
|
6b465ed79570074ed2ee7b01f21fa82e9453233d
|
[
"MIT"
] |
permissive
|
tripleCC/Laboratory
|
857ad947b05c132c31744829c4add3140a6e7974
|
8df17d2da2d4f73c5f3911a6fd7c75f866ae1467
|
refs/heads/master
| 2023-06-08T00:16:10.093064
| 2023-06-02T13:09:41
| 2023-06-02T13:10:32
| 187,594,453
| 159
| 41
| null | null | null | null |
UTF-8
|
C
| false
| false
| 88,158
|
c
|
magazine_tiny.c
|
/*
* Copyright (c) 2015 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include "internal.h"
// The address and size of the block in mag_last_free are combined. These
// macros abstract construction of the combined value and extraction of the
// size and pointer. The packing relies on TINY_QUANTUM alignment: the msize
// lives in the low bits that a quantum-aligned pointer always has clear.
#define TINY_MAG_LAST_FREE_FROM_PTR_AND_MSIZE(ptr, msize) (void *)(((uintptr_t)(ptr))|((msize_t)msize))
#define TINY_PTR_FROM_MAG_LAST_FREE(x) (void *)(((uintptr_t)(x)) & ~(TINY_QUANTUM - 1))
#define TINY_MSIZE_FROM_MAG_LAST_FREE(x) (msize_t)(((uintptr_t)(x)) & (TINY_QUANTUM - 1))

// Adjusts the pointer part of mag_last_free by a given amount in bytes. Must be
// a multiple of the quantum size (not checked).
#define TINY_MAG_LAST_FREE_PTR_ADJUST_PTR(x, size) (x) = ((void *)(x) + (size))
// Decrements the size part of mag_last_free by a given msize value. Must not
// reduce the msize part below zero (not checked).
#define TINY_MAG_LAST_FREE_PTR_DEC_MSIZE(x, msize_delta) (x) = ((void *)(x) - (msize_delta))
// Maps the calling thread to a tiny-rack magazine index from the CPU it is
// currently running on (or from the override, when _os_cpu_number_override
// is set to something other than -1). With CONFIG_TINY_USES_HYPER_SHIFT,
// the CPU number is right-shifted by hyper_shift so that several logical
// CPUs share one magazine.
static MALLOC_INLINE MALLOC_ALWAYS_INLINE
mag_index_t
tiny_mag_get_thread_index(void)
{
#if CONFIG_TINY_USES_HYPER_SHIFT
	if (os_likely(_os_cpu_number_override == -1)) {
		return _os_cpu_number() >> hyper_shift;
	} else {
		return _os_cpu_number_override >> hyper_shift;
	}
#else // CONFIG_TINY_USES_HYPER_SHIFT
	// (Fixed stale comments: these guards belong to CONFIG_TINY_USES_HYPER_SHIFT,
	// not CONFIG_SMALL_USES_HYPER_SHIFT.)
	if (os_likely(_os_cpu_number_override == -1)) {
		return _os_cpu_number();
	} else {
		return _os_cpu_number_override;
	}
#endif // CONFIG_TINY_USES_HYPER_SHIFT
}
/*
 * Maps a block size in quanta to its free-list slot. Sizes 1..NUM_TINY_SLOTS
 * map to slots 0..NUM_TINY_SLOTS-1; msize 0 (meaning 65536 quanta) and any
 * size above NUM_TINY_SLOTS share the final catch-all slot.
 */
static inline grain_t
tiny_slot_from_msize(msize_t msize)
{
	if (msize == 0 || msize > NUM_TINY_SLOTS) {
		return NUM_TINY_SLOTS;
	}
	return msize - 1;
}
/*
 * Get the size of the previous free block, which is stored in the last two
 * bytes of the block. If the previous block is not free, then the result is
 * undefined.
 */
static msize_t
get_tiny_previous_free_msize(const void *ptr)
{
	// check whether the previous block is in the tiny region and a block header
	// if so, then the size of the previous block is one, and there is no stored
	// size.
	if (ptr != TINY_REGION_FOR_PTR(ptr)) {
		void *prev_block = (void *)((uintptr_t)ptr - TINY_QUANTUM);
		uint32_t *prev_header = TINY_BLOCK_HEADER_FOR_PTR(prev_block);
		msize_t prev_index = TINY_INDEX_FOR_PTR(prev_block);
		// A set header bit on the immediately preceding quantum means that
		// quantum starts a block, so the previous block is exactly one
		// quantum long and no trailing size word was written for it
		// (see set_tiny_meta_header_free: msize == 1 stores no size).
		if (BITARRAY_BIT(prev_header, prev_index)) {
			return 1;
		}
		// Otherwise read the msize the free block recorded in its tail.
		return TINY_PREVIOUS_MSIZE(ptr);
	}
	// don't read possibly unmapped memory before the beginning of the region
	return 0;
}
/*
 * Marks the block at ptr, msize quanta long, as in use in the region
 * metadata: sets the block-start and in-use bits for the first quantum,
 * clears both bit arrays for the msize-1 interior quanta (handling the
 * 32/64/96-bit span cases explicitly), and re-asserts the block-start
 * bit of the immediately following block.
 *
 * Fix: the third DEBUG_MALLOC report used ASL_LEVEL_ERROR, which is not a
 * defined ASL level (the sibling checks use ASL_LEVEL_ERR), so the
 * function failed to compile with DEBUG_MALLOC enabled.
 */
static MALLOC_INLINE void
set_tiny_meta_header_in_use(const void *ptr, msize_t msize)
{
	uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
	msize_t index = TINY_INDEX_FOR_PTR(ptr);
	msize_t clr_msize = msize - 1;          // number of interior quanta to clear
	msize_t midx = (index >> 5) << 1;       // word index: header/in-use words interleave in pairs
	uint32_t val = (1 << (index & 31));

#if DEBUG_MALLOC
	if (msize > NUM_TINY_SLOTS) {
		malloc_report(ASL_LEVEL_ERR, "set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
	}
	if ((unsigned)index + (unsigned)msize > 0x10000) {
		malloc_report(ASL_LEVEL_ERR, "set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
	}
	if (msize > TINY_BITMAP_RANGE_LIMIT) {
		// was ASL_LEVEL_ERROR (undefined); ASL_LEVEL_ERR matches the checks above
		malloc_report(ASL_LEVEL_ERR, "set_tiny_meta_header_in_use() invariant broken (3) %p %d\n", ptr, msize);
	}
#endif

	block_header[midx] |= val; // BITARRAY_SET(block_header, index);
	block_header[midx + 1] |= val; // BITARRAY_SET(in_use, index);

	// Clear the interior quanta in both bit arrays, i.e. the effect of:
	// bitarray_mclr(block_header, index, end_bit);
	// bitarray_mclr(in_use, index, end_bit);
	index++;
	midx = (index >> 5) << 1;

	unsigned start = index & 31;
	unsigned end = start + clr_msize;

#if defined(__LP64__)
	if (end > 63) {
		// span covers three word-pairs: mask the first, zero the middle, mask the last
		unsigned mask0 = (0xFFFFFFFFU >> (31 - start)) >> 1;
		unsigned mask1 = (0xFFFFFFFFU << (end - 64));
		block_header[midx + 0] &= mask0; // clear header
		block_header[midx + 1] &= mask0; // clear in_use
		block_header[midx + 2] = 0; // clear header
		block_header[midx + 3] = 0; // clear in_use
		block_header[midx + 4] &= mask1; // clear header
		block_header[midx + 5] &= mask1; // clear in_use
	} else
#endif
	if (end > 31) {
		// span covers two word-pairs
		unsigned mask0 = (0xFFFFFFFFU >> (31 - start)) >> 1;
		unsigned mask1 = (0xFFFFFFFFU << (end - 32));
		block_header[midx + 0] &= mask0;
		block_header[midx + 1] &= mask0;
		block_header[midx + 2] &= mask1;
		block_header[midx + 3] &= mask1;
	} else {
		// span lies within a single word-pair
		unsigned mask = (0xFFFFFFFFU >> (31 - start)) >> 1;
		mask |= (0xFFFFFFFFU << end);
		block_header[midx + 0] &= mask;
		block_header[midx + 1] &= mask;
	}

	// we set the block_header bit for the following block to reaffirm next block is a block
	index += clr_msize;
	midx = (index >> 5) << 1;
	val = (1 << (index & 31));

	block_header[midx] |= val; // BITARRAY_SET(block_header, (index+clr_msize));
#if DEBUG_MALLOC
	{
		// Read the header back and verify it round-trips to the same msize.
		boolean_t ff;
		msize_t mf;

		mf = get_tiny_meta_header(ptr, &ff);
		if (msize != mf) {
			malloc_report(ASL_LEVEL_INFO, "setting header for tiny in_use %p : %d\n", ptr, msize);
			malloc_report(ASL_LEVEL_INFO, "reading header for tiny %p : %d %d\n", ptr, mf, ff);
		}
	}
#endif
}
// Specialization of set_tiny_meta_header_in_use() for msize == 1: set the
// block-start and in-use bits for this quantum and re-assert the block-start
// bit of the following quantum. A one-quantum block has no interior quanta,
// so nothing needs clearing.
static MALLOC_INLINE void set_tiny_meta_header_in_use_1(const void *ptr) // As above with msize == 1
{
	uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
	msize_t index = TINY_INDEX_FOR_PTR(ptr);
	msize_t midx = (index >> 5) << 1;
	uint32_t val = (1 << (index & 31));

	block_header[midx] |= val; // BITARRAY_SET(block_header, index);
	block_header[midx + 1] |= val; // BITARRAY_SET(in_use, index);

	index++;
	midx = (index >> 5) << 1;
	val = (1 << (index & 31));

	block_header[midx] |= val; // BITARRAY_SET(block_header, (index+clr_msize))
}
/*
 * Marks the quantum at ptr as the interior of an in-use block by clearing
 * both its block-start bit and its in-use bit in the region metadata.
 */
static MALLOC_INLINE void
set_tiny_meta_header_middle(const void *ptr)
{
	uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
	uint32_t *in_use = TINY_INUSE_FOR_HEADER(block_header);
	msize_t index = TINY_INDEX_FOR_PTR(ptr);

	BITARRAY_CLR(block_header, index);
	BITARRAY_CLR(in_use, index);
}
/*
 * Marks the block at ptr as free in the region metadata: sets the
 * block-start bit and clears the in-use bit for its first quantum, and
 * (when there is room) records msize both at the start of the block and
 * in its trailing bytes so the following block can locate its free
 * predecessor (see get_tiny_previous_free_msize()).
 */
static MALLOC_INLINE void
set_tiny_meta_header_free(const void *ptr, msize_t msize)
{
	// !msize is acceptable and means 65536
	uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
	msize_t index = TINY_INDEX_FOR_PTR(ptr);
	msize_t midx = (index >> 5) << 1;
	uint32_t val = (1 << (index & 31));

#if DEBUG_MALLOC
	if ((unsigned)index + (unsigned)msize > 0x10000) {
		malloc_report(ASL_LEVEL_ERR, "setting header for tiny free %p msize too large: %d\n", ptr, msize);
	}
#endif

	block_header[midx] |= val; // BITARRAY_SET(block_header, index);
	block_header[midx + 1] &= ~val; // BITARRAY_CLR(in_use, index);

	// mark the end of this block if msize is > 1. For msize == 0, the whole
	// region is free, so there is no following block. For msize == 1, there is
	// no space to write the size on 64 bit systems. The size for 1 quantum
	// blocks is computed from the metadata bitmaps.
	if (msize > 1) {
		void *follower = FOLLOWING_TINY_PTR(ptr, msize);
		TINY_PREVIOUS_MSIZE(follower) = msize;
		TINY_FREE_SIZE(ptr) = msize;
	}
	if (msize == 0) {
		TINY_FREE_SIZE(ptr) = msize;
	}

#if DEBUG_MALLOC
	// Verify the header round-trips: read it back as a free block of msize.
	boolean_t ff;
	msize_t mf = get_tiny_meta_header(ptr, &ff);
	if ((msize != mf) || !ff) {
		malloc_report(ASL_LEVEL_INFO, "setting header for tiny free %p : %u\n", ptr, msize);
		malloc_report(ASL_LEVEL_INFO, "reading header for tiny %p : %u %u\n", ptr, mf, ff);
	}
#endif
}
/*
 * Reports whether the block starting at ptr is free: its block-start bit
 * must be set and its in-use bit clear in the region metadata.
 */
static MALLOC_INLINE boolean_t
tiny_meta_header_is_free(const void *ptr)
{
	uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
	uint32_t *in_use = TINY_INUSE_FOR_HEADER(block_header);
	msize_t index = TINY_INDEX_FOR_PTR(ptr);

	return BITARRAY_BIT(block_header, index) && !BITARRAY_BIT(in_use, index);
}
/*
 * Returns the block immediately preceding ptr if — and only if — that
 * block is free, reporting its size through prev_msize; returns NULL when
 * ptr starts its region or the predecessor is not verifiably free. The
 * trailing size word is only trusted after the bitmap checks agree.
 */
static MALLOC_INLINE void *
tiny_previous_preceding_free(void *ptr, msize_t *prev_msize)
{
	// returns the previous block, assuming and verifying it's free
	uint32_t *block_header;
	uint32_t *in_use;
	msize_t index;
	msize_t previous_msize;
	msize_t previous_index;
	void *previous_ptr;

	block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
	in_use = TINY_INUSE_FOR_HEADER(block_header);
	index = TINY_INDEX_FOR_PTR(ptr);

	// First block of the region has no predecessor.
	if (!index) {
		return NULL;
	}
	// A recorded size reaching past the start of the region is bogus.
	if ((previous_msize = get_tiny_previous_free_msize(ptr)) > index) {
		return NULL;
	}

	previous_index = index - previous_msize;
	previous_ptr = (void *)((uintptr_t)TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index));
	// The candidate must actually start a block...
	if (!BITARRAY_BIT(block_header, previous_index)) {
		return NULL;
	}
	// ...must not be in use...
	if (BITARRAY_BIT(in_use, previous_index)) {
		return NULL;
	}
	// ...and its own free-size record must agree with the trailing size.
	if (get_tiny_free_size(previous_ptr) != previous_msize) {
		return NULL;
	}

	// conservative check did match true check
	*prev_msize = previous_msize;
	return previous_ptr;
}
/*
 * Adds an item to the proper free list, and also marks the meta-header of the
 * block properly.
 * Assumes szone has been locked
 *
 * The block is pushed onto the head of the size-class list chosen by its
 * msize (msize 0 and oversized blocks share the final slot), and the
 * magazine's slot-occupancy bitmap is set when a slot goes from empty to
 * non-empty. Free-list links are stored checksummed via
 * free_list_checksum_ptr().
 */
static void
tiny_free_list_add_ptr(rack_t *rack, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize)
{
	grain_t slot = (!msize || (msize > NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS : msize - 1;
	tiny_free_list_t *free_ptr = ptr;
	tiny_free_list_t *free_head = tiny_mag_ptr->mag_free_list[slot].p;

#if DEBUG_MALLOC
	// NOTE(review): these checks reference `szone`, which is not a parameter
	// of this function — confirm this still compiles with DEBUG_MALLOC on.
	if (LOG(szone, ptr)) {
		malloc_report(ASL_LEVEL_INFO, "in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
	}
	if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
		malloc_zone_error(rack->debug_flags, true, "tiny_free_list_add_ptr: Unaligned ptr: %p\n", ptr);
	}
#endif
	set_tiny_meta_header_free(ptr, msize);

	if (free_head) {
#if DEBUG_MALLOC
		if (free_list_unchecksum_ptr(szone, &free_head->previous)) {
			malloc_zone_error(rack->debug_flags, true,
					"tiny_free_list_add_ptr: Internal invariant broken (free_head->previous): "
					"ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, (void *)free_head, free_head->previous.p);
		}
		if (!tiny_meta_header_is_free(free_head)) {
			malloc_zone_error(rack->debug_flags, true,
					"tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer): "
					"ptr=%p slot=%d free_head=%p\n", ptr, slot, (void *)free_head);
		}
#endif
		// New head: back-link the old head to the inserted block.
		free_head->previous.u = free_list_checksum_ptr(rack, free_ptr);
	} else {
		// First entry in this slot: mark the slot occupied in the bitmap.
		BITMAPV_SET(tiny_mag_ptr->mag_bitmap, slot);
	}
	free_ptr->previous.u = free_list_checksum_ptr(rack, NULL);
	free_ptr->next.u = free_list_checksum_ptr(rack, free_head);

	tiny_mag_ptr->mag_free_list[slot].p = free_ptr;
}
/*
 * Removes the item pointed to by ptr in the proper free list.
 * Assumes szone has been locked
 *
 * Unlinks ptr from its size-class list, validating the checksummed
 * neighbour links in both directions; corrupted links abort via
 * malloc_zone_error(). When the slot becomes empty its bit in the
 * magazine's occupancy bitmap is cleared.
 */
static void
tiny_free_list_remove_ptr(rack_t *rack, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize)
{
	grain_t slot = tiny_slot_from_msize(msize);
	tiny_free_list_t *free_ptr = ptr, *next, *previous;

	next = free_list_unchecksum_ptr(rack, &free_ptr->next);
	previous = free_list_unchecksum_ptr(rack, &free_ptr->previous);

#if DEBUG_MALLOC
	if (LOG(szone, ptr)) {
		malloc_report(ASL_LEVEL_INFO, "In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
	}
#endif

	if (!previous) {
		// The block to remove is the head of the free list
#if DEBUG_MALLOC
		// NOTE(review): this compares the free_list_t slot directly against
		// ptr (not `.p`), and references `szone` above — verify this debug
		// path compiles when DEBUG_MALLOC is enabled.
		if (tiny_mag_ptr->mag_free_list[slot] != ptr) {
			malloc_zone_error(rack->debug_flags, true,
					"tiny_free_list_remove_ptr: Internal invariant broken (tiny_mag_ptr->mag_free_list[slot]): "
					"ptr=%p slot=%d msize=%d tiny_mag_ptr->mag_free_list[slot]=%p\n", ptr, slot, msize,
					(void *)tiny_mag_ptr->mag_free_list[slot]);
			return;
		}
#endif
		tiny_mag_ptr->mag_free_list[slot].p = next;
		if (!next) {
			// Slot is now empty: clear its occupancy bit.
			BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot);
		}
	} else {
		// Check that the next pointer of "previous" points to free_ptr.
		tiny_free_list_t *prev_next = free_list_unchecksum_ptr(rack, &previous->next);
		if (prev_next != free_ptr) {
			malloc_zone_error(rack->debug_flags, true,
					"tiny_free_list_remove_ptr: Internal invariant broken (next ptr of prev): "
					"ptr=%p, prev_next=%p\n", ptr, prev_next);
			__builtin_unreachable(); // Always crashes in malloc_zone_error().
		}

		// We know free_ptr is already checksummed, so we don't need to do it
		// again.
		previous->next = free_ptr->next;
	}
	if (next) {
		// Check that the previous pointer of "next" points to free_ptr.
		tiny_free_list_t *next_prev = free_list_unchecksum_ptr(rack, &next->previous);
		if (next_prev != free_ptr) {
			malloc_zone_error(rack->debug_flags, true,
					"tiny_free_list_remove_ptr: Internal invariant broken (prev ptr of next): "
					"ptr=%p, next_prev=%p\n", ptr, next_prev);
			__builtin_unreachable(); // Always crashes in malloc_zone_error().
		}

		// We know free_ptr is already checksummed, so we don't need to do it
		// again.
		next->previous = free_ptr->previous;
	}
}
/*
 * Converts the magazine's "open" region into a fully accounted one:
 * turns the tracked free space at the end (and, with ASLR, at the start)
 * of mag_last_region into ordinary free-list blocks, coalescing with
 * free neighbours, then detaches the region from the magazine.
 */
void
tiny_finalize_region(rack_t *rack, magazine_t *tiny_mag_ptr)
{
	void *last_block, *previous_block;
	uint32_t *last_header;
	msize_t last_msize, previous_msize, last_index;

	// It is possible that the block prior to the last block in the region has
	// been free'd, but was not coalesced with the free bytes at the end of the
	// block, since we treat the bytes at the end of the region as "in use" in
	// the meta headers. Attempt to coalesce the last block with the previous
	// block, so we don't violate the "no consecutive free blocks" invariant.
	//
	// FIXME: Need to investigate how much work would be required to increase
	//        'mag_bytes_free_at_end' when freeing the preceding block, rather
	//        than performing this workaround.
	//
	if (tiny_mag_ptr->mag_bytes_free_at_end) {
		last_block = (void *)((uintptr_t)TINY_REGION_END(tiny_mag_ptr->mag_last_region) - tiny_mag_ptr->mag_bytes_free_at_end);
		last_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end);
		last_header = TINY_BLOCK_HEADER_FOR_PTR(last_block);
		last_index = TINY_INDEX_FOR_PTR(last_block);

		// Before anything we transform any remaining mag_bytes_free_at_end into a
		// regular free block.  We take special care here to update the bitfield
		// information, since we are bypassing the normal free codepath.  If there
		// is more than one quanta worth of memory in mag_bytes_free_at_end, then
		// there will be two block headers:
		// 1) header for the free space at end, msize = 1
		// 2) header inserted by set_tiny_meta_header_in_use after block
		// We must clear the second one so that when the free block's size is
		// queried, we do not think the block is only 1 quantum in size because
		// of the second set header bit.
		if (last_index != (NUM_TINY_BLOCKS - 1)) {
			BITARRAY_CLR(last_header, (last_index + 1));
		}

		previous_block = tiny_previous_preceding_free(last_block, &previous_msize);
		if (previous_block) {
			// Merge the free predecessor into the end block.
			set_tiny_meta_header_middle(last_block);
			tiny_free_list_remove_ptr(rack, tiny_mag_ptr, previous_block, previous_msize);
			last_block = previous_block;
			last_msize += previous_msize;
		}

		// splice last_block into the free list
		tiny_free_list_add_ptr(rack, tiny_mag_ptr, last_block, last_msize);
		tiny_mag_ptr->mag_bytes_free_at_end = 0;
	}

#if CONFIG_ASLR_INTERNAL
	// Coalesce the big free block at start with any following free blocks
	if (tiny_mag_ptr->mag_bytes_free_at_start) {
		last_block = TINY_REGION_ADDRESS(tiny_mag_ptr->mag_last_region);
		last_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_start);

		void *next_block = (void *)((uintptr_t)last_block + tiny_mag_ptr->mag_bytes_free_at_start);

		// clear the in use bit we were using to mark the end of the big start block
		set_tiny_meta_header_middle((void *)((uintptr_t)next_block - TINY_QUANTUM));

		// Coalesce the big start block with any following free blocks
		if (tiny_meta_header_is_free(next_block)) {
			msize_t next_msize = get_tiny_free_size(next_block);
			set_tiny_meta_header_middle(next_block);
			tiny_free_list_remove_ptr(rack, tiny_mag_ptr, next_block, next_msize);
			last_msize += next_msize;
		}

		// splice last_block into the free list
		tiny_free_list_add_ptr(rack, tiny_mag_ptr, last_block, last_msize);
		tiny_mag_ptr->mag_bytes_free_at_start = 0;
	}
#endif

	// The region is no longer the magazine's open region.
	tiny_mag_ptr->mag_last_region = NULL;
}
/*
 * Walks every block in region r, unthreading free blocks from the
 * magazine's free lists. Returns the number of blocks still in use
 * (so the caller can tell whether the region is empty).
 */
int
tiny_free_detach_region(rack_t *rack, magazine_t *tiny_mag_ptr, region_t r)
{
	uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r);
	uintptr_t limit = (uintptr_t)TINY_REGION_END(r);
	uintptr_t cursor;
	boolean_t is_free;
	msize_t msize;
	int live_blocks = 0;

	for (cursor = start; cursor < limit; cursor += TINY_BYTES_FOR_MSIZE(msize)) {
		msize = get_tiny_meta_header((void *)cursor, &is_free);
		if (is_free && !msize && (cursor == start)) {
			// the whole region is a single free block
			break;
		}
		if (!msize) {
#if DEBUG_MALLOC
			malloc_report(ASL_LEVEL_ERR, "*** tiny_free_detach_region error with %p: msize=%d is_free=%d\n", (void *)cursor, msize, is_free);
#endif
			break;
		}
		if (is_free) {
			tiny_free_list_remove_ptr(rack, tiny_mag_ptr, (void *)cursor, msize);
		} else {
			live_blocks++;
		}
	}
	return live_blocks;
}
/*
 * Walks every block in region r, threading free blocks back onto the
 * magazine's free lists. Returns the number of bytes still in use.
 */
size_t
tiny_free_reattach_region(rack_t *rack, magazine_t *tiny_mag_ptr, region_t r)
{
	uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r);
	uintptr_t limit = (uintptr_t)TINY_REGION_END(r);
	uintptr_t cursor;
	boolean_t is_free;
	msize_t msize;
	size_t bytes_in_use = 0;

	for (cursor = start; cursor < limit; cursor += TINY_BYTES_FOR_MSIZE(msize)) {
		msize = get_tiny_meta_header((void *)cursor, &is_free);
		if (is_free && !msize && (cursor == start)) {
			// the whole region is a single free block
			break;
		}
		if (!msize) {
#if DEBUG_MALLOC
			malloc_report(ASL_LEVEL_ERR, "*** tiny_free_reattach_region error with %p: msize=%d is_free=%d\n", (void *)cursor, msize, is_free);
#endif
			break;
		}
		if (is_free) {
			tiny_free_list_add_ptr(rack, tiny_mag_ptr, (void *)cursor, msize);
		} else {
			bytes_in_use += TINY_BYTES_FOR_MSIZE(msize);
		}
	}
	return bytes_in_use;
}
// (page number, page count) pair describing a run of madvisable pages within
// a tiny region; deliberately 8-bit so the stack-allocated advisory array in
// tiny_free_scan_madvise_free() stays small.
typedef struct {
	uint8_t pnum, size;
} tiny_pg_pair_t;
/*
 * Scans region r (expected to be in the depot) for free blocks spanning
 * whole pages and issues MADV_FREE-style advice for them, skipping the
 * leading free-list header and trailing size word of each free block so
 * free-list management data survives. The advisory list is collected
 * under the depot lock, but the madvise calls themselves are made with
 * the lock dropped; pinned_to_depot guards the region meanwhile.
 */
void
tiny_free_scan_madvise_free(rack_t *rack, magazine_t *depot_ptr, region_t r)
{
	uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r);
	uintptr_t current = start;
	uintptr_t limit = (uintptr_t)TINY_REGION_END(r);
	boolean_t is_free;
	msize_t msize;
	tiny_pg_pair_t advisory[((TINY_REGION_PAYLOAD_BYTES + vm_page_quanta_size - 1) >> vm_page_quanta_shift) >>
			1]; // 256bytes stack allocated
	int advisories = 0;

	// Scan the metadata identifying blocks which span one or more pages. Mark the pages MADV_FREE taking care to preserve free list
	// management data.
	while (current < limit) {
		msize = get_tiny_meta_header((void *)current, &is_free);
		if (is_free && !msize && (current == start)) {
			// first block is all free
#if DEBUG_MALLOC
			malloc_report(ASL_LEVEL_INFO, "*** tiny_free_scan_madvise_free first block is all free! %p: msize=%d is_free=%d\n", (void *)current,
					msize, is_free);
#endif
			// Advise the whole region, minus the free-list header at the
			// front and the trailing size word at the back.
			uintptr_t pgLo = round_page_kernel(start + sizeof(tiny_free_list_t) + sizeof(msize_t));
			uintptr_t pgHi = trunc_page_kernel(start + TINY_REGION_SIZE - sizeof(msize_t));

			if (pgLo < pgHi) {
				advisory[advisories].pnum = (pgLo - start) >> vm_kernel_page_shift;
				advisory[advisories].size = (pgHi - pgLo) >> vm_kernel_page_shift;
				advisories++;
			}
			break;
		}
		if (!msize) {
#if DEBUG_MALLOC
			malloc_report(ASL_LEVEL_ERR, "*** tiny_free_scan_madvise_free error with %p: msize=%d is_free=%d\n", (void *)current, msize, is_free);
#endif
			break;
		}
		if (is_free) {
			// Advise only the whole pages strictly inside this free block.
			uintptr_t pgLo = round_page_kernel(current + sizeof(tiny_free_list_t) + sizeof(msize_t));
			uintptr_t pgHi = trunc_page_kernel(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));

			if (pgLo < pgHi) {
				advisory[advisories].pnum = (pgLo - start) >> vm_kernel_page_shift;
				advisory[advisories].size = (pgHi - pgLo) >> vm_kernel_page_shift;
				advisories++;
			}
		}
		current += TINY_BYTES_FOR_MSIZE(msize);
	}

	if (advisories > 0) {
		int i;

		// So long as the following hold for this region:
		// (1) No malloc()'s are ever performed from the depot (hence free pages remain free,)
		// (2) The region is not handed over to a per-CPU magazine (where malloc()'s could be performed),
		// (3) The entire region is not mumap()'d (so the madvise's are applied to the intended addresses),
		// then the madvise opportunities collected just above can be applied outside all locks.
		// (1) is ensured by design, (2) and (3) are ensured by bumping the globally visible counter node->pinned_to_depot.
		OSAtomicIncrement32Barrier(&(REGION_TRAILER_FOR_TINY_REGION(r)->pinned_to_depot));
		SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr);
		for (i = 0; i < advisories; ++i) {
			uintptr_t addr = (advisory[i].pnum << vm_kernel_page_shift) + start;
			size_t size = advisory[i].size << vm_kernel_page_shift;

			mvm_madvise_free(rack, r, addr, addr + size, NULL, rack->debug_flags & MALLOC_DO_SCRIBBLE);
		}
		SZONE_MAGAZINE_PTR_LOCK(depot_ptr);
		OSAtomicDecrement32Barrier(&(REGION_TRAILER_FOR_TINY_REGION(r)->pinned_to_depot));
	}
}
/*
 * Finds a region containing a free block large enough to satisfy msize:
 * first an exact-fit slot, then (via the occupancy bitmap) the smallest
 * larger slot, and finally the catch-all last slot that holds oversized
 * coalesced blocks. Returns NULL when no suitable free block exists.
 * NOTE(review): the mag_index parameter appears unused here — confirm
 * whether it is kept for signature symmetry with sibling functions.
 */
static region_t
tiny_find_msize_region(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize)
{
	tiny_free_list_t *ptr;
	grain_t slot = tiny_slot_from_msize(msize);
	free_list_t *free_list = tiny_mag_ptr->mag_free_list;
	free_list_t *the_slot = free_list + slot;
	free_list_t *limit;
#if defined(__LP64__)
	uint64_t bitmap;
#else
	uint32_t bitmap;
#endif
	// Assumes we've locked the magazine
	CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__);

	// Look for an exact match by checking the freelist for this msize.
	ptr = the_slot->p;
	if (ptr) {
		return TINY_REGION_FOR_PTR(ptr);
	}

	// Mask off the bits representing slots holding free blocks smaller than the
	// size we need. If there are no larger free blocks, try allocating from
	// the free space at the end of the tiny region.
#if defined(__LP64__)
	bitmap = ((uint64_t *)(tiny_mag_ptr->mag_bitmap))[0] & ~((1ULL << slot) - 1);
#else
	bitmap = tiny_mag_ptr->mag_bitmap[0] & ~((1 << slot) - 1);
#endif
	if (!bitmap) {
		return NULL;
	}

	// Lowest set bit = smallest occupied slot that is still big enough.
	slot = BITMAPV_CTZ(bitmap);
	limit = free_list + NUM_TINY_SLOTS;
	free_list += slot;

	if (free_list < limit) {
		ptr = free_list->p;
		if (ptr) {
			return TINY_REGION_FOR_PTR(ptr);
		} else {
			/* Shouldn't happen. Fall through to look at last slot. */
#if DEBUG_MALLOC
			malloc_report(ASL_LEVEL_ERR, "in tiny_find_msize_region(), mag_bitmap out of sync, slot=%d\n", slot);
#endif
		}
	}

	// We are now looking at the last slot, which contains blocks equal to, or
	// due to coalescing of free blocks, larger than NUM_TINY_SLOTS * tiny quantum size.
	ptr = limit->p;
	if (ptr) {
		return TINY_REGION_FOR_PTR(ptr);
	}

	return NULL;
}
#if CONFIG_MADVISE_PRESSURE_RELIEF
/*
 * Respond to memory pressure: walk every tiny region, migrate each one
 * that is owned by a regular magazine into the recirculation depot, and
 * MADV_FREE its unused pages. Regions already in the depot are skipped.
 *
 * BUG FIX: the depot's mag_num_objects was previously *decremented* by
 * objects_in_use when a region was handed over, even though the depot is
 * gaining those objects and the two sibling counters are incremented. The
 * identical hand-over in tiny_free_do_recirc_to_depot() increments all
 * three; this now matches it.
 */
void
tiny_madvise_pressure_relief(rack_t *rack)
{
	mag_index_t mag_index;
	magazine_t *tiny_depot_ptr = (&rack->magazines[DEPOT_MAGAZINE_INDEX]);

	for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) {
		size_t index;
		for (index = 0; index < rack->region_generation->num_regions_allocated; ++index) {
			SZONE_LOCK(TINY_SZONE_FROM_RACK(rack));

			region_t tiny = rack->region_generation->hashed_regions[index];
			if (!tiny || tiny == HASHRING_REGION_DEALLOCATED) {
				SZONE_UNLOCK(TINY_SZONE_FROM_RACK(rack));
				continue;
			}

			magazine_t *mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines,
					REGION_TRAILER_FOR_TINY_REGION(tiny),
					MAGAZINE_INDEX_FOR_TINY_REGION(tiny));
			SZONE_UNLOCK(TINY_SZONE_FROM_RACK(rack));

			/* Ordering is important here, the magazine of a region may potentially change
			 * during mag_lock_zine_for_region_trailer, so src_mag_index must be taken
			 * after we've obtained the lock.
			 */
			mag_index_t src_mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny);

			/* We can (and must) ignore magazines that are already in the recirc depot. */
			if (src_mag_index == DEPOT_MAGAZINE_INDEX) {
				SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr);
				continue;
			}

			if (tiny == mag_ptr->mag_last_region && (mag_ptr->mag_bytes_free_at_end || mag_ptr->mag_bytes_free_at_start)) {
				tiny_finalize_region(rack, mag_ptr);
			}

			/* Because this region is currently in use, we can't safely madvise it while
			 * it's attached to the magazine. For this operation we have to remove it from
			 * the current mag, attach it to the depot and then madvise.
			 */

			recirc_list_extract(rack, mag_ptr, REGION_TRAILER_FOR_TINY_REGION(tiny));
			int objects_in_use = tiny_free_detach_region(rack, mag_ptr, tiny);

			SZONE_MAGAZINE_PTR_LOCK(tiny_depot_ptr);
			MAGAZINE_INDEX_FOR_TINY_REGION(tiny) = DEPOT_MAGAZINE_INDEX;
			REGION_TRAILER_FOR_TINY_REGION(tiny)->pinned_to_depot = 0;

			size_t bytes_inplay = tiny_free_reattach_region(rack, tiny_depot_ptr, tiny);

			/* Fix up the metadata of the target magazine while the region is in the depot. */
			mag_ptr->mag_num_bytes_in_objects -= bytes_inplay;
			mag_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
			mag_ptr->mag_num_objects -= objects_in_use;

			/* Now we can drop the magazine lock of the source mag. */
			SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr);

			/* The depot gains exactly what the source magazine gave up
			 * (was erroneously "-=" for the object count). */
			tiny_depot_ptr->mag_num_bytes_in_objects += bytes_inplay;
			tiny_depot_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;
			tiny_depot_ptr->mag_num_objects += objects_in_use;

			recirc_list_splice_last(rack, tiny_depot_ptr, REGION_TRAILER_FOR_TINY_REGION(tiny));

			/* Actually do the scan, done holding the depot lock, the call will drop the lock
			 * around the actual madvise syscalls.
			 */
			tiny_free_scan_madvise_free(rack, tiny_depot_ptr, tiny);

			/* Now the region is in the recirc depot, the next allocations to require more
			 * blocks will come along and take one of these regions back out of the depot.
			 * As OS X madvise's reuse on an per-region basis, we leave as many of these
			 * regions in the depot as possible after memory pressure.
			 */
			SZONE_MAGAZINE_PTR_UNLOCK(tiny_depot_ptr);
		}
	}
}
#endif // CONFIG_MADVISE_PRESSURE_RELIEF
/*
 * Advise the kernel (MADV_FREE) about whole pages covered by a freshly
 * coalesced free block. Entered with the magazine lock held; the lock is
 * dropped around the madvise syscall itself.
 *
 * headptr/headsize describe the originally freed allocation; ptr/msize
 * describe the (possibly larger) coalesced free block containing it.
 */
static MALLOC_INLINE void
tiny_madvise_free_range_no_lock(rack_t *rack,
		magazine_t *tiny_mag_ptr,
		region_t region,
		void *headptr,
		size_t headsize,
		void *ptr,
		msize_t msize)
{
	region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(region);

	// Lock on tiny_magazines[mag_index] is already held here

	// Calculate the first page in the coalesced block that would be safe to mark MADV_FREE:
	// skip past the free-list linkage and leading size word stored in the block.
	size_t free_header_size = sizeof(tiny_free_list_t) + sizeof(msize_t);
	uintptr_t safe_ptr = (uintptr_t)ptr + free_header_size;
	uintptr_t round_safe = round_page_kernel(safe_ptr);

	// Calculate the last page in the coalesced block that would be safe to mark MADV_FREE:
	// keep the trailing msize_t that marks the end of the free block.
	size_t free_tail_size = sizeof(msize_t);
	uintptr_t safe_extent = (uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(msize) - free_tail_size;
	uintptr_t trunc_extent = trunc_page_kernel(safe_extent);

	// The newly freed block may complete a span of bytes that cover a page. Mark it with MADV_FREE.
	if (round_safe < trunc_extent) { // Coalesced area covers a page (perhaps many)
		// Extend the freed block by the free region header and tail sizes to include pages
		// we may have coalesced that no longer host free region tails and headers.
		// This may extend over in-use ranges, but the MIN/MAX clamping below will fix that up.
		uintptr_t lo = trunc_page_kernel((uintptr_t)headptr - free_tail_size);
		uintptr_t hi = round_page_kernel((uintptr_t)headptr + headsize + free_header_size);

		uintptr_t free_lo = MAX(round_safe, lo);
		uintptr_t free_hi = MIN(trunc_extent, hi);

		if (free_lo < free_hi) {
			// Take the block off the free list and mark it in-use so nothing
			// can coalesce with it while the magazine lock is dropped for the
			// syscall; pinned_to_depot signals other threads (which check it
			// before detaching/unmapping this region) that a madvise is in flight.
			tiny_free_list_remove_ptr(rack, tiny_mag_ptr, ptr, msize);
			set_tiny_meta_header_in_use(ptr, msize);

			OSAtomicIncrement32Barrier(&(node->pinned_to_depot));
			SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
			mvm_madvise_free(rack, region, free_lo, free_hi, &rack->last_madvise, rack->debug_flags & MALLOC_DO_SCRIBBLE);
			SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr);
			OSAtomicDecrement32Barrier(&(node->pinned_to_depot));

			// Restore the block's free state and return it to the free list.
			set_tiny_meta_header_free(ptr, msize);
			tiny_free_list_add_ptr(rack, tiny_mag_ptr, ptr, msize);
		}
	}
}
/*
 * Try to adopt a region out of the recirculation depot into this thread's
 * magazine to satisfy an allocation of "msize" quanta. Returns 1 when a
 * region was transferred (the caller can retry its allocation), 0 when the
 * depot has nothing suitable. Entered with tiny_mag_ptr's lock held; the
 * depot lock is taken and released internally.
 */
static boolean_t
tiny_get_region_from_depot(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize)
{
	magazine_t *depot_ptr = &(rack->magazines[DEPOT_MAGAZINE_INDEX]);

	/* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */
	if (rack->num_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary
		return 0;
	}

#if DEBUG_MALLOC
	if (DEPOT_MAGAZINE_INDEX == mag_index) {
		malloc_zone_error(rack->debug_flags, true, "tiny_get_region_from_depot called for magazine index -1\n");
		return 0;
	}
#endif

	SZONE_MAGAZINE_PTR_LOCK(depot_ptr);

	// Appropriate a Depot'd region that can satisfy requested msize.
	region_trailer_t *node;
	region_t sparse_region;

	while (1) {
		sparse_region = tiny_find_msize_region(rack, depot_ptr, DEPOT_MAGAZINE_INDEX, msize);
		if (NULL == sparse_region) { // Depot empty?
			SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr);
			return 0;
		}

		node = REGION_TRAILER_FOR_TINY_REGION(sparse_region);
		if (0 >= node->pinned_to_depot) {
			break;
		}

		// pinned_to_depot > 0: another thread has this region pinned (e.g.
		// for an in-flight madvise). Drop the depot lock, yield, and retry.
		SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr);
		yield();
		SZONE_MAGAZINE_PTR_LOCK(depot_ptr);
	}

	// disconnect node from Depot
	recirc_list_extract(rack, depot_ptr, node);

	// Iterate the region pulling its free entries off the (locked) Depot's free list
	int objects_in_use = tiny_free_detach_region(rack, depot_ptr, sparse_region);

	// Transfer ownership of the region
	MAGAZINE_INDEX_FOR_TINY_REGION(sparse_region) = mag_index;
	node->pinned_to_depot = 0;

	// Iterate the region putting its free entries on its new (locked) magazine's free list
	size_t bytes_inplay = tiny_free_reattach_region(rack, tiny_mag_ptr, sparse_region);

	// Move the region's accounting from the depot to the adopting magazine.
	depot_ptr->mag_num_bytes_in_objects -= bytes_inplay;
	depot_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
	depot_ptr->mag_num_objects -= objects_in_use;

	tiny_mag_ptr->mag_num_bytes_in_objects += bytes_inplay;
	tiny_mag_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;
	tiny_mag_ptr->mag_num_objects += objects_in_use;

	// connect to magazine as first node
	recirc_list_splice_first(rack, tiny_mag_ptr, node);

	SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr);

	// DTrace USDT Probe
	MAGMALLOC_DEPOTREGION(TINY_SZONE_FROM_RACK(rack), (int)mag_index, (void *)sparse_region,
			TINY_REGION_SIZE, (int)BYTES_USED_FOR_TINY_REGION(sparse_region));

	return 1;
}
#if CONFIG_RECIRC_DEPOT
/*
 * If the Depot'd region owning "node" is completely empty, unpinned, and
 * the depot retains at least recirc_retained_regions entries, detach it
 * from the depot and its hash-ring slot and return it so the caller can
 * hand the pages back to the OS (with no locks held). Returns NULL when
 * the region must stay. Entered with the depot lock held.
 *
 * BUG FIX: the 64-bit preprocessor test was "#ifdef __LP64___" (three
 * trailing underscores), so the OSAtomicIncrement64 branch was dead code
 * and LP64 builds silently compiled the 32-bit increment instead. Fixed
 * to the real predefined macro __LP64__, with an explicit cast mirroring
 * the 32-bit branch.
 */
static region_t
tiny_free_try_depot_unmap_no_lock(rack_t *rack, magazine_t *depot_ptr, region_trailer_t *node)
{
	// Keep the region if it still has live blocks, if a concurrent madvise
	// has it pinned, or if unmapping would drop the depot below its
	// retention floor.
	if (0 < node->bytes_used || 0 < node->pinned_to_depot ||
			depot_ptr->recirculation_entries < recirc_retained_regions)
	{
		return NULL;
	}

	// disconnect node from Depot
	recirc_list_extract(rack, depot_ptr, node);

	// Iterate the region pulling its free entries off the (locked) Depot's free list
	region_t sparse_region = TINY_REGION_FOR_PTR(node);
	int objects_in_use = tiny_free_detach_region(rack, depot_ptr, sparse_region);

	if (0 == objects_in_use) {
		// Invalidate the hash table entry for this region with HASHRING_REGION_DEALLOCATED.
		// Using HASHRING_REGION_DEALLOCATED preserves the collision chain, using HASHRING_OPEN_ENTRY (0) would not.
		rgnhdl_t pSlot = hash_lookup_region_no_lock(rack->region_generation->hashed_regions,
				rack->region_generation->num_regions_allocated,
				rack->region_generation->num_regions_allocated_shift,
				sparse_region);
		if (NULL == pSlot) {
			malloc_zone_error(rack->debug_flags, true, "tiny_free_try_depot_unmap_no_lock hash lookup failed: %p\n", sparse_region);
			return NULL;
		}
		*pSlot = HASHRING_REGION_DEALLOCATED;
		depot_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;

		// Atomically increment num_regions_dealloc (was "#ifdef __LP64___").
#ifdef __LP64__
		OSAtomicIncrement64((volatile int64_t *)&rack->num_regions_dealloc);
#else
		OSAtomicIncrement32((int32_t *)&rack->num_regions_dealloc);
#endif

		// Caller will transfer ownership of the region back to the OS with no locks held
		MAGMALLOC_DEALLOCREGION(TINY_SZONE_FROM_RACK(rack), (void *)sparse_region, TINY_REGION_SIZE); // DTrace USDT Probe
		return sparse_region;
	} else {
		malloc_zone_error(rack->debug_flags, true, "tiny_free_try_depot_unmap_no_lock objects_in_use not zero: %d\n", objects_in_use);
		return NULL;
	}
}
/*
 * The whole magazine has crossed the emptiness threshold: migrate one
 * "suitable" (mostly-empty, unpinned) region from it to the recirculation
 * depot, madvising it and unmapping it if it turns out fully empty.
 * Returns FALSE when this function dropped the magazine lock itself, TRUE
 * when the caller must still unlock tiny_mag_ptr.
 */
static boolean_t
tiny_free_do_recirc_to_depot(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index)
{
	// The entire magazine crossed the "emptiness threshold". Transfer a region
	// from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e
	// is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list.
	region_trailer_t *node = tiny_mag_ptr->firstNode;

	while (node && (!node->recirc_suitable || node->pinned_to_depot)) {
		// If we skip a node due to pinned_to_depot being non-zero, it must be
		// because another thread is madvising the same region in
		// tiny_madvise_free_range_no_lock(), called from tiny_free_no_lock().
		// When that's done, the same thread will enter tiny_free_try_recirc_to_depot()
		// for the same region, which will come back here. So this just defers
		// recirculation of the region.
		node = node->next;
	}

	if (NULL == node) {
#if DEBUG_MALLOC
		malloc_report(ASL_LEVEL_INFO, "*** tiny_free_do_recirc_to_depot end of list\n");
#endif
		return TRUE; // Caller must SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
	}

	region_t sparse_region = TINY_REGION_FOR_PTR(node);

	// Deal with unclaimed memory -- mag_bytes_free_at_end or mag_bytes_free_at_start
	if (sparse_region == tiny_mag_ptr->mag_last_region &&
			(tiny_mag_ptr->mag_bytes_free_at_end || tiny_mag_ptr->mag_bytes_free_at_start)) {
		tiny_finalize_region(rack, tiny_mag_ptr);
	}

	// disconnect "suitable" node from magazine
	recirc_list_extract(rack, tiny_mag_ptr, node);

	// Iterate the region pulling its free entries off its (locked) magazine's free list
	int objects_in_use = tiny_free_detach_region(rack, tiny_mag_ptr, sparse_region);

	magazine_t *depot_ptr = &(rack->magazines[DEPOT_MAGAZINE_INDEX]);

	// hand over the region to the (locked) Depot
	SZONE_MAGAZINE_PTR_LOCK(depot_ptr);
	// this will cause tiny_free_list_add_ptr called by tiny_free_reattach_region to use
	// the depot as its target magazine, rather than magazine formerly associated with sparse_region
	MAGAZINE_INDEX_FOR_TINY_REGION(sparse_region) = DEPOT_MAGAZINE_INDEX;
	node->pinned_to_depot = 0;

	// Iterate the region putting its free entries on Depot's free list
	size_t bytes_inplay = tiny_free_reattach_region(rack, depot_ptr, sparse_region);

	// Move the region's accounting from the source magazine to the depot.
	tiny_mag_ptr->mag_num_bytes_in_objects -= bytes_inplay;
	tiny_mag_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
	tiny_mag_ptr->mag_num_objects -= objects_in_use;

	SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr); // Unlock the originating magazine

	depot_ptr->mag_num_bytes_in_objects += bytes_inplay;
	depot_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;
	depot_ptr->mag_num_objects += objects_in_use;

	// connect to Depot as last node
	recirc_list_splice_last(rack, depot_ptr, node);

	MAGMALLOC_RECIRCREGION(TINY_SZONE_FROM_RACK(rack), (int)mag_index, (void *)sparse_region, TINY_REGION_SIZE,
			(int)BYTES_USED_FOR_TINY_REGION(sparse_region)); // DTrace USDT Probe

#if !CONFIG_AGGRESSIVE_MADVISE
	// Mark free'd dirty pages with MADV_FREE to reduce memory pressure
	tiny_free_scan_madvise_free(rack, depot_ptr, sparse_region);
#endif

	// If the region is entirely empty vm_deallocate() it outside the depot lock
	region_t r_dealloc = tiny_free_try_depot_unmap_no_lock(rack, depot_ptr, node);
	SZONE_MAGAZINE_PTR_UNLOCK(depot_ptr);
	if (r_dealloc) {
		mvm_deallocate_pages(r_dealloc, TINY_REGION_SIZE, 0);
	}
	return FALSE; // Caller need not unlock the originating magazine
}
/*
 * Post-free bookkeeping: decide whether this region/magazine has become
 * empty enough to recirculate a region to the depot, or — if the free was
 * into the depot itself — whether the region can be madvised or handed
 * back to the OS. Returns TRUE when the caller must still unlock
 * tiny_mag_ptr, FALSE when the lock was released on this path.
 */
static MALLOC_INLINE boolean_t
tiny_free_try_recirc_to_depot(rack_t *rack,
		magazine_t *tiny_mag_ptr,
		mag_index_t mag_index,
		region_t region,
		void *headptr,
		size_t headsize,
		void *ptr,
		msize_t msize)
{
	region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(region);
	size_t bytes_used = node->bytes_used;

	/* FIXME: Would Uniprocessor benefit from recirc and MADV_FREE? */
	if (rack->num_magazines == 1) { // Uniprocessor, single magazine, so no recirculation necessary
		/* NOTHING */
		return TRUE; // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr)
	} else if (DEPOT_MAGAZINE_INDEX != mag_index) {
		// Emptiness discriminant
		if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) {
			/* Region has crossed threshold from density to sparsity. Mark it "suitable" on the
			 * recirculation candidates list. */
			node->recirc_suitable = TRUE;
		} else {
			/* After this free, we've found the region is still dense, so it must have been even more so before
			 * the free. That implies the region is already correctly marked. Do nothing. */
		}

		// Has the entire magazine crossed the "emptiness threshold"? If so, transfer a region
		// from this magazine to the Depot. Choose a region that itself has crossed the emptiness threshold (i.e
		// is at least fraction "f" empty.) Such a region will be marked "suitable" on the recirculation list.
		size_t a = tiny_mag_ptr->num_bytes_in_magazine;	   // Total bytes allocated to this magazine
		size_t u = tiny_mag_ptr->mag_num_bytes_in_objects; // In use (malloc'd) from this magazine

		if (a - u > ((3 * TINY_REGION_PAYLOAD_BYTES) / 2) && u < DENSITY_THRESHOLD(a)) {
			return tiny_free_do_recirc_to_depot(rack, tiny_mag_ptr, mag_index);
		}
	} else {
#if !CONFIG_AGGRESSIVE_MADVISE
		// We are free'ing into the depot, so madvise as we do so unless we were madvising every incoming
		// allocation anyway.
		tiny_madvise_free_range_no_lock(rack, tiny_mag_ptr, region, headptr, headsize, ptr, msize);
#endif

		if (0 < bytes_used || 0 < node->pinned_to_depot) {
			/* Depot'd region is still live. Leave it in place on the Depot's recirculation list
			 * so as to avoid thrashing between the Depot's free list and a magazines's free list
			 * with detach_region/reattach_region */
		} else {
			/* Depot'd region is just now empty. Consider return to OS. */
			region_t r_dealloc = tiny_free_try_depot_unmap_no_lock(rack, tiny_mag_ptr, node);
			SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
			if (r_dealloc) {
				mvm_deallocate_pages(r_dealloc, TINY_REGION_SIZE, 0);
			}
			return FALSE; // Caller need not unlock
		}
	}
	return TRUE; // Caller must do SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr)
}
#endif // CONFIG_RECIRC_DEPOT
/*
 * Free a tiny block with the magazine lock held, coalescing with free
 * neighbors on both sides. Returns TRUE when the caller must unlock
 * tiny_mag_ptr, FALSE when a recirc-depot path already released it.
 */
boolean_t
tiny_free_no_lock(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index, region_t region, void *ptr, msize_t msize)
{
	// Remember the caller's block; "ptr"/"msize" grow as we coalesce.
	void *original_ptr = ptr;
	size_t original_size = TINY_BYTES_FOR_MSIZE(msize);
	void *next_block = ((unsigned char *)ptr + original_size);
	msize_t previous_msize, next_msize;
	void *previous;
	tiny_free_list_t *big_free_block;
	tiny_free_list_t *after_next_block;
	tiny_free_list_t *before_next_block;

#if DEBUG_MALLOC
	if (LOG(szone, ptr)) {
		malloc_report(ASL_LEVEL_INFO, "in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
	}
	if (!msize) {
		malloc_zone_error(rack->debug_flags, true,
				"trying to free tiny block that is too small in tiny_free_no_lock(), ptr=%p, msize=%d\n",
				ptr, msize);
	}
#endif

	// We try to coalesce this block with the preceding one
	previous = tiny_previous_preceding_free(ptr, &previous_msize);
	if (previous) {
#if DEBUG_MALLOC
		if (LOG(szone, ptr) || LOG(szone, previous)) {
			malloc_report(ASL_LEVEL_INFO, "in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
		}
#endif

		// clear the meta_header since this is no longer the start of a block
		set_tiny_meta_header_middle(ptr);
		tiny_free_list_remove_ptr(rack, tiny_mag_ptr, previous, previous_msize);
		ptr = previous;
		msize += previous_msize;
	}

	// We try to coalesce with the next block
	if ((next_block < TINY_REGION_END(region)) && tiny_meta_header_is_free(next_block)) {
		next_msize = get_tiny_free_size(next_block);
#if DEBUG_MALLOC
		if (LOG(szone, ptr) || LOG(szone, next_block)) {
			malloc_report(ASL_LEVEL_INFO, "in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n", ptr, msize,
					next_block, next_msize);
		}
#endif

		// If we are coalescing with the next block, and the next block is in
		// the last slot of the free list, then we optimize this case here to
		// avoid removing next_block from the slot NUM_TINY_SLOTS and then adding ptr back
		// to slot NUM_TINY_SLOTS.
		if (next_msize > NUM_TINY_SLOTS) {
			msize += next_msize;

			big_free_block = (tiny_free_list_t *)next_block;
			after_next_block = free_list_unchecksum_ptr(rack, &big_free_block->next);
			before_next_block = free_list_unchecksum_ptr(rack, &big_free_block->previous);

			// Splice "ptr" into the last-slot list in next_block's place:
			// fix the predecessor's (or list head's) forward link ...
			if (!before_next_block) {
				tiny_mag_ptr->mag_free_list[NUM_TINY_SLOTS].p = ptr;
			} else {
				before_next_block->next.u = free_list_checksum_ptr(rack, ptr);
			}

			// ... and the successor's back link.
			if (after_next_block) {
				after_next_block->previous.u = free_list_checksum_ptr(rack, ptr);
			}

			// we don't need to checksum these since they are already checksummed
			((tiny_free_list_t *)ptr)->previous = big_free_block->previous;
			((tiny_free_list_t *)ptr)->next = big_free_block->next;

			// clear the meta_header to enable coalescing backwards
			set_tiny_meta_header_middle(big_free_block);
			set_tiny_meta_header_free(ptr, msize);

			goto tiny_free_ending;
		}
		tiny_free_list_remove_ptr(rack, tiny_mag_ptr, next_block, next_msize);
		set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
		msize += next_msize;
	}

	// The tiny cache already scribbles free blocks as they go through the
	// cache whenever msize < TINY_QUANTUM , so we do not need to do it here.
	if ((rack->debug_flags & MALLOC_DO_SCRIBBLE) && msize && (msize >= TINY_QUANTUM)) {
		memset(ptr, SCRABBLE_BYTE, TINY_BYTES_FOR_MSIZE(msize));
	}

	tiny_free_list_add_ptr(rack, tiny_mag_ptr, ptr, msize);

tiny_free_ending:

	tiny_mag_ptr->mag_num_objects--;
	// we use original_size and not msize to avoid double counting the coalesced blocks
	tiny_mag_ptr->mag_num_bytes_in_objects -= original_size;

	// Update this region's bytes in use count
	region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(region);
	size_t bytes_used = node->bytes_used - original_size;
	node->bytes_used = (unsigned int)bytes_used;

#if CONFIG_AGGRESSIVE_MADVISE
	// Platforms that want to madvise every freed allocation do so here, even if we continue
	// on to use the recirc depot after.
	tiny_madvise_free_range_no_lock(rack, tiny_mag_ptr, region, original_ptr, original_size, ptr, msize);
#endif

	// Caller must do SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr) if this function
	// returns TRUE.
	boolean_t needs_unlock = TRUE;
#if CONFIG_RECIRC_DEPOT
	needs_unlock = tiny_free_try_recirc_to_depot(rack, tiny_mag_ptr, mag_index, region, original_ptr, original_size, ptr, msize);
#endif
	return needs_unlock;
}
/*
 * Allocates from the last region or a freshly allocated region: installs
 * the new region "aligned_address" into this magazine and carves the first
 * "msize"-quanta block out of it. Returns a pointer to that block.
 * Entered with the magazine lock held.
 */
static void *
tiny_malloc_from_region_no_lock(rack_t *rack,
		magazine_t *tiny_mag_ptr,
		mag_index_t mag_index,
		msize_t msize,
		void *aligned_address)
{
	void *ptr;

	// Deal with unclaimed memory -- mag_bytes_free_at_end or mag_bytes_free_at_start
	if (tiny_mag_ptr->mag_bytes_free_at_end || tiny_mag_ptr->mag_bytes_free_at_start) {
		tiny_finalize_region(rack, tiny_mag_ptr);
	}

	// We set the unused bits of the header in the last pair to be all ones, and those of the inuse to zeroes.
#if NUM_TINY_BLOCKS & 31
	const uint32_t header = 0xFFFFFFFFU << (NUM_TINY_BLOCKS & 31);
#else
	const uint32_t header = 0;
#endif
	((tiny_region_t)aligned_address)->pairs[CEIL_NUM_TINY_BLOCKS_WORDS - 1].header = header;
	((tiny_region_t)aligned_address)->pairs[CEIL_NUM_TINY_BLOCKS_WORDS - 1].inuse = 0;

	// Tag the region at "aligned_address" as belonging to us,
	// and so put it under the protection of the magazine lock we are holding.
	// Do this before advertising "aligned_address" on the hash ring(!)
	MAGAZINE_INDEX_FOR_TINY_REGION(aligned_address) = mag_index;

	// Insert the new region into the hash ring
	rack_region_insert(rack, (region_t)aligned_address);

	tiny_mag_ptr->mag_last_region = aligned_address;
	BYTES_USED_FOR_TINY_REGION(aligned_address) = TINY_BYTES_FOR_MSIZE(msize);

#if CONFIG_ASLR_INTERNAL
	// Randomize where the first allocation lands inside the region so the
	// tiny heap layout is less predictable.
	int offset_msize = malloc_entropy[0] & TINY_ENTROPY_MASK;
#if DEBUG_MALLOC
	if (getenv("MallocASLRForce")) {
		offset_msize = strtol(getenv("MallocASLRForce"), NULL, 0) & TINY_ENTROPY_MASK;
	}
	if (getenv("MallocASLRPrint")) {
		malloc_report(ASL_LEVEL_INFO, "Region: %p offset: %d\n", aligned_address, offset_msize);
	}
#endif
#else
	int offset_msize = 0;
#endif
	ptr = (void *)((uintptr_t)aligned_address + TINY_BYTES_FOR_MSIZE(offset_msize));
	set_tiny_meta_header_in_use(ptr, msize);
	tiny_mag_ptr->mag_num_objects++;
	tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(msize);
	tiny_mag_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;

	// We put a header on the last block so that it appears in use (for coalescing, etc...)
	set_tiny_meta_header_in_use_1((void *)((uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(msize)));
	tiny_mag_ptr->mag_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize - offset_msize);

#if CONFIG_ASLR_INTERNAL
	// Put a header on the previous block for same reason
	tiny_mag_ptr->mag_bytes_free_at_start = TINY_BYTES_FOR_MSIZE(offset_msize);
	if (offset_msize) {
		set_tiny_meta_header_in_use_1((void *)((uintptr_t)ptr - TINY_QUANTUM));
	}
#else
	tiny_mag_ptr->mag_bytes_free_at_start = 0;
#endif

	// connect to magazine as last node
	recirc_list_splice_last(rack, tiny_mag_ptr, REGION_TRAILER_FOR_TINY_REGION(aligned_address));

#if DEBUG_MALLOC
	if (LOG(szone, ptr)) {
		malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
	}
#endif
	return ptr;
}
/*
 * Aligned allocation for the tiny allocator: allocate "span" bytes (the
 * caller sizes span so an aligned sub-block of "size" bytes must exist
 * inside it), then carve off the leading pad and trailing waste and return
 * them to the free list, leaving an aligned block that can later be passed
 * to free() like any other tiny allocation.
 */
void *
tiny_memalign(szone_t *szone, size_t alignment, size_t size, size_t span)
{
	msize_t mspan = TINY_MSIZE_FOR_BYTES(span + TINY_QUANTUM - 1);
	void *p = tiny_malloc_should_clear(&szone->tiny_rack, mspan, 0);

	if (NULL == p) {
		return NULL;
	}

	size_t offset = ((uintptr_t)p) & (alignment - 1);	 // p % alignment
	size_t pad = (0 == offset) ? 0 : alignment - offset; // p + pad achieves desired alignment

	msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
	msize_t mpad = TINY_MSIZE_FOR_BYTES(pad + TINY_QUANTUM - 1);
	msize_t mwaste = mspan - msize - mpad; // excess blocks beyond pad + aligned block

	if (mpad > 0) {
		void *q = (void *)(((uintptr_t)p) + pad);

		// Mark q as a block header and in-use, thus creating two blocks.
		magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone->tiny_rack.magazines,
				REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)),
				MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)));
		set_tiny_meta_header_in_use(q, msize);
		tiny_mag_ptr->mag_num_objects++;

		// set_tiny_meta_header_in_use() "reaffirms" the block_header on the *following* block, so
		// now set its in_use bit as well. But only if it's within the original allocation made above.
		if (mwaste > 0) {
			BITARRAY_SET(TINY_INUSE_FOR_HEADER(TINY_BLOCK_HEADER_FOR_PTR(q)), TINY_INDEX_FOR_PTR(q) + msize);
		}
		SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);

		// Give up mpad blocks beginning at p to the tiny free list
		free_tiny(&szone->tiny_rack, p, TINY_REGION_FOR_PTR(p), TINY_BYTES_FOR_MSIZE(mpad));
		p = q; // advance p to the desired alignment
	}
	if (mwaste > 0) {
		void *q = (void *)(((uintptr_t)p) + TINY_BYTES_FOR_MSIZE(msize));
		// Mark q as block header and in-use, thus creating two blocks.
		magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone->tiny_rack.magazines,
				REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)),
				MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)));
		set_tiny_meta_header_in_use(q, mwaste);
		tiny_mag_ptr->mag_num_objects++;
		SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);

		// Give up mwaste blocks beginning at q to the tiny free list
		free_tiny(&szone->tiny_rack, q, TINY_REGION_FOR_PTR(q), TINY_BYTES_FOR_MSIZE(mwaste));
	}

	return p; // p has the desired size and alignment, and can later be free()'d
}
/*
 * Report whether "ptr" falls inside a tiny region owned by this rack.
 */
boolean_t
tiny_claimed_address(rack_t *rack, void *ptr)
{
	region_t owning_region = tiny_region_for_ptr_no_lock(rack, ptr);
	if (!owning_region) {
		return 0;
	}
	return (ptr < TINY_REGION_END(owning_region)) ? 1 : 0;
}
/*
 * Shrink the allocation at "ptr" from old_size down to new_good_size in
 * place: the trailing excess quanta are carved into their own in-use block
 * and handed to free_tiny(). Always returns the original pointer.
 */
void *
tiny_try_shrink_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_good_size)
{
	msize_t kept_msize = TINY_MSIZE_FOR_BYTES(new_good_size);
	msize_t excess_msize = TINY_MSIZE_FOR_BYTES(old_size) - kept_msize;

	if (!excess_msize) {
		// Nothing to give back at tiny-quantum granularity.
		return ptr;
	}

	void *excess = (void *)((uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(kept_msize));
	magazine_t *mag = mag_lock_zine_for_region_trailer(rack->magazines,
			REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)),
			MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)));

	// Mark the excess as a block header and in-use, splitting one block into two.
	set_tiny_meta_header_in_use(excess, excess_msize);
	mag->mag_num_objects++;
	SZONE_MAGAZINE_PTR_UNLOCK(mag);

	// Release the excess block back to the tiny allocator.
	free_tiny(rack, excess, TINY_REGION_FOR_PTR(excess), 0);
	return ptr;
}
/*
 * Try to grow the allocation at "ptr" from old_size to new_size without
 * moving it, by consuming adjacent space: the mag_last_free cached block,
 * the region's unclaimed tail (mag_bytes_free_at_end), or a free block on
 * the free list immediately after. Returns 1 on success, 0 otherwise.
 */
boolean_t
tiny_try_realloc_in_place(rack_t *rack, void *ptr, size_t old_size, size_t new_size)
{
	// returns 1 on success
	msize_t index;
	msize_t old_msize;
	unsigned next_index;
	void *next_block;
	boolean_t is_free;
	msize_t next_msize, coalesced_msize, leftover_msize, new_msize;
	void *leftover;

	index = TINY_INDEX_FOR_PTR(ptr);
	old_msize = TINY_MSIZE_FOR_BYTES(old_size);
	new_msize = TINY_MSIZE_FOR_BYTES(new_size + TINY_QUANTUM - 1);
	next_index = index + old_msize;

	// The space after this block would lie beyond the region: cannot grow.
	if (next_index >= NUM_TINY_BLOCKS) {
		return 0;
	}
	next_block = (char *)ptr + old_size;

	magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(rack->magazines,
			REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)),
			MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)));

	// NOTE(review): blocks whose region sits in the recirc depot are never
	// grown in place — presumably to avoid racing region recirculation;
	// confirm against the depot transfer paths.
	if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr))) {
		SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
		return 0;
	}

	// Additional quanta needed beyond the current block.
	coalesced_msize = new_msize - old_msize;
#if CONFIG_TINY_CACHE
	void *last_free_ptr = tiny_mag_ptr->mag_last_free;
	msize_t last_free_msize = tiny_mag_ptr->mag_last_free_msize;
	if (last_free_ptr == next_block && old_msize + last_free_msize >= new_msize) {
		/*
		 * There is a block in mag_last_free and it's immediately after
		 * this block and it's large enough. We can use some or all of it.
		 */
		leftover_msize = last_free_msize - coalesced_msize;
		if (leftover_msize) {
			tiny_mag_ptr->mag_last_free_msize -= coalesced_msize;
			tiny_mag_ptr->mag_last_free += new_size - old_size;
			// The block in mag_last_free is still marked as header and in-use, so copy that
			// state to the block that remains. The state for the block that we're going to
			// use is adjusted by the set_tiny_meta_header_middle() call below.
			set_tiny_meta_header_in_use(next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize), leftover_msize);
		} else {
			// Using the whole block.
			tiny_mag_ptr->mag_last_free = NULL;
			tiny_mag_ptr->mag_last_free_msize = 0;
			tiny_mag_ptr->mag_last_free_rgn = NULL;
		}
		set_tiny_meta_header_middle(next_block);
	} else {
#endif // CONFIG_TINY_CACHE
		/*
		 * Try to expand into unused space immediately after this block.
		 */
		msize_t unused_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end);
		void *unused_start = TINY_REGION_END(TINY_REGION_FOR_PTR(ptr)) - tiny_mag_ptr->mag_bytes_free_at_end;
		if (tiny_mag_ptr->mag_last_region == TINY_REGION_FOR_PTR(ptr)
				&& coalesced_msize < unused_msize && unused_start == ptr + old_size) {
			// The block at the start of mag_bytes_free_at_end is marked as
			// header/in-use and the next one has header/free. We need to
			// reset both the header and in-use bit in the first block and we
			// need to reset the header bit in the second block if it's part of
			// the new allocation.
			set_tiny_meta_header_middle(unused_start);
			if (coalesced_msize > 1) {
				set_tiny_meta_header_middle(unused_start + TINY_QUANTUM);
			}
			tiny_mag_ptr->mag_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(coalesced_msize);
			if (tiny_mag_ptr->mag_bytes_free_at_end) {
				// Mark the first block of the remaining free area as a header and in-use.
				set_tiny_meta_header_in_use_1(ptr + TINY_BYTES_FOR_MSIZE(new_msize));
			}
		} else {
			/*
			 * Look for a free block immediately afterwards. If it's large
			 * enough, we can consume (part of) it.
			 */
			is_free = tiny_meta_header_is_free(next_block);
			if (!is_free) {
				SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
				return 0; // next_block is in use;
			}
			next_msize = get_tiny_free_size(next_block);
			if (old_msize + next_msize < new_msize) {
				SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
				return 0; // even with next block, not enough
			}
			/*
			 * The following block is big enough; pull it from its freelist and chop off enough to satisfy
			 * our needs.
			 */
			tiny_free_list_remove_ptr(rack, tiny_mag_ptr, next_block, next_msize);
			set_tiny_meta_header_middle(next_block); // clear the meta_header to enable coalescing backwards
			leftover_msize = next_msize - coalesced_msize;
			if (leftover_msize) {
				/* there's some left, so put the remainder back */
				leftover = (void *)((uintptr_t)next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize));
				tiny_free_list_add_ptr(rack, tiny_mag_ptr, leftover, leftover_msize);
			}
			set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize);
		}
#if CONFIG_TINY_CACHE
	}
#endif // CONFIG_TINY_CACHE
#if DEBUG_MALLOC
	if (LOG(szone, ptr)) {
		malloc_report(ASL_LEVEL_INFO, "in tiny_try_realloc_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize);
	}
#endif
	tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize);

	// Update this region's bytes in use count
	region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
	size_t bytes_used = node->bytes_used + TINY_BYTES_FOR_MSIZE(coalesced_msize);
	node->bytes_used = (unsigned int)bytes_used;

	// Emptiness discriminant
	if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) {
		/* After this reallocation the region is still sparse, so it must have been even more so before
		 * the reallocation. That implies the region is already correctly marked. Do nothing. */
	} else {
		/* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the
		 * recirculation candidates list. */
		node->recirc_suitable = FALSE;
	}

	SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
	CHECK(szone, __PRETTY_FUNCTION__);
	return 1;
}
// Shared prefix for tiny-region consistency-check failure reports.
static char *tiny_check_fail_msg = "*** check: incorrect tiny region ";

// Report a tiny-region consistency-check failure; relies on "region_index"
// and "counter" being in scope at the expansion site.
#define TINY_CHECK_FAIL(fmt, ...) \
	malloc_zone_check_fail(tiny_check_fail_msg, \
			"%ld, counter=%d\n" fmt, region_index, counter, __VA_ARGS__);
/*
 * Validate the internal consistency of a single tiny region: the leading and
 * trailing boundary blocks of the magazine's last region, every block's
 * metadata header, and, for free blocks, their free-list linkage and trailing
 * size marker.
 *
 * Returns 1 if the region is consistent (or was skipped because it is pinned
 * to the depot and may be mid-transition), 0 on the first inconsistency.
 * The magazine lock for the region's magazine must be held by the caller.
 */
boolean_t
tiny_check_region(rack_t *rack, region_t region, size_t region_index,
		unsigned counter)
{
	uintptr_t start, ptr, region_end;
	boolean_t prev_free = 0;
	boolean_t is_free;
	msize_t msize;
	tiny_free_list_t *free_head;
	void *follower, *previous, *next;
	mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(region);
	magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]);

	// Assumes locked
	CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__);

	// Do not check the region if pinned_to_depot is not zero because it
	// may not be in a consistent state (specifically, if may have a
	// block marked as in-use that's longer than any legal allocation,
	// which upsets get_tiny_meta_header() because it can't determine the
	// block's length).
	//
	// NOTE: the guard must skip the check when pinned_to_depot is NON-zero;
	// the previous code had the condition inverted and skipped exactly the
	// regions that were safe to check.
	if (REGION_TRAILER_FOR_TINY_REGION(region)->pinned_to_depot) {
		return 1;
	}

	/* establish region limits */
	start = (uintptr_t)TINY_REGION_ADDRESS(region);
	ptr = start;
	if (region == tiny_mag_ptr->mag_last_region) {
		ptr += tiny_mag_ptr->mag_bytes_free_at_start;

		/*
		 * Check the leading block's integrity here also.
		 */
		if (tiny_mag_ptr->mag_bytes_free_at_start) {
			msize = get_tiny_meta_header((void *)(ptr - TINY_QUANTUM), &is_free);
			if (is_free || (msize != 1)) {
				TINY_CHECK_FAIL("*** invariant broken for leader block %p - %d %d\n",
						(void *)(ptr - TINY_QUANTUM), msize, is_free);
				return 0;
			}
		}
	}
	region_end = (uintptr_t)TINY_REGION_END(region);

	/*
	 * The last region may have a trailing chunk which has not been converted into inuse/freelist
	 * blocks yet.
	 */
	if (region == tiny_mag_ptr->mag_last_region) {
		region_end -= tiny_mag_ptr->mag_bytes_free_at_end;
	}

	/*
	 * Scan blocks within the region.
	 */
	while (ptr < region_end) {
		/*
		 * If the first block is free, and its size is 65536 (msize = 0) then the entire region is
		 * free.
		 */
		msize = get_tiny_meta_header((void *)ptr, &is_free);
		if (is_free && !msize && (ptr == start)) {
			return 1;
		}

		/*
		 * If the block's size is 65536 (msize = 0) then since we're not the first entry the size is
		 * corrupt.
		 */
		if (!msize) {
			TINY_CHECK_FAIL("*** invariant broken for tiny block %p this msize=%d - size is too small\n", (void *)ptr, msize);
			return 0;
		}

		if (!is_free) {
			/*
			 * In use blocks cannot be more than NUM_TINY_SLOTS quanta large.
			 */
			prev_free = 0;
			if (msize > NUM_TINY_SLOTS) {
				TINY_CHECK_FAIL("*** invariant broken for %p this tiny msize=%d - size is too large\n", (void *)ptr, msize);
				return 0;
			}
			/* move to next block */
			ptr += TINY_BYTES_FOR_MSIZE(msize);
		} else {
#if !CONFIG_RELAXED_INVARIANT_CHECKS
			/*
			 * Free blocks must have been coalesced, we cannot have a free block following another
			 * free block.
			 */
			if (prev_free) {
				TINY_CHECK_FAIL("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n", (void *)ptr, msize);
				return 0;
			}
#endif // CONFIG_RELAXED_INVARIANT_CHECKS
			prev_free = 1;

			/*
			 * Check the integrity of this block's entry in its freelist.
			 */
			free_head = (tiny_free_list_t *)ptr;
			previous = free_list_unchecksum_ptr(rack, &free_head->previous);
			next = free_list_unchecksum_ptr(rack, &free_head->next);
			if (previous && !tiny_meta_header_is_free(previous)) {
				TINY_CHECK_FAIL("*** invariant broken for %p (previous %p is not a free pointer)\n", (void *)ptr, previous);
				return 0;
			}
			if (next && !tiny_meta_header_is_free(next)) {
				TINY_CHECK_FAIL("*** invariant broken for %p (next in free list %p is not a free pointer)\n", (void *)ptr, next);
				return 0;
			}
			/*
			 * Check the free block's trailing size value.
			 */
			follower = FOLLOWING_TINY_PTR(ptr, msize);
			if (((uintptr_t)follower != region_end) && (get_tiny_previous_free_msize(follower) != msize)) {
				TINY_CHECK_FAIL("*** invariant broken for tiny free %p followed by %p in region [%p-%p] "
						"(end marker incorrect) should be %d; in fact %d\n",
						(void *)ptr, follower, TINY_REGION_ADDRESS(region), (void *)region_end,
						msize, get_tiny_previous_free_msize(follower));
				return 0;
			}
			/* move to next block */
			ptr = (uintptr_t)follower;
		}
	}
	/*
	 * Ensure that we scanned the entire region
	 */
	if (ptr != region_end) {
		TINY_CHECK_FAIL("*** invariant broken for region end %p - %p\n", (void *)ptr, (void *)region_end);
		return 0;
	}
	/*
	 * Check the trailing block's integrity.
	 */
	if (region == tiny_mag_ptr->mag_last_region) {
		if (tiny_mag_ptr->mag_bytes_free_at_end) {
			msize = get_tiny_meta_header((void *)ptr, &is_free);
			if (is_free || (msize != 1)) {
				TINY_CHECK_FAIL("*** invariant broken for blocker block %p - %d %d\n", (void *)ptr, msize, is_free);
				return 0;
			}
		}
	}
	return 1;
}
/*
 * Enumerate the tiny regions of a (possibly remote) zone on behalf of the
 * VM/introspection tools. Depending on 'type_mask', reports each region's
 * admin (metadata) range, its pointer payload range, and/or every individual
 * in-use block. All target-task memory is accessed through 'reader'; results
 * are handed to 'recorder' in batches of up to MAX_RECORDER_BUFFER ranges.
 * Returns 0 on success, a reader error, or KERN_FAILURE on corrupt metadata.
 */
kern_return_t
tiny_in_use_enumerator(task_t task,
		void *context,
		unsigned type_mask,
		szone_t *szone,
		memory_reader_t reader,
		vm_range_recorder_t recorder)
{
	size_t num_regions;
	size_t index;
	region_t *regions;
	vm_range_t buffer[MAX_RECORDER_BUFFER];
	unsigned count = 0;
	kern_return_t err;
	region_t region;
	vm_range_t range;
	vm_range_t admin_range;
	vm_range_t ptr_range;
	unsigned char *mapped_region;
	uint32_t *block_header;
	uint32_t *in_use;
	unsigned block_index;
	unsigned block_limit;
	boolean_t is_free;
	msize_t msize;
	void *mapped_ptr;
	unsigned bit;
	magazine_t *tiny_mag_base = NULL;

	// Map the region hash table out of the target task.
	region_hash_generation_t *trg_ptr;
	err = reader(task, (vm_address_t)szone->tiny_rack.region_generation, sizeof(region_hash_generation_t), (void **)&trg_ptr);
	if (err) {
		return err;
	}

	num_regions = trg_ptr->num_regions_allocated;
	err = reader(task, (vm_address_t)trg_ptr->hashed_regions, sizeof(region_t) * num_regions, (void **)&regions);
	if (err) {
		return err;
	}

	if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
		// Map in all active magazines. Do this outside the iteration over regions.
		err = reader(task, (vm_address_t)(szone->tiny_rack.magazines), szone->tiny_rack.num_magazines * sizeof(magazine_t),
				(void **)&tiny_mag_base);
		if (err) {
			return err;
		}
	}

	for (index = 0; index < num_regions; ++index) {
		region = regions[index];
		// Skip empty hash slots and slots of regions already returned to the OS.
		if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
			range.address = (vm_address_t)TINY_REGION_ADDRESS(region);
			range.size = (vm_size_t)TINY_REGION_SIZE;
			if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
				admin_range.address = range.address + TINY_METADATA_START;
				admin_range.size = TINY_METADATA_SIZE;
				recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
			}
			if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
				ptr_range.address = range.address;
				ptr_range.size = NUM_TINY_BLOCKS * TINY_QUANTUM;
				recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
			}
			if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
				// The block cached in a magazine's mag_last_free is marked in-use
				// in the bitmap but must be reported as free; find which cache
				// entry (if any) covers this region.
				vm_address_t mag_last_free;
				msize_t mag_last_free_msize = 0;

				err = reader(task, range.address, range.size, (void **)&mapped_region);
				if (err) {
					return err;
				}

				mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(mapped_region);
				magazine_t *tiny_mag_ptr = tiny_mag_base + mag_index;

				if (DEPOT_MAGAZINE_INDEX != mag_index) {
					mag_last_free = (uintptr_t)tiny_mag_ptr->mag_last_free;
					mag_last_free_msize = tiny_mag_ptr->mag_last_free_msize;
				} else {
					// Depot-owned region: any magazine's cache may reference it.
					// NOTE(review): mag_last_free stays uninitialized if no
					// magazine's mag_last_free_rgn matches — presumably that
					// cannot happen for a depot region with a cached block; verify.
					for (mag_index = 0; mag_index < szone->tiny_rack.num_magazines; mag_index++) {
						if ((void *)range.address == (tiny_mag_base + mag_index)->mag_last_free_rgn) {
							mag_last_free = (uintptr_t)(tiny_mag_base + mag_index)->mag_last_free;
							mag_last_free_msize = (tiny_mag_base + mag_index)->mag_last_free_msize;
						}
					}
				}

				block_header = (uint32_t *)(mapped_region + TINY_METADATA_START + sizeof(region_trailer_t));
				in_use = TINY_INUSE_FOR_HEADER(block_header);
				block_index = 0;
				block_limit = NUM_TINY_BLOCKS;
				if (region == tiny_mag_ptr->mag_last_region) {
					block_index += TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_start);
					block_limit -= TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end);
				}

				while (block_index < block_limit) {
					vm_size_t block_offset = TINY_BYTES_FOR_MSIZE(block_index);
					is_free = !BITARRAY_BIT(in_use, block_index);
					if (is_free) {
						mapped_ptr = mapped_region + block_offset;

						// mapped_region, the address at which 'range' in 'task' has been
						// mapped into our process, is not necessarily aligned to
						// TINY_BLOCKS_ALIGN.
						//
						// Since the code in get_tiny_free_size() assumes the pointer came
						// from a properly aligned tiny region, and mapped_region is not
						// necessarily aligned, then do the size calculation directly.
						// If the next bit is set in the header bitmap, then the size is one
						// quantum.  Otherwise, read the size field.
						if (!BITARRAY_BIT(block_header, (block_index + 1))) {
							msize = TINY_FREE_SIZE(mapped_ptr);
						} else {
							msize = 1;
						}
					} else if (range.address + block_offset != mag_last_free) {
						// In-use block: length runs until the next header bit.
						msize = 1;
						bit = block_index + 1;
						while (!BITARRAY_BIT(block_header, bit)) {
							bit++;
							msize++;
						}
						buffer[count].address = range.address + block_offset;
						buffer[count].size = TINY_BYTES_FOR_MSIZE(msize);
						count++;
						if (count >= MAX_RECORDER_BUFFER) {
							recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
							count = 0;
						}
					} else {
						// Block is not free but it matches mag_last_free_ptr so even
						// though it is not marked free in the bitmap, we treat it as if
						// it is and move on
						msize = mag_last_free_msize;
					}

					if (!msize) {
						return KERN_FAILURE; // Somethings amiss. Avoid looping at this block_index.
					}

					block_index += msize;
				}
				if (count) {
					recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
					count = 0;
				}
			}
		}
	}
	return 0;
}
/*
 * Allocate a block of 'msize' quanta from the magazine's free lists.
 * Tries, in order: an exact-size slot match, the smallest larger slot
 * (splitting off and re-queueing the remainder), the final "oversized"
 * slot (blocks > NUM_TINY_SLOTS quanta), and finally the unconverted free
 * space at the end (and, with CONFIG_ASLR_INTERNAL, the start) of the
 * magazine's last region. Returns NULL when nothing can satisfy the request.
 * The magazine lock must be held by the caller.
 */
void *
tiny_malloc_from_free_list(rack_t *rack, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize)
{
	tiny_free_list_t *ptr;
	msize_t this_msize;
	grain_t slot = tiny_slot_from_msize(msize);
	free_list_t *free_list = tiny_mag_ptr->mag_free_list;
	free_list_t *the_slot = free_list + slot;
	tiny_free_list_t *next;
	free_list_t *limit;
#if defined(__LP64__)
	uint64_t bitmap;
#else
	uint32_t bitmap;
#endif
	msize_t leftover_msize;
	tiny_free_list_t *leftover_ptr;

	// Assumes we've locked the region
	CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__);

	// Look for an exact match by checking the freelist for this msize.
	//
	ptr = the_slot->p;
	if (ptr) {
		// Pop the head of the exact-fit list; clear the occupancy bit if it empties.
		next = free_list_unchecksum_ptr(rack, &ptr->next);
		if (next) {
			next->previous = ptr->previous;
		} else {
			BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot);
		}
		the_slot->p = next;
		this_msize = msize;
#if DEBUG_MALLOC
		if (LOG(szone, ptr)) {
			malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize);
		}
#endif
		goto return_tiny_alloc;
	}

	// Mask off the bits representing slots holding free blocks smaller than the
	// size we need.  If there are no larger free blocks, try allocating from
	// the free space at the end of the tiny region.
#if defined(__LP64__)
	bitmap = ((uint64_t *)(tiny_mag_ptr->mag_bitmap))[0] & ~((1ULL << slot) - 1);
#else
	bitmap = tiny_mag_ptr->mag_bitmap[0] & ~((1 << slot) - 1);
#endif
	if (!bitmap) {
		goto try_tiny_malloc_from_end;
	}

	// Smallest occupied slot that is at least as large as requested.
	slot = BITMAPV_CTZ(bitmap);
	limit = free_list + NUM_TINY_SLOTS;
	free_list += slot;

	if (free_list < limit) {
		ptr = free_list->p;
		if (ptr) {
			next = free_list_unchecksum_ptr(rack, &ptr->next);
			free_list->p = next;
			if (next) {
				next->previous = ptr->previous;
			} else {
				BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot);
			}
			this_msize = get_tiny_free_size(ptr);
			goto add_leftover_and_proceed;
		}
#if DEBUG_MALLOC
		malloc_report(ASL_LEVEL_ERR, "in tiny_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n", slot);
#endif
	}

	// We are now looking at the last slot, which contains blocks equal to, or
	// due to coalescing of free blocks, larger than NUM_TINY_SLOTS * tiny quantum size.
	// If the last freelist is not empty, and the head contains a block that is
	// larger than our request, then the remainder is put back on the free list.
	ptr = limit->p;
	if (ptr) {
		this_msize = get_tiny_free_size(ptr);
		next = free_list_unchecksum_ptr(rack, &ptr->next);
		if (this_msize - msize > NUM_TINY_SLOTS) {
			// the leftover will go back to the free list, so we optimize by
			// modifying the free list rather than a pop and push of the head
			leftover_msize = this_msize - msize;
			leftover_ptr = (tiny_free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
			limit->p = leftover_ptr;
			if (next) {
				next->previous.u = free_list_checksum_ptr(rack, leftover_ptr);
			}
			leftover_ptr->previous = ptr->previous;
			leftover_ptr->next = ptr->next;
			set_tiny_meta_header_free(leftover_ptr, leftover_msize);
#if DEBUG_MALLOC
			if (LOG(szone, ptr)) {
				malloc_report(ASL_LEVEL_INFO,
						"in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
			}
#endif
			this_msize = msize;
			goto return_tiny_alloc;
		}
		if (next) {
			next->previous = ptr->previous;
		}
		limit->p = next;
		goto add_leftover_and_proceed;
		/* NOTREACHED */
	}

try_tiny_malloc_from_end:
	// Let's see if we can use tiny_mag_ptr->mag_bytes_free_at_end
	if (tiny_mag_ptr->mag_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) {
		ptr = (tiny_free_list_t *)((uintptr_t)TINY_REGION_END(tiny_mag_ptr->mag_last_region) - tiny_mag_ptr->mag_bytes_free_at_end);
		tiny_mag_ptr->mag_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize);
		if (tiny_mag_ptr->mag_bytes_free_at_end) {
			// let's add an in use block after ptr to serve as boundary
			set_tiny_meta_header_in_use_1((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
		}
		this_msize = msize;
#if DEBUG_MALLOC
		if (LOG(szone, ptr)) {
			malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize);
		}
#endif
		goto return_tiny_alloc;
	}
#if CONFIG_ASLR_INTERNAL
	// Try from start if nothing left at end
	if (tiny_mag_ptr->mag_bytes_free_at_start >= TINY_BYTES_FOR_MSIZE(msize)) {
		ptr = (tiny_free_list_t *)(TINY_REGION_ADDRESS(tiny_mag_ptr->mag_last_region) + tiny_mag_ptr->mag_bytes_free_at_start -
								   TINY_BYTES_FOR_MSIZE(msize));
		tiny_mag_ptr->mag_bytes_free_at_start -= TINY_BYTES_FOR_MSIZE(msize);
		if (tiny_mag_ptr->mag_bytes_free_at_start) {
			// let's add an in use block before ptr to serve as boundary
			set_tiny_meta_header_in_use_1((unsigned char *)ptr - TINY_QUANTUM);
		}
		this_msize = msize;
#if DEBUG_MALLOC
		if (LOG(szone, ptr)) {
			malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_free_list(), from start ptr=%p, msize=%d\n", ptr, msize);
		}
#endif
		goto return_tiny_alloc;
	}
#endif
	return NULL;

add_leftover_and_proceed:
	// The block we popped may be bigger than requested; queue the tail.
	if (!this_msize || (this_msize > msize)) {
		leftover_msize = this_msize - msize;
		leftover_ptr = (tiny_free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
#if DEBUG_MALLOC
		if (LOG(szone, ptr)) {
			malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
		}
#endif
		tiny_free_list_add_ptr(rack, tiny_mag_ptr, leftover_ptr, leftover_msize);
		this_msize = msize;
	}

return_tiny_alloc:
	tiny_mag_ptr->mag_num_objects++;
	tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(this_msize);

	// Update this region's bytes in use count
	region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
	size_t bytes_used = node->bytes_used + TINY_BYTES_FOR_MSIZE(this_msize);
	node->bytes_used = (unsigned int)bytes_used;

	// Emptiness discriminant
	if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) {
		/* After this allocation the region is still sparse, so it must have been even more so before
		 * the allocation. That implies the region is already correctly marked. Do nothing. */
	} else {
		/* Region has crossed threshold from sparsity to density. Mark it not "suitable" on the
		 * recirculation candidates list. */
		node->recirc_suitable = FALSE;
	}
#if DEBUG_MALLOC
	if (LOG(szone, ptr)) {
		malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
	}
#endif
	if (this_msize > 1) {
		set_tiny_meta_header_in_use(ptr, this_msize);
	} else {
		set_tiny_meta_header_in_use_1(ptr);
	}
	return ptr;
}
/*
 * Top-level tiny allocation: allocate TINY_BYTES_FOR_MSIZE(msize) bytes from
 * the calling thread's magazine, optionally zeroing the result. Tries the
 * magazine's one-entry last-free cache, then the free lists, then a region
 * recirculated from the depot, and finally maps a fresh region. Returns NULL
 * only when a fresh region cannot be allocated.
 */
void *
tiny_malloc_should_clear(rack_t *rack, msize_t msize, boolean_t cleared_requested)
{
	void *ptr;
	mag_index_t mag_index = tiny_mag_get_thread_index() % rack->num_magazines;
	magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]);

	MALLOC_TRACE(TRACE_tiny_malloc, (uintptr_t)rack, TINY_BYTES_FOR_MSIZE(msize), (uintptr_t)tiny_mag_ptr, cleared_requested);

#if DEBUG_MALLOC
	if (DEPOT_MAGAZINE_INDEX == mag_index) {
		malloc_zone_error(rack->debug_flags, true, "malloc called for magazine index -1\n");
		return (NULL);
	}

	if (!msize) {
		malloc_zone_error(rack->debug_flags, true, "invariant broken (!msize) in allocation (region)\n");
		return (NULL);
	}
#endif

	SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr);

#if CONFIG_TINY_CACHE
	// Fast path: reuse the block parked in the last-free cache when the size
	// matches exactly.
	ptr = tiny_mag_ptr->mag_last_free;

	if (tiny_mag_ptr->mag_last_free_msize == msize) {
		// we have a winner
		tiny_mag_ptr->mag_last_free = NULL;
		tiny_mag_ptr->mag_last_free_msize = 0;
		tiny_mag_ptr->mag_last_free_rgn = NULL;
		SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
		CHECK(szone, __PRETTY_FUNCTION__);
		if (cleared_requested) {
			memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
		}
#if DEBUG_MALLOC
		if (LOG(szone, ptr)) {
			malloc_report(ASL_LEVEL_INFO, "in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize);
		}
#endif
		return ptr;
	}
#endif /* CONFIG_TINY_CACHE */

	while (1) {
		ptr = tiny_malloc_from_free_list(rack, tiny_mag_ptr, mag_index, msize);
		if (ptr) {
			SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
			CHECK(szone, __PRETTY_FUNCTION__);
			if (cleared_requested) {
				memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
			}
			return ptr;
		}

		// Free lists empty: try to adopt a region from the depot, then retry.
		if (tiny_get_region_from_depot(rack, tiny_mag_ptr, mag_index, msize)) {
			ptr = tiny_malloc_from_free_list(rack, tiny_mag_ptr, mag_index, msize);
			if (ptr) {
				SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
				CHECK(szone, __PRETTY_FUNCTION__);
				if (cleared_requested) {
					memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
				}
				return ptr;
			}
		}

		// The magazine is exhausted. A new region (heap) must be allocated to satisfy this call to malloc().
		// The allocation, an mmap() system call, will be performed outside the magazine spin locks by the first
		// thread that suffers the exhaustion. That thread sets "alloc_underway" and enters a critical section.
		// Threads arriving here later are excluded from the critical section, yield the CPU, and then retry the
		// allocation. After some time the magazine is resupplied, the original thread leaves with its allocation,
		// and retry-ing threads succeed in the code just above.
		if (!tiny_mag_ptr->alloc_underway) {
			void *fresh_region;

			// time to create a new region (do this outside the magazine lock)
			tiny_mag_ptr->alloc_underway = TRUE;
			OSMemoryBarrier();
			SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
			fresh_region = mvm_allocate_pages_securely(TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, VM_MEMORY_MALLOC_TINY, rack->debug_flags);
			SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr);

			// DTrace USDT Probe
			MAGMALLOC_ALLOCREGION(TINY_SZONE_FROM_RACK(rack), (int)mag_index, fresh_region, TINY_REGION_SIZE);

			if (!fresh_region) { // out of memory!
				tiny_mag_ptr->alloc_underway = FALSE;
				OSMemoryBarrier();
				SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
				return NULL;
			}

			ptr = tiny_malloc_from_region_no_lock(rack, tiny_mag_ptr, mag_index, msize, fresh_region);

			// we don't clear because this freshly allocated space is pristine
			tiny_mag_ptr->alloc_underway = FALSE;
			OSMemoryBarrier();
			SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
			CHECK(szone, __PRETTY_FUNCTION__);
			return ptr;
		} else {
			// Another thread is already mapping a region; let it finish.
			SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
			yield();
			SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr);
		}
	}
	/* NOTREACHED */
}
/*
 * Return the allocated size in bytes of the tiny block 'ptr', or 0 if 'ptr'
 * is not a live tiny allocation of this rack: not inside a tiny region,
 * pointing at region metadata, marked free, or currently parked in a
 * magazine's last-free cache.
 */
size_t
tiny_size(rack_t *rack, const void *ptr)
{
	if (tiny_region_for_ptr_no_lock(rack, ptr)) {
		// Pointers past the payload blocks address region metadata, not data.
		if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) {
			return 0;
		}

		boolean_t is_free;
		msize_t msize = get_tiny_meta_header(ptr, &is_free);
		if (is_free) {
			return 0;
		}

#if CONFIG_TINY_CACHE
		{
			// A block sitting in a mag_last_free cache is logically free even
			// though its header still says in-use.
			mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
			if (DEPOT_MAGAZINE_INDEX != mag_index) {
				magazine_t *tiny_mag_ptr = &rack->magazines[mag_index];

				if (msize < TINY_QUANTUM && ptr == tiny_mag_ptr->mag_last_free) {
					return 0;
				}
			} else {
				// Depot-owned region: any magazine's cache may hold the block.
				for (mag_index = 0; mag_index < rack->num_magazines; mag_index++) {
					magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]);

					if (msize < TINY_QUANTUM && ptr == tiny_mag_ptr->mag_last_free) {
						return 0;
					}
				}
			}
		}
#endif
		return TINY_BYTES_FOR_MSIZE(msize);
	}

	return 0;
}
/*
 * Report a double free of a tiny block. The magazine lock covering the
 * block's region is held on entry; it is dropped before raising the zone
 * error so reporting does not run under the allocator lock.
 */
static MALLOC_NOINLINE void
free_tiny_botch(rack_t *rack, tiny_free_list_t *ptr)
{
	mag_index_t idx = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
	magazine_t *mag = &rack->magazines[idx];

	SZONE_MAGAZINE_PTR_UNLOCK(mag);
	malloc_zone_error(rack->debug_flags, true, "Double free of object %p\n", ptr);
}
/*
 * Free a tiny block 'ptr' known to reside in 'tiny_region'. If 'known_size'
 * is non-zero it is trusted instead of reading the block's metadata header.
 * Sufficiently small blocks are parked in the magazine's one-entry last-free
 * cache, and the previously cached block (if any) is freed in their place.
 */
void
free_tiny(rack_t *rack, void *ptr, region_t tiny_region, size_t known_size)
{
	msize_t msize;
	boolean_t is_free;
	mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region);
	magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]);

	MALLOC_TRACE(TRACE_tiny_free, (uintptr_t)rack, (uintptr_t)ptr, (uintptr_t)tiny_mag_ptr, known_size);

	// ptr is known to be in tiny_region
	if (known_size) {
		msize = TINY_MSIZE_FOR_BYTES(known_size + TINY_QUANTUM - 1);
	} else {
		msize = get_tiny_meta_header(ptr, &is_free);
		if (is_free) {
			// Block already marked free: double free.
			free_tiny_botch(rack, ptr);
			return;
		}
	}
#if DEBUG_MALLOC
	if (!msize) {
		malloc_report(ASL_LEVEL_ERR, "*** free_tiny() block in use is too large: %p\n", ptr);
		return;
	}
#endif

	SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr);

#if CONFIG_TINY_CACHE
	// Depot does not participate in CONFIG_TINY_CACHE since it can't be directly malloc()'d
	if (DEPOT_MAGAZINE_INDEX != mag_index) {
		if (msize < TINY_QUANTUM) { // to see if the bits fit in the last 4 bits
			void *ptr2 = tiny_mag_ptr->mag_last_free; // Might be NULL
			msize_t msize2 = tiny_mag_ptr->mag_last_free_msize;
			region_t rgn2 = tiny_mag_ptr->mag_last_free_rgn;

			/* check that we don't already have this pointer in the cache */
			if (ptr == ptr2) {
				free_tiny_botch(rack, ptr);
				return;
			}

			if ((rack->debug_flags & MALLOC_DO_SCRIBBLE) && msize) {
				memset(ptr, SCRABBLE_BYTE, TINY_BYTES_FOR_MSIZE(msize));
			}

			// Park this block in the cache; we take over responsibility for
			// freeing the block it displaces (if any).
			tiny_mag_ptr->mag_last_free = ptr;
			tiny_mag_ptr->mag_last_free_msize = msize;
			tiny_mag_ptr->mag_last_free_rgn = tiny_region;

			if (!ptr2) {
				SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
				CHECK(szone, __PRETTY_FUNCTION__);
				return;
			}

			msize = msize2;
			ptr = ptr2;
			tiny_region = rgn2;
		}
	}
#endif /* CONFIG_TINY_CACHE */

	// Now in the time it took to acquire the lock, the region may have migrated
	// from one magazine to another. I.e. trailer->mag_index is volatile.
	// In which case the magazine lock we obtained (namely magazines[mag_index].mag_lock)
	// is stale. If so, keep on tryin' ...
	region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(tiny_region);
	mag_index_t refreshed_index;
	while (mag_index != (refreshed_index = trailer->mag_index)) { // Note assignment
		SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);

		mag_index = refreshed_index;
		tiny_mag_ptr = &(rack->magazines[mag_index]);
		SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr);
	}

	// tiny_free_no_lock() returns true when it leaves the magazine lock held
	// (see also the matching call site in tiny_batch_free()).
	if (tiny_free_no_lock(rack, tiny_mag_ptr, mag_index, tiny_region, ptr, msize)) {
		SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
	}

	CHECK(szone, __PRETTY_FUNCTION__);
}
/*
 * Allocate up to 'count' tiny blocks of 'size' bytes each from the calling
 * thread's magazine, storing them in 'results'. Only the magazine's free
 * lists are consulted (no depot recirculation, no fresh regions), so fewer
 * than 'count' blocks may be returned. Returns the number allocated.
 */
unsigned
tiny_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count)
{
	msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
	mag_index_t mag_index = tiny_mag_get_thread_index() % szone->tiny_rack.num_magazines;
	magazine_t *mag = &(szone->tiny_rack.magazines[mag_index]);
	unsigned allocated;

	/* Objects handed out are always at least one quantum in size. */
	if (msize == 0) {
		msize = 1;
	}

	CHECK(szone, __PRETTY_FUNCTION__);

	/* tiny_malloc_from_free_list() requires the magazine lock to be held. */
	SZONE_MAGAZINE_PTR_LOCK(mag);

	/* Pull blocks off the free lists until the quota is met or they run dry. */
	for (allocated = 0; allocated < count; allocated++) {
		void *ptr = tiny_malloc_from_free_list(&szone->tiny_rack, mag, mag_index, msize);
		if (ptr == NULL) {
			break;
		}
		results[allocated] = ptr;
	}

	SZONE_MAGAZINE_PTR_UNLOCK(mag);
	return allocated;
}
/*
 * Free a batch of tiny pointers, reusing a single magazine lock across
 * consecutive pointers from the same region. Stops at the first pointer that
 * is not a plain in-use tiny block of this zone (metadata pointer, double
 * free, or foreign pointer), leaving it and the remainder for the standard
 * free path. Successfully freed entries are NULLed out in 'to_be_freed'.
 */
void
tiny_batch_free(szone_t *szone, void **to_be_freed, unsigned count)
{
	unsigned cc = 0;
	void *ptr;
	region_t tiny_region = NULL;
	boolean_t is_free;
	msize_t msize;
	magazine_t *tiny_mag_ptr = NULL;
	mag_index_t mag_index = -1;

	// frees all the pointers in to_be_freed
	// note that to_be_freed may be overwritten during the process
	if (!count) {
		return;
	}

	CHECK(szone, __PRETTY_FUNCTION__);
	while (cc < count) {
		ptr = to_be_freed[cc];
		if (ptr) {
			if (NULL == tiny_region || tiny_region != TINY_REGION_FOR_PTR(ptr)) { // region same as last iteration?
				if (tiny_mag_ptr) {										  // non-NULL iff magazine lock taken
					SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
					tiny_mag_ptr = NULL;
				}

				tiny_region = tiny_region_for_ptr_no_lock(&szone->tiny_rack, ptr);

				if (tiny_region) {
					tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone->tiny_rack.magazines,
							REGION_TRAILER_FOR_TINY_REGION(tiny_region),
							MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region));
					mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region);
				}
			}
			if (tiny_region) {
				// this is a tiny pointer
				if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) {
					break; // pointer to metadata; let the standard free deal with it
				}

				msize = get_tiny_meta_header(ptr, &is_free);
				if (is_free) {
					break; // a double free; let the standard free deal with it
				}

				// A false return means tiny_free_no_lock() released the lock.
				if (!tiny_free_no_lock(&szone->tiny_rack, tiny_mag_ptr, mag_index, tiny_region, ptr, msize)) {
					// Arrange to re-acquire magazine lock
					tiny_mag_ptr = NULL;
					tiny_region = NULL;
				}
				to_be_freed[cc] = NULL;
			} else {
				// No region in this zone claims ptr; let the standard free deal with it
				break;
			}
		}
		cc++;
	}

	if (tiny_mag_ptr) {
		SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
		tiny_mag_ptr = NULL;
	}
}
/*
 * Log a summary of every magazine's tiny free lists (including the depot at
 * index -1): for each non-empty slot, the block size it serves and the number
 * of free blocks queued there.
 */
void
print_tiny_free_list(rack_t *rack)
{
	tiny_free_list_t *ptr;
	_SIMPLE_STRING b = _simple_salloc();
	mag_index_t mag_index;

	if (b) {
		_simple_sappend(b, "tiny free sizes:\n");
		for (mag_index = -1; mag_index < rack->num_magazines; mag_index++) {
			grain_t slot = 0;
			_simple_sprintf(b, "\tMagazine %d: ", mag_index);
			// There are NUM_TINY_SLOTS + 1 free-list slots: the final slot
			// collects oversized/coalesced blocks (see the use of "limit" in
			// tiny_malloc_from_free_list()), so iterate with <= — with the
			// previous '<' bound that slot was never printed and the ">="
			// label below was unreachable.
			while (slot <= NUM_TINY_SLOTS) {
				ptr = rack->magazines[mag_index].mag_free_list[slot].p;
				if (ptr) {
					_simple_sprintf(b, "%s%y[%d]; ", (slot == NUM_TINY_SLOTS) ? ">=" : "", (slot + 1) * TINY_QUANTUM,
							free_list_count(rack, (free_list_t){ .p = ptr }));
				}
				slot++;
			}
			_simple_sappend(b, "\n");
		}

		malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "%s\n", _simple_string(b));
		_simple_sfree(b);
	}
}
/*
 * Print a human-readable summary of one tiny region: address range, owning
 * magazine, in-use counts, pages reclaimable from free fragments, and (if
 * 'verbose') a histogram of in-use block sizes below 1024 quanta.
 * 'bytes_at_start'/'bytes_at_end' describe unconverted free space when the
 * region is the magazine's last region.
 */
void
print_tiny_region(boolean_t verbose, region_t region, size_t bytes_at_start, size_t bytes_at_end)
{
	unsigned counts[1024];
	unsigned in_use = 0;
	uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(region);
	uintptr_t current = start + bytes_at_start;
	uintptr_t limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end;
	boolean_t is_free;
	msize_t msize;
	unsigned ci;
	_SIMPLE_STRING b;
	uintptr_t pgTot = 0;

	if (region == HASHRING_REGION_DEALLOCATED) {
		if ((b = _simple_salloc()) != NULL) {
			_simple_sprintf(b, "Tiny region [unknown address] was returned to the OS\n");
			malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "%s\n", _simple_string(b));
			_simple_sfree(b);
		}
		return;
	}

	memset(counts, 0, sizeof(counts));
	while (current < limit) {
		msize = get_tiny_meta_header((void *)current, &is_free);
		if (is_free && !msize && (current == start)) {
			// first block is all free
			// Count the whole-page span of the region as reclaimable.
			uintptr_t pgLo = round_page_quanta(start + sizeof(tiny_free_list_t) + sizeof(msize_t));
			uintptr_t pgHi = trunc_page_quanta(start + TINY_REGION_SIZE - sizeof(msize_t));

			if (pgLo < pgHi) {
				pgTot += (pgHi - pgLo);
			}
			break;
		}
		if (!msize) {
			malloc_report(ASL_LEVEL_ERR, "*** error with %p: msize=%d\n", (void *)current, (unsigned)msize);
			break;
		}
		if (!is_free) {
			// block in use
			if (msize > NUM_TINY_SLOTS) {
				malloc_report(ASL_LEVEL_ERR, "*** error at %p msize for in_use is %d\n", (void *)current, msize);
			}
			if (msize < 1024) {
				counts[msize]++;
			}
			in_use++;
		} else {
			// Free block: count the whole pages inside it (excluding the
			// free-list header and trailing size marker) as reclaimable.
			uintptr_t pgLo = round_page_quanta(current + sizeof(tiny_free_list_t) + sizeof(msize_t));
			uintptr_t pgHi = trunc_page_quanta(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));

			if (pgLo < pgHi) {
				pgTot += (pgHi - pgLo);
			}
		}
		current += TINY_BYTES_FOR_MSIZE(msize);
	}
	if ((b = _simple_salloc()) != NULL) {
		_simple_sprintf(b, "Tiny region [%p-%p, %y] \t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE);
		_simple_sprintf(b, "Magazine=%d \t", MAGAZINE_INDEX_FOR_TINY_REGION(region));
		_simple_sprintf(b, "Allocations in use=%d \t Bytes in use=%ly \t", in_use, BYTES_USED_FOR_TINY_REGION(region));
		if (bytes_at_end || bytes_at_start) {
			_simple_sprintf(b, "Untouched=%ly ", bytes_at_end + bytes_at_start);
		}
		if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_TINY_REGION(region)) {
			_simple_sprintf(b, "Advised MADV_FREE=%ly", pgTot);
		} else {
			_simple_sprintf(b, "Fragments subject to reclamation=%ly", pgTot);
		}
		if (verbose && in_use) {
			_simple_sappend(b, "\n\tSizes in use: ");
			for (ci = 0; ci < 1024; ci++) {
				if (counts[ci]) {
					_simple_sprintf(b, "%d[%d] ", TINY_BYTES_FOR_MSIZE(ci), counts[ci]);
				}
			}
		}
		malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "%s\n", _simple_string(b));
		_simple_sfree(b);
	}
}
/* Message prefix emitted when tiny_free_list_check() finds a bad free list. */
static char *tiny_freelist_fail_msg = "check: tiny free list incorrect ";

/*
 * Report a tiny free-list consistency failure. Relies on the local variables
 * 'slot' and 'counter' being in scope at the expansion site (as they are
 * inside tiny_free_list_check()).
 */
#define TINY_FREELIST_FAIL(fmt, ...) \
	malloc_zone_check_fail(tiny_freelist_fail_msg, \
			" (slot=%u), counter=%d\n" fmt, slot, counter, __VA_ARGS__);
/*
 * Verify one free-list slot of every magazine in the rack (including the
 * depot at index -1): each entry must be marked free, quantum-aligned,
 * belong to one of this rack's regions, and be doubly linked consistently.
 * Returns 1 if all lists are sound, 0 on the first inconsistency.
 */
boolean_t
tiny_free_list_check(rack_t *rack, grain_t slot, unsigned counter)
{
	mag_index_t mag_index;

	for (mag_index = -1; mag_index < rack->num_magazines; mag_index++) {
		magazine_t *tiny_mag_ptr = &(rack->magazines[mag_index]);
		SZONE_MAGAZINE_PTR_LOCK(tiny_mag_ptr);

		unsigned count = 0;
		tiny_free_list_t *ptr = rack->magazines[mag_index].mag_free_list[slot].p;
		boolean_t is_free;
		tiny_free_list_t *previous = NULL;

		while (ptr) {
			is_free = tiny_meta_header_is_free(ptr);
			if (!is_free) {
				TINY_FREELIST_FAIL("*** in-use ptr in free list slot=%u count=%d ptr=%p\n", slot, count, ptr);
				SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
				return 0;
			}
			if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
				TINY_FREELIST_FAIL("*** unaligned ptr in free list slot=%u count=%d ptr=%p\n", slot, count, ptr);
				SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
				return 0;
			}
			if (!tiny_region_for_ptr_no_lock(rack, ptr)) {
				TINY_FREELIST_FAIL("*** ptr not in szone slot=%d count=%u ptr=%p\n", slot, count, ptr);
				SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
				return 0;
			}
			// The back-pointer of each node must reference the node we just
			// came from (NULL for the head).
			if (free_list_unchecksum_ptr(rack, &ptr->previous) != previous) {
				TINY_FREELIST_FAIL("*** previous incorrectly set slot=%u count=%d ptr=%p\n", slot, count, ptr);
				SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
				return 0;
			}
			previous = ptr;
			ptr = free_list_unchecksum_ptr(rack, &ptr->next);
			count++;
		}

		SZONE_MAGAZINE_PTR_UNLOCK(tiny_mag_ptr);
	}
	return 1;
}
|
19d75ed879e1ae3159eb40088e0d1000662067fe
|
fbdc48c28e54fb33ae4842ef95ff63893902c99a
|
/src/drivers/winc1500/src/nmi2c.c
|
5f2ccd2d32fef82d333f4739fb563f370d4cdf8b
|
[
"MIT"
] |
permissive
|
openmv/openmv
|
44d4b79fc8693950a2e330e5e0fd95b5c36e230f
|
8a90e070a88b7fc14c87a00351b9c4a213278419
|
refs/heads/master
| 2023-08-30T20:59:57.227603
| 2023-08-23T16:50:55
| 2023-08-23T16:50:55
| 14,360,940
| 2,150
| 1,226
|
MIT
| 2023-09-14T07:18:15
| 2013-11-13T10:23:44
|
C
|
UTF-8
|
C
| false
| false
| 6,310
|
c
|
nmi2c.c
|
/**
*
* \file
*
* \brief This module contains NMC1000 I2C protocol bus APIs implementation.
*
* Copyright (c) 2016-2018 Microchip Technology Inc. and its subsidiaries.
*
* \asf_license_start
*
* \page License
*
* Subject to your compliance with these terms, you may use Microchip
* software and any derivatives exclusively with Microchip products.
* It is your responsibility to comply with third party license terms applicable
* to your use of third party software (including open source software) that
* may accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES,
* WHETHER EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE,
* INCLUDING ANY IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY,
* AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT WILL MICROCHIP BE
* LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE, INCIDENTAL OR CONSEQUENTIAL
* LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND WHATSOEVER RELATED TO THE
* SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS BEEN ADVISED OF THE
* POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE FULLEST EXTENT
* ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN ANY WAY
* RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*
* \asf_license_stop
*
*/
#include "common/include/nm_common.h"
#ifdef CONF_WINC_USE_I2C
#include "nmi2c.h"
#include "bus_wrapper/include/nm_bus_wrapper.h"
/*
* @fn nm_i2c_read_reg_with_ret
* @brief Read register with error code return
* @param [in] u32Addr
* Register address
* @param [out] pu32RetVal
* Pointer to u32 variable used to return the read value
* @return M2M_SUCCESS in case of success and M2M_ERR_BUS_FAIL in case of failure
* @author M. Abdelmawla
* @date 11 July 2012
* @version 1.0
*/
sint8 nm_i2c_read_reg_with_ret(uint32 u32Addr, uint32* pu32RetVal)
{
uint8 b[6];
uint8 rsz;
tstrNmI2cDefault strI2c;
sint8 s8Ret = M2M_SUCCESS;
if(u32Addr < 0xff) { /* clockless i2c */
b[0] = 0x09;
b[1] = (uint8)(u32Addr);
rsz = 1;
strI2c.u16Sz = 2;
} else {
b[0] = 0x80;
b[1] = (uint8)(u32Addr >> 24);
b[2] = (uint8)(u32Addr >> 16);
b[3] = (uint8)(u32Addr >> 8);
b[4] = (uint8)(u32Addr);
b[5] = 0x04;
rsz = 4;
strI2c.u16Sz = 6;
}
strI2c.pu8Buf = b;
if(M2M_SUCCESS == nm_bus_ioctl(NM_BUS_IOCTL_W, &strI2c))
{
strI2c.u16Sz = rsz;
if(M2M_SUCCESS != nm_bus_ioctl(NM_BUS_IOCTL_R, &strI2c))
{
//M2M_ERR("read error\n");
s8Ret = M2M_ERR_BUS_FAIL;
}
}
else
{
M2M_ERR("failed to send cfg bytes\n");
s8Ret = M2M_ERR_BUS_FAIL;
}
if (rsz == 1) {
*pu32RetVal = b[0];
} else {
*pu32RetVal = b[0] | ((uint32)b[1] << 8) | ((uint32)b[2] << 16) | ((uint32)b[3] << 24);
}
return s8Ret;
}
/*
* @fn nm_i2c_read_reg
* @brief Read register
* @param [in] u32Addr
* Register address
* @return Register value
* @author M. Abdelmawla
* @date 11 July 2012
* @version 1.0
*/
uint32 nm_i2c_read_reg(uint32 u32Addr)
{
uint32 val;
nm_i2c_read_reg_with_ret(u32Addr, &val);
return val;
}
/*
* @fn nm_i2c_write_reg
* @brief write register
* @param [in] u32Addr
* Register address
* @param [in] u32Val
* Value to be written to the register
* @return M2M_SUCCESS in case of success and M2M_ERR_BUS_FAIL in case of failure
* @author M. Abdelmawla
* @date 11 July 2012
* @version 1.0
*/
sint8 nm_i2c_write_reg(uint32 u32Addr, uint32 u32Val)
{
tstrNmI2cDefault strI2c;
uint8 b[16];
sint8 s8Ret = M2M_SUCCESS;
if(u32Addr < 0xff) { /* clockless i2c */
b[0] = 0x19;
b[1] = (uint8)(u32Addr);
b[2] = (uint8)(u32Val);
strI2c.u16Sz = 3;
} else {
b[0] = 0x90;
b[1] = (uint8)(u32Addr >> 24);
b[2] = (uint8)(u32Addr >> 16);
b[3] = (uint8)(u32Addr >> 8);
b[4] = (uint8)u32Addr;
b[5] = 0x04;
b[6] = (uint8)u32Val;
b[7] = (uint8)(u32Val >> 8);
b[8] = (uint8)(u32Val >> 16);
b[9] = (uint8)(u32Val >> 24);
strI2c.u16Sz = 10;
}
strI2c.pu8Buf = b;
if(M2M_SUCCESS != nm_bus_ioctl(NM_BUS_IOCTL_W, &strI2c))
{
M2M_ERR("write error\n");
s8Ret = M2M_ERR_BUS_FAIL;
}
return s8Ret;
}
/*
* @fn nm_i2c_read_block
* @brief Read block of data
* @param [in] u32Addr
* Start address
* @param [out] puBuf
* Pointer to a buffer used to return the read data
* @param [in] u16Sz
* Number of bytes to read. The buffer size must be >= u16Sz
* @return M2M_SUCCESS in case of success and M2M_ERR_BUS_FAIL in case of failure
* @author M. Abdelmawla
* @date 11 July 2012
* @version 1.0
*/
sint8 nm_i2c_read_block(uint32 u32Addr, uint8 *pu8Buf, uint16 u16Sz)
{
tstrNmI2cDefault strI2c;
uint8 au8Buf[7];
sint8 s8Ret = M2M_SUCCESS;
au8Buf[0] = 0x02;
au8Buf[1] = (uint8)(u32Addr >> 24);
au8Buf[2] = (uint8)(u32Addr >> 16);
au8Buf[3] = (uint8)(u32Addr >> 8);
au8Buf[4] = (uint8)(u32Addr >> 0);
au8Buf[5] = (uint8)(u16Sz >> 8);
au8Buf[6] = (uint8)(u16Sz);
strI2c.pu8Buf = au8Buf;
strI2c.u16Sz = sizeof(au8Buf);
if(M2M_SUCCESS != nm_bus_ioctl(NM_BUS_IOCTL_W, &strI2c))
{
M2M_ERR("write error\n");
s8Ret = M2M_ERR_BUS_FAIL;
}
else
{
strI2c.pu8Buf = pu8Buf;
strI2c.u16Sz = u16Sz;
if(M2M_SUCCESS != nm_bus_ioctl(NM_BUS_IOCTL_R, &strI2c))
{
M2M_ERR("read error\n");
s8Ret = M2M_ERR_BUS_FAIL;
}
}
return s8Ret;
}
/*
* @fn nm_i2c_write_block
* @brief Write block of data
* @param [in] u32Addr
* Start address
* @param [in] puBuf
* Pointer to the buffer holding the data to be written
* @param [in] u16Sz
* Number of bytes to write. The buffer size must be >= u16Sz
* @return M2M_SUCCESS in case of success and M2M_ERR_BUS_FAIL in case of failure
* @author M. Abdelmawla
* @date 11 July 2012
* @version 1.0
*/
sint8 nm_i2c_write_block(uint32 u32Addr, uint8 *pu8Buf, uint16 u16Sz)
{
uint8 au8Buf[7];
tstrNmI2cSpecial strI2c;
sint8 s8Ret = M2M_SUCCESS;
au8Buf[0] = 0x12;
au8Buf[1] = (uint8)(u32Addr >> 24);
au8Buf[2] = (uint8)(u32Addr >> 16);
au8Buf[3] = (uint8)(u32Addr >> 8);
au8Buf[4] = (uint8)(u32Addr);
au8Buf[5] = (uint8)(u16Sz >> 8);
au8Buf[6] = (uint8)(u16Sz);
strI2c.pu8Buf1 = au8Buf;
strI2c.pu8Buf2 = pu8Buf;
strI2c.u16Sz1 = sizeof(au8Buf);
strI2c.u16Sz2 = u16Sz;
if(M2M_SUCCESS != nm_bus_ioctl(NM_BUS_IOCTL_W_SPECIAL, &strI2c))
{
M2M_ERR("write error\n");
s8Ret = M2M_ERR_BUS_FAIL;
}
return s8Ret;
}
#endif
/* EOF */
|
226dfab61e007b92f116e1398a6c15ee1ff06e86
|
aa3befea459382dc5c01c925653d54f435b3fb0f
|
/libs/libnx/nxtk/nxtk_opentoolbar.c
|
d4beeb777a74777cd7188e2d4057d74f5737874b
|
[
"MIT-open-group",
"BSD-3-Clause",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"LicenseRef-scancode-warranty-disclaimer",
"MIT-0",
"LicenseRef-scancode-bsd-atmel",
"LicenseRef-scancode-gary-s-brown",
"LicenseRef-scancode-proprietary-license",
"SunPro",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-other-permissive",
"HPND",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"GPL-1.0-or-later",
"CC-BY-2.0",
"CC-BY-4.0"
] |
permissive
|
apache/nuttx
|
14519a7bff4a87935d94fb8fb2b19edb501c7cec
|
606b6d9310fb25c7d92c6f95bf61737e3c79fa0f
|
refs/heads/master
| 2023-08-25T06:55:45.822534
| 2023-08-23T16:03:31
| 2023-08-24T21:25:47
| 228,103,273
| 407
| 241
|
Apache-2.0
| 2023-09-14T18:26:05
| 2019-12-14T23:27:55
|
C
|
UTF-8
|
C
| false
| false
| 3,599
|
c
|
nxtk_opentoolbar.c
|
/****************************************************************************
* libs/libnx/nxtk/nxtk_opentoolbar.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdlib.h>
#include <errno.h>
#include <debug.h>
#include <nuttx/nx/nx.h>
#include <nuttx/nx/nxtk.h>
#include "nxtk.h"
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: nxtk_opentoolbar
*
* Description:
* Create a tool bar at the top of the specified framed window
*
* Input Parameters:
* hfwnd - The handle returned by nxtk_openwindow
* height - The request height of the toolbar in pixels
* cb - Callbacks used to process toolbar events
* arg - User provided value that will be returned with toolbar
* callbacks.
*
* Returned Value:
* OK on success; ERROR on failure with errno set appropriately
*
****************************************************************************/
int nxtk_opentoolbar(NXTKWINDOW hfwnd, nxgl_coord_t height,
FAR const struct nx_callback_s *cb,
FAR void *arg)
{
FAR struct nxtk_framedwindow_s *fwnd =
(FAR struct nxtk_framedwindow_s *)hfwnd;
#ifdef CONFIG_DEBUG_FEATURES
if (hfwnd == NULL || cb == NULL || height < 1)
{
set_errno(EINVAL);
return ERROR;
}
#endif
/* Initialize the toolbar info */
fwnd->tbheight = height;
fwnd->tbcb = cb;
fwnd->tbarg = arg;
/* Calculate the new dimensions of the toolbar and client windows */
nxtk_setsubwindows(fwnd);
#ifdef CONFIG_NX_RAMBACKED
/* The redraw request has no effect if a framebuffer is used with the
* window. For that type of window, the application must perform the
* toolbar update itself and not rely on a redraw notification.
*/
if (NXBE_ISRAMBACKED(&fwnd->wnd))
{
struct nxgl_rect_s relbounds;
/* Convert to a window-relative bounding box */
nxgl_rectoffset(&relbounds, &fwnd->wnd.bounds,
-fwnd->wnd.bounds.pt1.x, -fwnd->wnd.bounds.pt1.y);
/* Then re-draw the frame */
nxtk_drawframe(fwnd, &relbounds); /* Does not fail */
}
else
#endif
{
/* Redraw the entire window, even the client window must be redrawn
* because it has changed its vertical position and size.
*/
nx_redrawreq(&fwnd->wnd, &fwnd->wnd.bounds);
}
return OK;
}
|
e91bb416ba35b7563fffc864e7cc7e1c8e462594
|
fa8b9ca710602c44ec391953348b2644f68dd321
|
/bench/security/common.h
|
5a5cb53cd4138d443601f8ba63cb139d8ed2c671
|
[
"MIT"
] |
permissive
|
daanx/mimalloc-bench
|
de574c4d6c8e2042cb0096a3c63708a8ef958cf7
|
833b1663ef274f204b0011641aac6e78d96f8cee
|
refs/heads/master
| 2023-08-30T19:48:50.036029
| 2023-08-30T17:51:46
| 2023-08-30T17:51:46
| 192,573,707
| 268
| 48
|
MIT
| 2023-09-08T11:50:23
| 2019-06-18T16:09:59
|
C
|
UTF-8
|
C
| false
| false
| 708
|
h
|
common.h
|
#ifndef _MIMALLOC_BENCH_SECURITY_COMMON_H_
#define _MIMALLOC_BENCH_SECURITY_COMMON_H_
#ifndef ALLOCATION_SIZE
#error Unspecified allocation size
#endif
#define NOT_CAUGHT() do { puts("NOT_CAUGHT"); fflush(stdout); } while ((0));
#if defined(_MSC_VER)
#define NOINLINE __declspec(noinline)
#elif defined(__INTEL_COMPILER)
#define NOINLINE _Pragma("noinline")
#else
#define NOINLINE __attribute((noinline))
#endif
NOINLINE
void* memcpy_noinline(void* dest, const void* src, size_t n) {
return memcpy(dest, src, n);
}
NOINLINE
void* malloc_noinline(size_t size) {
return malloc(size);
}
NOINLINE
void free_noinline(void* ptr) {
return free(ptr);
}
#endif //_MIMALLOC_BENCH_SECURITY_COMMON_H
|
56fb176676ad332b0f992f26820e1c00a3898eb6
|
1efd2de8bf77ec00eb2fcaf5749278495946d920
|
/src/tests/simple_array.c
|
f3ddb7f18e1d4d546338cc6f82ce710365bd41e8
|
[
"BSD-2-Clause",
"BSD-2-Clause-Patent"
] |
permissive
|
daos-stack/daos
|
6f55bf3061fd830d5b8d28506e1295e2d3a27c38
|
ed5eed5df43a68571afe123132a743824c02637a
|
refs/heads/master
| 2023-08-31T21:43:37.606145
| 2023-08-31T16:38:00
| 2023-08-31T16:38:00
| 69,390,670
| 631
| 300
|
NOASSERTION
| 2023-09-14T18:55:15
| 2016-09-27T19:21:29
|
C
|
UTF-8
|
C
| false
| false
| 15,060
|
c
|
simple_array.c
|
/**
* (C) Copyright 2016-2022 Intel Corporation.
*
* SPDX-License-Identifier: BSD-2-Clause-Patent
*/
/**
* Simple sliced 1D array example
*
* We consider a 1D non-sparse array of ARRAY_SIZE elements. Each element is
* a fixed-size 64-bit integer and has an index ranging from 0 to ARRAY_SIZE-1.
* The content of this array is distributed over SHARD_NR shards. Each array
* shard is associated with a dkey set to the shard ID. A single array (akey
* set to "data") is used in this example to store the shard content.
* The array is partitioned into fixed-size (i.e. SLICE_SIZE) slices of
* contiguous elements which are stored on shards in a round-robin fashion.
*
* Each iteration completely overwrites the array by setting each element
* to the epoch number associated with the iteration. Each MPI task writes a
* different set of slices at each iteration and has a limited number of
* I/O requests in flight. Once a task is done with an iteration, it notifies
* the transaction manager (i.e. rank 0) and moves on to the next iteration by
* bumping the epoch number. The transaction manager is responsible for
* flushing and committing the epoch once all tasks have reported completion.
*/
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <daos/tests_lib.h>
#include <daos.h>
#include "simple_common.h"
/** local task information */
int rank = -1;
int rankn = -1;
char node[128] = "unknown";
/** Name of the process set associated with the DAOS server */
#define DSS_PSETID "daos_tier0"
/** Event queue */
daos_handle_t eq;
/** Pool information */
uuid_t pool_uuid; /* only used on rank 0 */
d_rank_list_t svcl; /* only used on rank 0 */
daos_handle_t poh; /* shared pool handle */
/** Container information */
uuid_t co_uuid; /* only used on rank 0 */
daos_handle_t coh; /* shared container handle */
daos_epoch_t epoch; /* epoch in-use */
/**
* Object information
* DAOS uses the high 32-bit of the object ID, the rest is supposed to be
* unique, just set the low bits to 1 in this example
*/
daos_obj_id_t oid = {
.lo = 0x1,
};
/** class identifier */
daos_oclass_id_t cid = 0x1;/* class identifier */
/**
* Array parameters
* Each task overwrites a different section of the array at every iteration.
* An epoch number is associated with each iteration. One task can have at
* most MAX_IOREQS I/O requests in flight and then needs to wait for completion
* of an request in flight before sending a new one.
* The actual data written in the array is the epoch number.
*/
#define TEST_ARRAY_SIZE 1000000000 /* 1B entries * 8-byte entry bits = 8GB */
#define SLICE_SIZE 10000 /* size of an array slice, 10K entries */
#define SHARD_NR 1000 /* static number of array shards, 1K */
#define ITER_NR 10 /* number of global iteration */
#define KEY_LEN 10 /* enough to write the shard ID */
#define MAX_IOREQS 10 /* number of concurrent i/o reqs in flight */
/** an i/o request in flight */
struct io_req {
char dstr[KEY_LEN];
daos_key_t dkey;
daos_recx_t recx;
daos_iod_t iod;
d_iov_t iov;
d_sg_list_t sg;
daos_event_t ev;
};
/** a single akey is used in this example and is set to the string "data" */
char astr[] = "data";
/** data buffer */
uint64_t data[SLICE_SIZE];
void
pool_create(void)
{
int rc;
/**
* allocate list of service nodes, returned as output parameter of
* dmg_pool_create() and used to connect
*/
/** create pool over all the storage targets */
svcl.rl_nr = 3;
D_ALLOC_ARRAY(svcl.rl_ranks, svcl.rl_nr);
ASSERT(svcl.rl_ranks);
rc = dmg_pool_create(NULL /* config file */,
geteuid() /* user owner */,
getegid() /* group owner */,
DSS_PSETID /* daos server process set ID */,
NULL /* list of targets, NULL = all */,
10ULL << 30 /* target SCM size, 10G */,
40ULL << 30 /* target NVMe size, 40G */,
NULL /* pool props */,
&svcl /* pool service nodes */,
pool_uuid /* the uuid of the pool created */);
ASSERT(rc == 0, "pool create failed with %d", rc);
}
void
pool_destroy(void)
{
int rc;
/** destroy the pool created in pool_create */
rc = dmg_pool_destroy(NULL, pool_uuid, DSS_PSETID, 1 /* force */);
ASSERT(rc == 0, "pool destroy failed with %d", rc);
D_FREE(svcl.rl_ranks);
}
static inline void
ioreqs_init(struct io_req *reqs) {
int rc;
int j;
for (j = 0; j < MAX_IOREQS; j++) {
struct io_req *req = &reqs[j];
/** initialize event */
rc = daos_event_init(&req->ev, eq, NULL);
ASSERT(rc == 0, "event init failed with %d", rc);
/** initialize dkey */
req->dkey = (daos_key_t) {
.iov_buf = &req->dstr,
.iov_buf_len = KEY_LEN,
};
/** initialize i/o descriptor */
req->iod.iod_name = (daos_key_t) {
.iov_buf = astr,
.iov_buf_len = strlen(astr),
.iov_len = strlen(astr),
};
req->iod.iod_nr = 1;
req->iod.iod_size = sizeof(uint64_t);
req->recx = (daos_recx_t) {
.rx_nr = SLICE_SIZE,
};
req->iod.iod_recxs = &req->recx;
/** initialize scatter/gather */
req->iov = (d_iov_t) {
.iov_buf = &data,
.iov_buf_len = SLICE_SIZE * sizeof(data[0]),
.iov_len = SLICE_SIZE * sizeof(data[0]),
};
req->sg.sg_nr = 1;
req->sg.sg_iovs = &req->iov;
}
}
void
array(void)
{
daos_handle_t oh;
struct io_req *reqs;
int rc;
int iter;
int k;
/** allocate and initialize I/O requests */
D_ALLOC_ARRAY(reqs, MAX_IOREQS);
ASSERT(reqs != NULL, "malloc of reqs failed");
ioreqs_init(reqs);
/** open DAOS object */
rc = daos_obj_open(coh, oid, DAOS_OO_RW, &oh, NULL);
ASSERT(rc == 0, "object open failed with %d", rc);
/** Transactional overwrite of the array at each iteration */
for (iter = 0; iter < ITER_NR; iter++) {
MPI_Request request;
daos_event_t *evp[MAX_IOREQS];
uint64_t sid; /* slice ID */
int submitted = 0;
struct io_req *req = &reqs[0];
/** store very basic array data */
for (k = 0; k < SLICE_SIZE; k++)
data[k] = epoch;
/**
* For testing purpose, each thread starts with a different
* slice at each epoch and then skips the next rankn - 1
* slices (rank 0 is the transaction manager and does not
* perform any I/O operations).
*/
for (sid = (rank - 1 + epoch) % (rankn - 1);
sid < TEST_ARRAY_SIZE / SLICE_SIZE;
sid += rankn - 1) {
/**
* dkey is set to the shard ID which is equal
* to the slice ID % SHARD_NR
*/
rc = snprintf(req->dstr, KEY_LEN, "%lud",
sid % SHARD_NR);
ASSERT(rc < KEY_LEN, "increase KEY_LEN");
req->dkey.iov_len = strlen(req->dstr);
/**
* Index inside the array to write this slice.
* Two options here:
* - use the logical array index, which means that there
* will be a gap in the array index between each
* slice.
* - write all slices in contiguous indexes inside the
* array.
* For simplicity, the former approach is implemented
* in this example which means that the index inside the
* array/shard matches the logical array index.
*/
req->recx.rx_idx = sid * SLICE_SIZE;
/** submit I/O operation */
rc = daos_obj_update(oh, DAOS_TX_NONE, 0, &req->dkey, 1,
&req->iod, &req->sg,
&req->ev);
ASSERT(rc == 0, "object update failed with %d", rc);
submitted++;
if (submitted < MAX_IOREQS) {
/** haven't reached max request in flight yet */
req++;
} else {
/**
* max request request in flight reached, wait
* for one i/o to complete to reuse the slot
*/
rc = daos_eq_poll(eq, 1, DAOS_EQ_WAIT, 1, evp);
ASSERT(rc == 1, "eq poll failed with %d", rc);
/** check for any I/O operation error */
ASSERT(evp[0]->ev_error == 0,
"I/O operation failed with %d",
evp[0]->ev_error);
submitted--;
req = container_of(evp[0], struct io_req, ev);
}
}
/** poll all remaining I/O requests */
rc = daos_eq_poll(eq, 1, DAOS_EQ_WAIT, submitted, evp);
ASSERT(rc == submitted, "eq poll failed with %d", rc);
/** check for any I/O error */
for (k = 0; k < submitted; k++)
ASSERT(evp[k]->ev_error == 0,
"I/O operation failed with %d",
evp[k]->ev_error);
/**
* notify rank 0 that we are done with this epoch
* tried first with MPI_IBarrier() with no luck, rewrote with
* MPI_Isend/Irecv.
*/
rc = MPI_Isend(&epoch, 1, MPI_UINT64_T, 0, epoch,
MPI_COMM_WORLD, &request);
ASSERT(rc == MPI_SUCCESS, "ISend failed");
MPI_Wait(&request, MPI_STATUS_IGNORE);
/**
* rank 0 will flush & commit once everyone is done.
* meanwhile, move on to the next epoch
*/
epoch++;
}
/** close DAOS object */
rc = daos_obj_close(oh, NULL);
ASSERT(rc == 0, "object cloase failed with %d", rc);
/** release events */
for (k = 0; k < MAX_IOREQS; k++) {
rc = daos_event_fini(&reqs[k].ev);
ASSERT(rc == 0, "event fini failed with %d", rc);
}
D_FREE(reqs);
}
/** states of the epoch state machine executed by the transaction manager */
typedef enum {
EP_NONE, /* nothing interesting yet */
EP_WR_DONE, /* all tasks reported completion, next step is flush */
EP_FLUSHED, /* epoch flushed, next step is commit */
EP_COMMITTED,/* epoch committed, no further work required */
} ep_phase_t;
/** per-epoch information */
struct ep_state {
int ref; /* #tasks that already reported completion */
ep_phase_t state; /* epoch state, see above */
};
/** Main routine of the transaction manager */
void
committer()
{
struct ep_state ep_track[ITER_NR];
daos_epoch_t ep_start;
daos_epoch_t ep_rcv;
daos_event_t ev;
MPI_Request request;
int rc;
int j;
rc = daos_event_init(&ev, eq, NULL);
ASSERT(rc == 0, "event init failed with %d", rc);
ep_start = epoch;
for (j = 0; j < ITER_NR; j++) {
ep_track[j].ref = 0;
ep_track[j].state = EP_NONE;
}
/** post an initial buffer */
rc = MPI_Irecv(&ep_rcv, 1, MPI_UINT64_T, MPI_ANY_SOURCE, MPI_ANY_TAG,
MPI_COMM_WORLD, &request);
ASSERT(rc == MPI_SUCCESS, "Irecv failed");
for (;;) {
MPI_Status status;
daos_event_t *evp;
int daos_comp = 0;
int mpi_comp = 0;
/** poll for incoming message or commit/flush completion */
do {
daos_comp = daos_eq_poll(eq, 0, DAOS_EQ_NOWAIT, 1,
&evp);
MPI_Test(&request, &mpi_comp, &status);
} while (mpi_comp == 0 && daos_comp == 0);
/** message received */
if (mpi_comp) {
int count;
MPI_Get_count(&status, MPI_UINT64_T, &count);
ASSERT(count == 1, "Irecv test failed");
/** bump ref count */
ep_track[ep_rcv - ep_start].ref++;
/** post a new buffer */
rc = MPI_Irecv(&ep_rcv, 1, MPI_UINT64_T, MPI_ANY_SOURCE,
MPI_ANY_TAG, MPI_COMM_WORLD, &request);
ASSERT(rc == MPI_SUCCESS, "Irecv failed");
}
/** DAOS flush or commit completed */
if (daos_comp) {
ep_phase_t *state = &ep_track[epoch - ep_start].state;
ASSERT(daos_comp == 1, "eq pool failed with %d",
daos_comp);
ASSERT(&ev == evp, "events mismatch");
ASSERT(ep_track[epoch - ep_start].ref == rankn - 1,
"event completion while some tasks haven't "
"reported epoch completion yet");
if (*state == EP_WR_DONE) {
/** flush completed */
ASSERT(ev.ev_error == 0,
"flush failed with %d", ev.ev_error);
*state = EP_FLUSHED;
} else if (*state == EP_FLUSHED) {
/** commit completed */
ASSERT(ev.ev_error == 0,
"commit failed with %d", ev.ev_error);
*state = EP_COMMITTED;
/** successful commit, bump epoch */
epoch++;
if (epoch - ep_start == ITER_NR)
/**
* all epochs are committed,
* we are done
*/
break;
} else {
ASSERT(0, "invalid state %d", *state);
}
}
/** everybody is done with this epoch */
if (ep_track[epoch - ep_start].ref == rankn - 1) {
ep_phase_t *state = &ep_track[epoch - ep_start].state;
ASSERT(*state == EP_NONE, "invalid epoch state");
*state = EP_WR_DONE;
}
}
/** we posted one extra buffer, let's cancel it */
MPI_Cancel(&request);
rc = daos_event_fini(&ev);
ASSERT(rc == 0, "event fini failed with %d", rc);
}
int
main(int argc, char **argv)
{
int rc;
rc = gethostname(node, sizeof(node));
ASSERT(rc == 0, "buffer for hostname too small");
rc = MPI_Init(&argc, &argv);
ASSERT(rc == MPI_SUCCESS, "MPI_Init failed with %d", rc);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &rankn);
/** initialize the local DAOS stack */
rc = daos_init();
ASSERT(rc == 0, "daos_init failed with %d", rc);
/** create event queue */
rc = daos_eq_create(&eq);
ASSERT(rc == 0, "eq create failed with %d", rc);
if (rank == 0) {
char str[37];
/** create a test pool and container for this test */
pool_create();
/** connect to the just created DAOS pool */
uuid_unparse(pool_uuid, str);
rc = daos_pool_connect(str, DSS_PSETID,
DAOS_PC_EX /* exclusive access */,
&poh /* returned pool handle */,
NULL /* returned pool info */,
NULL /* event */);
ASSERT(rc == 0, "pool connect failed with %d", rc);
}
/** share pool handle with peer tasks */
handle_share(&poh, HANDLE_POOL, rank, poh, 1);
if (rank == 0) {
char str[37];
/** create container */
rc = daos_cont_create(poh, &co_uuid, NULL /* properties */,
NULL /* event */);
ASSERT(rc == 0, "container create failed with %d", rc);
/** open container */
uuid_unparse(co_uuid, str);
rc = daos_cont_open(poh, str, DAOS_COO_RW, &coh, NULL, NULL);
ASSERT(rc == 0, "container open failed with %d", rc);
}
/** share container handle with peer tasks */
handle_share(&coh, HANDLE_CO, rank, poh, 1);
/** generate objid */
daos_obj_generate_oid(coh, &oid, 0, cid, 0, 0);
if (rank == 0) {
struct daos_oclass_attr cattr = {
.ca_schema = DAOS_OS_STRIPED,
.ca_resil_degree = 0 /* TBD */,
.ca_resil = DAOS_RES_REPL,
.ca_grp_nr = 4,
.u.rp = {
.r_proto = 0 /* TBD */,
.r_num = 2 /* TBD */,
},
};
/** register a default object class */
rc = daos_obj_register_class(coh, cid, &cattr, NULL);
ASSERT(rc == 0, "class register failed with %d", rc);
}
/** broadcast current LHE to all peers */
rc = MPI_Bcast(&epoch, 1, MPI_UINT64_T, 0, MPI_COMM_WORLD);
ASSERT(rc == MPI_SUCCESS, "LHE broadcast failed with %d", rc);
/** start real work */
if (rank == 0)
/** rank 0 is the transaction manager */
committer();
else
/** the other tasks write the array */
array();
/** close container */
rc = daos_cont_close(coh, NULL);
if (rc)
D_ERROR("daos_cont_close() Failed "DF_RC"\n", DP_RC(rc));
/** disconnect from pool & destroy it */
rc = daos_pool_disconnect(poh, NULL);
if (rc)
D_ERROR("daos_pool_disconnect() Failed "DF_RC"\n", DP_RC(rc));
if (rank == 0)
/** free allocated storage */
pool_destroy();
/** destroy event queue */
rc = daos_eq_destroy(eq, 0);
ASSERT(rc == 0, "eq destroy failed with %d", rc);
/** shutdown the local DAOS stack */
rc = daos_fini();
ASSERT(rc == 0, "daos_fini failed with %d", rc);
MPI_Finalize();
return rc;
}
|
eda15ae8b0a3939f9646a611ddf1510db5dafb50
|
1adbea64905f41b978e42844d72721230272c69d
|
/src/shared/pal/inc/rt/dbghelp.h
|
1d231d34fc3989e6fcad84fc436ab8b3b1837c04
|
[
"MIT"
] |
permissive
|
dotnet/diagnostics
|
5b3e0901f87d6429fca4282d149986546cee8eb4
|
3357ebf5f4aaf4fe196c08e3e47fa2233096fada
|
refs/heads/main
| 2023-09-01T12:42:17.138682
| 2023-09-01T08:37:32
| 2023-09-01T08:37:32
| 126,257,942
| 1,156
| 361
|
MIT
| 2023-09-14T13:47:19
| 2018-03-22T00:39:48
|
C++
|
UTF-8
|
C
| false
| false
| 699
|
h
|
dbghelp.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*++ BUILD Version: 0000 Increment this if a change has global effects
Module Name:
dbghelp.h
Abstract:
This module defines the prototypes and constants required for the image
help routines.
Contains debugging support routines that are redistributable.
Revision History:
--*/
#ifndef _DBGHELP_
#define _DBGHELP_
#if _MSC_VER > 1020
#pragma once
#endif
//
// options that are set/returned by SymSetOptions() & SymGetOptions()
// these are used as a mask
//
#define SYMOPT_LOAD_LINES 0x00000010
#endif // _DBGHELP_
|
bbabe66d4c8a46a3f97454615252ca5c9fba162c
|
28d0f8c01599f8f6c711bdde0b59f9c2cd221203
|
/tests/usr.bin/indent/opt_ci.c
|
63752a824bc5819e9e1a8941529700fb1d43a7f2
|
[] |
no_license
|
NetBSD/src
|
1a9cbc22ed778be638b37869ed4fb5c8dd616166
|
23ee83f7c0aea0777bd89d8ebd7f0cde9880d13c
|
refs/heads/trunk
| 2023-08-31T13:24:58.105962
| 2023-08-27T15:50:47
| 2023-08-27T15:50:47
| 88,439,547
| 656
| 348
| null | 2023-07-20T20:07:24
| 2017-04-16T20:03:43
| null |
UTF-8
|
C
| false
| false
| 3,418
|
c
|
opt_ci.c
|
/* $NetBSD: opt_ci.c,v 1.11 2023/06/10 08:17:04 rillig Exp $ */
/*
* Tests for the option '-ci', which controls the indentation of continuation
* lines in statements and declarations, but only inside a function.
*/
/*
* Top level expressions with and without parentheses.
*/
//indent input
int top_level = 1 +
2;
int top_level = (1 +
2 + (
3));
//indent end
//indent run -ci0
int top_level = 1 +
2;
int top_level = (1 +
2 + (
3));
//indent end
//indent run-equals-prev-output -ci2
//indent run-equals-prev-output -ci4
//indent run-equals-prev-output -ci8
//indent run -ci0 -nlp
int top_level = 1 +
2;
int top_level = (1 +
2 + (
3));
//indent end
//indent run -ci2 -nlp
int top_level = 1 +
2;
int top_level = (1 +
2 + (
3));
//indent end
/*
* Between 2019-04-04 and 2023-06-09, there was a special rule that prevented
* indentation based on the number of open parentheses, in the case that the
* continuation indentation is half an indentation level, maybe to prevent that
* the continuation line has the same indentation as a follow-up statement,
* such as in 'if' statements. To prevent such ambiguities, see '-eei'.
*/
//indent run -ci4 -nlp
int top_level = 1 +
2;
int top_level = (1 +
2 + (
3));
//indent end
/*
* Declarations in functions without parentheses.
*/
//indent input
int
sum(int a, int b)
{
return a +
b;
return first +
second;
}
//indent end
//indent run -ci0
int
sum(int a, int b)
{
return a +
b;
return first +
second;
}
//indent end
//indent run -ci2
int
sum(int a, int b)
{
return a +
b;
return first +
second;
}
//indent end
//indent run -ci4
int
sum(int a, int b)
{
return a +
b;
return first +
second;
}
//indent end
//indent run -ci8
int
sum(int a, int b)
{
return a +
b;
return first +
second;
}
//indent end
/*
* Continued statements with expressions in parentheses.
*/
//indent input
int
sum(int a, int b)
{
return (a +
b);
return (first +
second + (
third));
}
//indent end
//indent run -ci0
int
sum(int a, int b)
{
return (a +
b);
return (first +
second + (
third));
}
//indent end
//indent run-equals-prev-output -ci2
//indent run-equals-prev-output -ci4
//indent run-equals-prev-output -ci8
//indent run -ci2 -nlp
int
sum(int a, int b)
{
return (a +
b);
return (first +
second + (
third));
}
//indent end
//indent run -ci4 -nlp
int
sum(int a, int b)
{
return (a +
b);
return (first +
second + (
third));
}
//indent end
//indent run -ci8 -nlp
int
sum(int a, int b)
{
return (a +
b);
return (first +
second + (
third));
}
//indent end
/*
* In the default configuration, the indentation level from '-i' is the same
* as the continuation indentation from '-ci'. The difference between these
* becomes visible for structural macros like 'forever' or 'foreach'.
*/
//indent input
#define forever for (;;)
void
function(void)
{
forever
stmt();
forever {
stmt();
}
}
//indent end
//indent run-equals-input
/*
* The difference between the block indentation and the continuation
* indentation only becomes visible when these two differ.
*/
//indent run -i8 -ci4
#define forever for (;;)
void
function(void)
{
forever
stmt();
forever {
stmt();
}
}
//indent end
//indent input
{
size_t last_word_len = com.len
- (size_t)(last_blank + 1);
}
//indent end
//indent run-equals-input -ldi0 -ci4
|
4b9b03d6baf5fcc8e0b652ee838466f6834d6c04
|
c7c73566784a7896100e993606e1bd8fdd0ea94e
|
/panda/metalibs/pandagl/pandagl.h
|
3fb84b21526604d49655dfc3a9e7bf5e35f3aff5
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
panda3d/panda3d
|
c3f94df2206ff7cfe4a3b370777a56fb11a07926
|
160ba090a5e80068f61f34fc3d6f49dbb6ad52c5
|
refs/heads/master
| 2023-08-21T13:23:16.904756
| 2021-04-11T22:55:33
| 2023-08-06T06:09:32
| 13,212,165
| 4,417
| 1,072
|
NOASSERTION
| 2023-09-09T19:26:14
| 2013-09-30T10:20:25
|
C++
|
UTF-8
|
C
| false
| false
| 336
|
h
|
pandagl.h
|
/**
* @file pandagl.h
* @author drose
* @date 2001-01-02
*/
#ifndef PANDAGL_H
#define PANDAGL_H
#include "pandabase.h"
EXPCL_PANDAGL void init_libpandagl();
extern "C" EXPCL_PANDAGL int get_pipe_type_pandagl();
#if defined(HAVE_EGL) && !defined(USE_X11)
extern "C" EXPCL_PANDAGL int get_pipe_type_p3headlessgl();
#endif
#endif
|
756f3567262d50497375a08dab2a5c5d48a2fa97
|
aa3befea459382dc5c01c925653d54f435b3fb0f
|
/include/nuttx/lcd/ssd1680.h
|
9fcdf37d879c188f1ea7c8aff49a9940ea8b9d0b
|
[
"MIT-open-group",
"BSD-3-Clause",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"LicenseRef-scancode-warranty-disclaimer",
"MIT-0",
"LicenseRef-scancode-bsd-atmel",
"LicenseRef-scancode-gary-s-brown",
"LicenseRef-scancode-proprietary-license",
"SunPro",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-other-permissive",
"HPND",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"GPL-1.0-or-later",
"CC-BY-2.0",
"CC-BY-4.0"
] |
permissive
|
apache/nuttx
|
14519a7bff4a87935d94fb8fb2b19edb501c7cec
|
606b6d9310fb25c7d92c6f95bf61737e3c79fa0f
|
refs/heads/master
| 2023-08-25T06:55:45.822534
| 2023-08-23T16:03:31
| 2023-08-24T21:25:47
| 228,103,273
| 407
| 241
|
Apache-2.0
| 2023-09-14T18:26:05
| 2019-12-14T23:27:55
|
C
|
UTF-8
|
C
| false
| false
| 6,402
|
h
|
ssd1680.h
|
/****************************************************************************
* include/nuttx/lcd/ssd1680.h
*
* Driver for Solomon Systech SSD1680 e-paper controller
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
#ifndef __INCLUDE_NUTTX_LCD_SSD1680_H
#define __INCLUDE_NUTTX_LCD_SSD1680_H
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdbool.h>
#include <nuttx/arch.h>
#ifdef CONFIG_LCD_SSD1680
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/* Configuration ************************************************************/
/* SSD1680 configuration settings:
*
* CONFIG_SSD1680_SPIMODE - Controls the SPI mode
* CONFIG_SSD1680_FREQUENCY - Define to use a different bus frequency
*
* Required LCD driver settings:
*
* CONFIG_LCD_MAXCONTRAST should be 255, but any value >0 and <=255
* will be accepted.
* CONFIG_LCD_MAXPOWER must be 1
*
* Optional LCD driver settings:
* CONFIG_LCD_LANDSCAPE, CONFIG_LCD_PORTRAIT, CONFIG_LCD_RLANDSCAPE, and
* CONFIG_LCD_RPORTRAIT - Display orientation.
*
* Required SPI driver settings:
* CONFIG_SPI_CMDDATA - Include support for cmd/data selection.
*/
/* SPI Interface
*
* "The serial interface consists of serial clock SCL, serial data SI, CS and
* CMD/!DTA. SI is shifted into an 8-bit shift register on every rising edge
* of SCL in the order of D7, D6, ... and D0. CMD/!DTA is sampled on every
* eighth clock and the data byte in the shift register is written to the
* display data RAM or command register in the same clock."
*
* "This module determines whether the input data is interpreted as data or
* command. When CMD/!DTA = "H," the inputs at D7 - D0 are interpreted as
* data and be written to display RAM. When CMD/!DTA = "L", the inputs at
* D7 - D0 are interpreted as command, they will be decoded and be written
* to the corresponding command registers."
*/
#ifndef CONFIG_SPI_CMDDATA
# error "CONFIG_SPI_CMDDATA must be defined in your NuttX configuration"
#endif
/* Check contrast selection */
#if !defined(CONFIG_LCD_MAXCONTRAST)
# define CONFIG_LCD_MAXCONTRAST 255
#endif
#if CONFIG_LCD_MAXCONTRAST <= 0 || CONFIG_LCD_MAXCONTRAST > 255
# error "CONFIG_LCD_MAXCONTRAST exceeds supported maximum"
#endif
/* Check power setting */
#if !defined(CONFIG_LCD_MAXPOWER)
# define CONFIG_LCD_MAXPOWER 1
#endif
#if CONFIG_LCD_MAXPOWER != 1
# warning "CONFIG_LCD_MAXPOWER exceeds supported maximum"
# undef CONFIG_LCD_MAXPOWER
# define CONFIG_LCD_MAXPOWER 1
#endif
/* Color is 1bpp monochrome with leftmost column contained in bits 0 */
#ifdef CONFIG_NX_DISABLE_1BPP
# warning "1 bit-per-pixel support needed"
#endif
/* Orientation */
#if defined(CONFIG_LCD_LANDSCAPE)
# undef CONFIG_LCD_PORTRAIT
# undef CONFIG_LCD_RLANDSCAPE
# undef CONFIG_LCD_RPORTRAIT
#elif defined(CONFIG_LCD_PORTRAIT)
# undef CONFIG_LCD_LANDSCAPE
# undef CONFIG_LCD_RLANDSCAPE
# undef CONFIG_LCD_RPORTRAIT
#elif defined(CONFIG_LCD_RLANDSCAPE)
# undef CONFIG_LCD_LANDSCAPE
# undef CONFIG_LCD_PORTRAIT
# undef CONFIG_LCD_RPORTRAIT
#elif defined(CONFIG_LCD_RPORTRAIT)
# undef CONFIG_LCD_LANDSCAPE
# undef CONFIG_LCD_PORTRAIT
# undef CONFIG_LCD_RLANDSCAPE
#else
# define CONFIG_LCD_LANDSCAPE 1
# warning "Assuming landscape orientation"
#endif
/* Some important "colors" */
#define SSD1680_Y1_BLACK 0
#define SSD1680_Y1_WHITE 1
/****************************************************************************
* Public Types
****************************************************************************/
struct ssd1680_priv_s
{
bool (*set_vcc) (bool on); /* Allow board to control display power. Return
* true if request state set successfully. */
bool (*set_rst) (bool on); /* Hardware reset support */
bool (*check_busy) (void); /* Checks the state of busy pin */
};
/****************************************************************************
* Public Data
****************************************************************************/
#ifdef __cplusplus
extern "C"
{
#endif
/****************************************************************************
* Public Function Prototypes
****************************************************************************/
/****************************************************************************
* Name: ssd1680initialize
*
* Description:
* Initialize the video hardware. The initial state of the OLED is
* fully initialized, display memory cleared, and the OLED ready to
* use, but with the power setting at 0 (full off == sleep mode).
*
* Input Parameters:
*
* dev - A reference to the SPI driver instance.
* board_priv - Board specific structure.
*
* Returned Value:
*
* On success, this function returns a reference to the LCD object for
* the specified OLED. NULL is returned on any failure.
*
****************************************************************************/
struct lcd_dev_s; /* See include/nuttx/lcd/lcd.h */
struct spi_dev_s; /* See include/nuttx/spi/spi.h */
FAR struct lcd_dev_s *ssd1680_initialize(FAR struct spi_dev_s *dev,
FAR const struct ssd1680_priv_s *board_priv);
#ifdef __cplusplus
}
#endif
#endif /* CONFIG_LCD_SSD1680 */
#endif /* __INCLUDE_NUTTX_LCD_SSD1680_H */
|
d43d40a618046354760916407294dac886411e3a
|
4ce56f791d898fadc8fee56c362b5c26322dbfda
|
/platform/riscos/!GtEscape/zxsave.c
|
a350ebe0e20aeb1a6a58dc3ac333c2cee6a82639
|
[] |
no_license
|
dpt/The-Great-Escape-in-C
|
7dbeb2d308b6bfcbc40bd46b5c457617069d8b32
|
f17b488c83de06374c6a8bfdb6fbc95a8866c2dc
|
refs/heads/master
| 2022-10-28T00:06:17.642644
| 2022-10-19T18:36:05
| 2022-10-19T18:36:05
| 25,271,138
| 110
| 13
| null | 2022-04-19T23:44:07
| 2014-10-15T19:57:53
|
C
|
UTF-8
|
C
| false
| false
| 3,025
|
c
|
zxsave.c
|
/* --------------------------------------------------------------------------
* Name: zxsave.c
* Purpose: Viewer save dialogue handler
* ----------------------------------------------------------------------- */
#include <stddef.h>
#include "fortify/fortify.h"
#include "oslib/types.h"
#include "oslib/osfile.h"
#include "appengine/dialogues/save.h"
#include "appengine/wimp/dialogue.h"
#include "globals.h"
#include "menunames.h"
#include "zxgame.h"
#include "zxsave.h"
/* ----------------------------------------------------------------------- */
dialogue_t *zxgamesave_dlg;
static bits save_type; /* 1 for game, else screenshot */
/* ----------------------------------------------------------------------- */
static void zxgamesave_dlg_fillout(dialogue_t *d, void *opaque)
{
zxgame_t *zxgame;
const char *file_name;
NOT_USED(opaque);
zxgame = GLOBALS.current_zxgame;
if (zxgame == NULL)
return;
if (save_type != osfile_TYPE_SPRITE)
file_name = "Escape";
else
file_name = "Screenshot";
save_set_file_name(d, file_name);
save_set_file_type(d, save_type);
}
/* Called on 'Save' button clicks, but not on drag saves. */
static void zxgamesave_dlg_handler(dialogue_t *d, const char *file_name)
{
zxgame_t *zxgame;
NOT_USED(d);
zxgame = GLOBALS.current_zxgame;
if (zxgame == NULL)
return;
// fixme - error handling
if (save_type != osfile_TYPE_SPRITE)
zxgame_save_game(zxgame, file_name);
else
zxgame_save_screenshot(zxgame, file_name);
}
/* ----------------------------------------------------------------------- */
static int zxgamesave_menu_warning(wimp_message *message, void *handle)
{
wimp_message_menu_warning *menu_warning;
NOT_USED(handle);
menu_warning = (wimp_message_menu_warning *) &message->data;
if (menu_warning->selection.items[0] == ZXGAME_SAVE)
{
switch (menu_warning->selection.items[1])
{
case 0:
save_type = APPFILETYPE;
return event_HANDLED;
case 1:
save_type = osfile_TYPE_SPRITE;
return event_HANDLED;
}
}
return event_NOT_HANDLED;
}
/* ----------------------------------------------------------------------- */
void zxgamesave_show_game(void)
{
save_type = APPFILETYPE;
dialogue_show(zxgamesave_dlg);
}
void zxgamesave_show_screenshot(void)
{
save_type = osfile_TYPE_SPRITE;
dialogue_show(zxgamesave_dlg);
}
/* ----------------------------------------------------------------------- */
result_t zxgamesave_dlg_init(void)
{
dialogue_t *save;
save = save_create();
if (save == NULL)
return result_OOM;
dialogue_set_fillout_handler(save, zxgamesave_dlg_fillout, NULL);
dialogue_set_menu_warning_handler(save, zxgamesave_menu_warning);
save_set_save_handler(save, zxgamesave_dlg_handler);
zxgamesave_dlg = save;
return result_OK;
}
void zxgamesave_dlg_fin(void)
{
save_destroy(zxgamesave_dlg);
}
/* ----------------------------------------------------------------------- */
// vim: ts=8 sts=2 sw=2 et
|
cc744fe036b679c3d8a94856d6a090213b1364b7
|
ea8fc70c7dbf49059431fa45a940742736c68fb8
|
/ext/mvc/orm.c
|
715809fa3e9821e08224eca63b49cc054ee7cf66
|
[
"BSD-3-Clause"
] |
permissive
|
dreamsxin/cphalcon7
|
1bd2194a251657b48857326927db69fef617ab91
|
1b8c6b04b4ca237a5ead87d4752df0d2e85c7a9d
|
refs/heads/master
| 2023-03-08T04:53:08.829432
| 2022-07-07T07:48:59
| 2022-07-07T07:48:59
| 47,245,335
| 298
| 73
| null | 2021-06-22T11:53:25
| 2015-12-02T07:44:43
|
C
|
UTF-8
|
C
| false
| false
| 14,069
|
c
|
orm.c
|
/*
+------------------------------------------------------------------------+
| Phalcon Framework |
+------------------------------------------------------------------------+
| Copyright (c) 2011-2014 Phalcon Team (http://www.phalconphp.com) |
+------------------------------------------------------------------------+
| This source file is subject to the New BSD License that is bundled |
| with this package in the file docs/LICENSE.txt. |
| |
| If you did not receive a copy of the license and are unable to |
| obtain it through the world-wide-web, please send an email |
| to license@phalconphp.com so we can send you a copy immediately. |
+------------------------------------------------------------------------+
| Authors: Andres Gutierrez <andres@phalconphp.com> |
| Eduar Carvajal <eduar@phalconphp.com> |
| ZhuZongXin <dreamsxin@qq.com> |
+------------------------------------------------------------------------+
*/
#include "mvc/orm.h"
#include "mvc/model.h"
#include "mvc/model/exception.h"
#include "diinterface.h"
#include "di/injectable.h"
#include "debug.h"
#include <Zend/zend_closures.h>
#include "kernel/main.h"
#include "kernel/memory.h"
#include "kernel/fcall.h"
#include "kernel/exception.h"
#include "kernel/object.h"
#include "kernel/hash.h"
#include "kernel/array.h"
#include "kernel/operators.h"
#include "kernel/string.h"
#include "kernel/concat.h"
#include "kernel/debug.h"
#include "interned-strings.h"
/**
* Phalcon\Mvc\ORM
*
*/
zend_class_entry *phalcon_mvc_orm_ce;
PHP_METHOD(Phalcon_Mvc_ORM, factory);
PHP_METHOD(Phalcon_Mvc_ORM, __construct);
PHP_METHOD(Phalcon_Mvc_ORM, new);
PHP_METHOD(Phalcon_Mvc_ORM, find);
PHP_METHOD(Phalcon_Mvc_ORM, findFirst);
PHP_METHOD(Phalcon_Mvc_ORM, group);
PHP_METHOD(Phalcon_Mvc_ORM, count);
PHP_METHOD(Phalcon_Mvc_ORM, sum);
PHP_METHOD(Phalcon_Mvc_ORM, maximum);
PHP_METHOD(Phalcon_Mvc_ORM, minimum);
PHP_METHOD(Phalcon_Mvc_ORM, average);
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_orm_factory, 0, 0, 1)
ZEND_ARG_TYPE_INFO(0, tableName, IS_STRING, 0)
ZEND_ARG_TYPE_INFO(0, className, IS_STRING, 1)
ZEND_ARG_TYPE_INFO(0, suffix, _IS_BOOL, 1)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_orm___construct, 0, 0, 1)
ZEND_ARG_TYPE_INFO(0, tableName, IS_STRING, 0)
ZEND_ARG_TYPE_INFO(0, className, IS_STRING, 1)
ZEND_ARG_TYPE_INFO(0, suffix, _IS_BOOL, 1)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_orm_find, 0, 0, 0)
ZEND_ARG_TYPE_INFO(0, params, IS_ARRAY, 1)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_orm_findfirst, 0, 0, 0)
ZEND_ARG_TYPE_INFO(0, params, IS_ARRAY, 1)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_orm_group, 0, 0, 1)
ZEND_ARG_TYPE_INFO(0, params, IS_ARRAY, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_orm_count, 0, 0, 0)
ZEND_ARG_INFO(0, parameters)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_orm_sum, 0, 0, 0)
ZEND_ARG_INFO(0, parameters)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_orm_maximum, 0, 0, 0)
ZEND_ARG_INFO(0, parameters)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_orm_minimum, 0, 0, 0)
ZEND_ARG_INFO(0, parameters)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_phalcon_mvc_orm_average, 0, 0, 0)
ZEND_ARG_INFO(0, parameters)
ZEND_END_ARG_INFO()
static const zend_function_entry phalcon_mvc_orm_method_entry[] = {
PHP_ME(Phalcon_Mvc_ORM, factory, arginfo_phalcon_mvc_orm_factory, ZEND_ACC_PUBLIC|ZEND_ACC_STATIC)
PHP_ME(Phalcon_Mvc_ORM, __construct, arginfo_phalcon_mvc_orm___construct, ZEND_ACC_PUBLIC|ZEND_ACC_FINAL|ZEND_ACC_CTOR)
PHP_ME(Phalcon_Mvc_ORM, new, arginfo_empty, ZEND_ACC_PUBLIC)
PHP_ME(Phalcon_Mvc_ORM, find, arginfo_phalcon_mvc_orm_find, ZEND_ACC_PUBLIC)
PHP_ME(Phalcon_Mvc_ORM, findFirst, arginfo_phalcon_mvc_orm_findfirst, ZEND_ACC_PUBLIC)
PHP_ME(Phalcon_Mvc_ORM, group, arginfo_phalcon_mvc_orm_group, ZEND_ACC_PUBLIC)
PHP_ME(Phalcon_Mvc_ORM, count, arginfo_phalcon_mvc_orm_count, ZEND_ACC_PUBLIC)
PHP_ME(Phalcon_Mvc_ORM, sum, arginfo_phalcon_mvc_orm_sum, ZEND_ACC_PUBLIC)
PHP_ME(Phalcon_Mvc_ORM, maximum, arginfo_phalcon_mvc_orm_maximum, ZEND_ACC_PUBLIC)
PHP_ME(Phalcon_Mvc_ORM, minimum, arginfo_phalcon_mvc_orm_minimum, ZEND_ACC_PUBLIC)
PHP_ME(Phalcon_Mvc_ORM, average, arginfo_phalcon_mvc_orm_average, ZEND_ACC_PUBLIC)
PHP_FE_END
};
/**
* Phalcon\Mvc\ORM initializer
*/
PHALCON_INIT_CLASS(Phalcon_Mvc_ORM){
PHALCON_REGISTER_CLASS_EX(Phalcon\\Mvc, ORM, mvc_orm, phalcon_di_injectable_ce, phalcon_mvc_orm_method_entry, 0);
zend_declare_property_null(phalcon_mvc_orm_ce, SL("_tableName"), ZEND_ACC_PROTECTED);
zend_declare_property_null(phalcon_mvc_orm_ce, SL("_className"), ZEND_ACC_PROTECTED);
return SUCCESS;
}
/**
* Loads an ORM and prepares it for manipulation
*
* <code>
*
* $robot = ORM::factory('robots')->new();
* $robot->type = 'mechanical';
* $robot->name = 'Astro Boy';
* $robot->year = 1952;
* if ($robot->save() == false) {
* foreach ($robot->getMessages() as $message) {
* echo $message;
* }
* }
*
* </code>
*
* @param string $tableName
* @param string $className
* @param boolean $suffix
* @return Phalcon\Mvc\ORM
**/
PHP_METHOD(Phalcon_Mvc_ORM, factory)
{
zval *table_name, *class_name = NULL, *suffix = NULL;
phalcon_fetch_params(0, 1, 2, &table_name, &class_name, &suffix);
if (!class_name) {
class_name = &PHALCON_GLOBAL(z_null);
}
if (!suffix) {
suffix = &PHALCON_GLOBAL(z_false);
}
object_init_ex(return_value, phalcon_mvc_orm_ce);
PHALCON_CALL_METHOD(NULL, return_value, "__construct", table_name, class_name, suffix);
}
/**
* Phalcon\Mvc\ORM constructor
*
* @param string $tableName
* @param string $className
* @param boolean $suffix
* @return Phalcon\Mvc\ORM
*/
PHP_METHOD(Phalcon_Mvc_ORM, __construct){
zval *table_name, *class_name = NULL, *suffix = NULL;
zval tmp = {};
phalcon_fetch_params(1, 1, 1, &table_name, &class_name, &suffix);
if (!class_name) {
class_name = &PHALCON_GLOBAL(z_null);
}
if (!suffix) {
suffix = &PHALCON_GLOBAL(z_false);
}
phalcon_update_property(getThis(), SL("_tableName"), table_name);
if (PHALCON_IS_NOT_EMPTY(class_name)) {
phalcon_update_property(getThis(), SL("_className"), class_name);
} else {
phalcon_camelize(&tmp, table_name);
PHALCON_MM_ADD_ENTRY(&tmp);
if (zend_is_true(suffix)) {
PHALCON_SCONCAT_STR(&tmp, "Model");
PHALCON_MM_ADD_ENTRY(&tmp);
}
phalcon_update_property(getThis(), SL("_className"), &tmp);
class_name = &tmp;
}
PHALCON_MM_CALL_CE_STATIC(NULL, phalcon_mvc_model_ce, "register", class_name, table_name);
RETURN_MM_THIS();
}
/**
* Create a model
*
* @return Phalcon\Mvc\Model
*/
PHP_METHOD(Phalcon_Mvc_ORM, new){
zval class_name = {}, service_name = {}, manager = {};
PHALCON_MM_INIT();
phalcon_read_property(&class_name, getThis(), SL("_className"), PH_READONLY);
ZVAL_STR(&service_name, IS(modelsManager));
PHALCON_MM_CALL_METHOD(&manager, getThis(), "getresolveservice", &service_name);
PHALCON_MM_ADD_ENTRY(&manager);
PHALCON_MM_CALL_METHOD(return_value, &manager, "load", &class_name);
RETURN_MM();
}
/**
* Allows to query a set of records that match the specified conditions
*
* <code>
*
* $robots = ORM::factory('robots')->find()
*
* </code>
*
* @param array $params
* @return Phalcon\Mvc\Model\ResultsetInterface
*/
PHP_METHOD(Phalcon_Mvc_ORM, find){
zval *params = NULL, class_name = {};
zend_class_entry *ce;
phalcon_fetch_params(1, 0, 1, ¶ms);
if (!params) {
params = &PHALCON_GLOBAL(z_null);
}
phalcon_read_property(&class_name, getThis(), SL("_className"), PH_READONLY);
ce = phalcon_class_exists_ex(&class_name, 0);
PHALCON_VERIFY_CLASS_CE_EX(ce, phalcon_mvc_model_ce, phalcon_mvc_model_exception_ce);
PHALCON_CALL_CE_STATIC(return_value, ce, "find", params);
RETURN_MM();
}
/**
* Allows to query the first record that match the specified conditions
*
* <code>
*
* $robot = ORM::factory('robots')->findFirst();
* echo "The robot name is ", $robot->name;
*
* </code>
*
* @param array $params
* @return Phalcon\Mvc\Model
*/
PHP_METHOD(Phalcon_Mvc_ORM, findFirst){
zval *params = NULL, class_name = {};
zend_class_entry *ce;
phalcon_fetch_params(1, 0, 1, ¶ms);
if (!params) {
params = &PHALCON_GLOBAL(z_null);
}
phalcon_read_property(&class_name, getThis(), SL("_className"), PH_READONLY);
ce = phalcon_class_exists_ex(&class_name, 0);
PHALCON_VERIFY_CLASS_CE_EX(ce, phalcon_mvc_model_ce, phalcon_mvc_model_exception_ce);
PHALCON_CALL_CE_STATIC(return_value, ce, "findfirst", params);
RETURN_MM();
}
/**
* Generate a PHQL SELECT statement for an aggregate
*
*<code>
*
* ORM::factory('robots')->group(['aggregators' => array(array('column' => 'id', 'aggregator' => 'sum'), 'sumatory' => array('column' => 'price', 'aggregator' => 'sum'))]);
*
*</code>
*
* @param array $params
* @return Phalcon\Mvc\Model\ResultsetInterface
*/
PHP_METHOD(Phalcon_Mvc_ORM, group){
zval *params = NULL, class_name = {};
zend_class_entry *ce;
phalcon_fetch_params(1, 0, 1, ¶ms);
if (!params) {
params = &PHALCON_GLOBAL(z_null);
}
phalcon_read_property(&class_name, getThis(), SL("_className"), PH_READONLY);
ce = phalcon_class_exists_ex(&class_name, 0);
PHALCON_VERIFY_CLASS_CE_EX(ce, phalcon_mvc_model_ce, phalcon_mvc_model_exception_ce);
PHALCON_CALL_CE_STATIC(return_value, ce, "group", params);
RETURN_MM();
}
/**
* Allows to count how many records match the specified conditions
*
* <code>
*
* $number = Robots::count();
*
* </code>
*
* @param array $params
* @return int
*/
PHP_METHOD(Phalcon_Mvc_ORM, count){
zval *params = NULL, class_name = {};
zend_class_entry *ce;
phalcon_fetch_params(1, 0, 1, ¶ms);
if (!params) {
params = &PHALCON_GLOBAL(z_null);
}
phalcon_read_property(&class_name, getThis(), SL("_className"), PH_READONLY);
ce = phalcon_class_exists_ex(&class_name, 0);
PHALCON_VERIFY_CLASS_CE_EX(ce, phalcon_mvc_model_ce, phalcon_mvc_model_exception_ce);
PHALCON_CALL_CE_STATIC(return_value, ce, "count", params);
RETURN_MM();
}
/**
* Allows to calculate a summatory on a column that match the specified conditions
*
* <code>
*
* $sum = Robots::sum(array('column' => 'price'));
*
* </code>
*
* @param array $params
* @return double
*/
PHP_METHOD(Phalcon_Mvc_ORM, sum){
zval *params = NULL, class_name = {};
zend_class_entry *ce;
phalcon_fetch_params(1, 0, 1, ¶ms);
if (!params) {
params = &PHALCON_GLOBAL(z_null);
}
phalcon_read_property(&class_name, getThis(), SL("_className"), PH_READONLY);
ce = phalcon_class_exists_ex(&class_name, 0);
PHALCON_VERIFY_CLASS_CE_EX(ce, phalcon_mvc_model_ce, phalcon_mvc_model_exception_ce);
PHALCON_CALL_CE_STATIC(return_value, ce, "sum", params);
RETURN_MM();
}
/**
* Allows to get the maximum value of a column that match the specified conditions
*
* <code>
*
* $id = Robots::maximum(array('column' => 'id'));
*
* </code>
*
* @param array $params
* @return mixed
*/
PHP_METHOD(Phalcon_Mvc_ORM, maximum){
zval *params = NULL, class_name = {};
zend_class_entry *ce;
phalcon_fetch_params(1, 0, 1, ¶ms);
if (!params) {
params = &PHALCON_GLOBAL(z_null);
}
phalcon_read_property(&class_name, getThis(), SL("_className"), PH_READONLY);
ce = phalcon_class_exists_ex(&class_name, 0);
PHALCON_VERIFY_CLASS_CE_EX(ce, phalcon_mvc_model_ce, phalcon_mvc_model_exception_ce);
PHALCON_CALL_CE_STATIC(return_value, ce, "maximum", params);
RETURN_MM();
}
/**
* Allows to get the minimum value of a column that match the specified conditions
*
* <code>
*
* $id = Robots::minimum(array('column' => 'id'));
*
* </code>
*
* @param array $params
* @return mixed
*/
PHP_METHOD(Phalcon_Mvc_ORM, minimum){
zval *params = NULL, class_name = {};
zend_class_entry *ce;
phalcon_fetch_params(1, 0, 1, ¶ms);
if (!params) {
params = &PHALCON_GLOBAL(z_null);
}
phalcon_read_property(&class_name, getThis(), SL("_className"), PH_READONLY);
ce = phalcon_class_exists_ex(&class_name, 0);
PHALCON_VERIFY_CLASS_CE_EX(ce, phalcon_mvc_model_ce, phalcon_mvc_model_exception_ce);
PHALCON_CALL_CE_STATIC(return_value, ce, "minimum", params);
RETURN_MM();
}
/**
* Allows to calculate the average value on a column matching the specified conditions
*
* <code>
*
* //What's the average price of robots?
* $average = Robots::average(array('column' => 'price'));
* echo "The average price is ", $average, "\n";
*
* //What's the average price of mechanical robots?
* $average = Robots::average(array("type='mechanical'", 'column' => 'price'));
* echo "The average price of mechanical robots is ", $average, "\n";
*
* </code>
*
* @param array $parameters
* @return double
*/
PHP_METHOD(Phalcon_Mvc_ORM, average){
zval *params = NULL, class_name = {};
zend_class_entry *ce;
phalcon_fetch_params(1, 0, 1, ¶ms);
if (!params) {
params = &PHALCON_GLOBAL(z_null);
}
phalcon_read_property(&class_name, getThis(), SL("_className"), PH_READONLY);
ce = phalcon_class_exists_ex(&class_name, 0);
PHALCON_VERIFY_CLASS_CE_EX(ce, phalcon_mvc_model_ce, phalcon_mvc_model_exception_ce);
PHALCON_CALL_CE_STATIC(return_value, ce, "average", params);
RETURN_MM();
}
|
f2dae4eb668c177195887b67436e57d5cb6dc1bd
|
28d0f8c01599f8f6c711bdde0b59f9c2cd221203
|
/sys/rump/librump/rumpkern/rumpkern_if_wrappers.c
|
c6cede538a4a48bd11e699392368bb8b65a1f5e5
|
[] |
no_license
|
NetBSD/src
|
1a9cbc22ed778be638b37869ed4fb5c8dd616166
|
23ee83f7c0aea0777bd89d8ebd7f0cde9880d13c
|
refs/heads/trunk
| 2023-08-31T13:24:58.105962
| 2023-08-27T15:50:47
| 2023-08-27T15:50:47
| 88,439,547
| 656
| 348
| null | 2023-07-20T20:07:24
| 2017-04-16T20:03:43
| null |
UTF-8
|
C
| false
| false
| 3,551
|
c
|
rumpkern_if_wrappers.c
|
/* $NetBSD: rumpkern_if_wrappers.c,v 1.18 2016/01/26 23:25:41 pooka Exp $ */
/*
* Automatically generated. DO NOT EDIT.
* from: NetBSD: rumpkern.ifspec,v 1.14 2016/01/26 23:22:22 pooka Exp
* by: NetBSD: makerumpif.sh,v 1.10 2016/01/26 23:21:18 pooka Exp
*/
#include <sys/cdefs.h>
#include <sys/systm.h>
#include <rump-sys/kern.h>
#include <rump-sys/kern_if.h>
#include <rump/rump.h>
#include <rump/rumpkern_if_pub.h>
void __dead rump_kern_unavailable(void);
void __dead
rump_kern_unavailable(void)
{
panic("kern interface unavailable");
}
int
rump_pub_module_init(const struct modinfo * const *arg1, size_t arg2)
{
int rv;
rump_schedule();
rv = rump_module_init(arg1, arg2);
rump_unschedule();
return rv;
}
int
rump_pub_module_fini(const struct modinfo *arg1)
{
int rv;
rump_schedule();
rv = rump_module_fini(arg1);
rump_unschedule();
return rv;
}
int
rump_pub_kernelfsym_load(void *arg1, uint64_t arg2, char *arg3, uint64_t arg4)
{
int rv;
rump_schedule();
rv = rump_kernelfsym_load(arg1, arg2, arg3, arg4);
rump_unschedule();
return rv;
}
struct uio *
rump_pub_uio_setup(void *arg1, size_t arg2, off_t arg3, enum rump_uiorw arg4)
{
struct uio * rv;
rump_schedule();
rv = rump_uio_setup(arg1, arg2, arg3, arg4);
rump_unschedule();
return rv;
}
size_t
rump_pub_uio_getresid(struct uio *arg1)
{
size_t rv;
rump_schedule();
rv = rump_uio_getresid(arg1);
rump_unschedule();
return rv;
}
off_t
rump_pub_uio_getoff(struct uio *arg1)
{
off_t rv;
rump_schedule();
rv = rump_uio_getoff(arg1);
rump_unschedule();
return rv;
}
size_t
rump_pub_uio_free(struct uio *arg1)
{
size_t rv;
rump_schedule();
rv = rump_uio_free(arg1);
rump_unschedule();
return rv;
}
struct kauth_cred*
rump_pub_cred_create(uid_t arg1, gid_t arg2, size_t arg3, gid_t *arg4)
{
struct kauth_cred* rv;
rump_schedule();
rv = rump_cred_create(arg1, arg2, arg3, arg4);
rump_unschedule();
return rv;
}
void
rump_pub_cred_put(struct kauth_cred *arg1)
{
rump_schedule();
rump_cred_put(arg1);
rump_unschedule();
}
int
rump_pub_lwproc_rfork(int arg1)
{
int rv;
rump_schedule();
rv = rump_lwproc_rfork(arg1);
rump_unschedule();
return rv;
}
int
rump_pub_lwproc_newlwp(pid_t arg1)
{
int rv;
rump_schedule();
rv = rump_lwproc_newlwp(arg1);
rump_unschedule();
return rv;
}
void
rump_pub_lwproc_switch(struct lwp *arg1)
{
rump_schedule();
rump_lwproc_switch(arg1);
rump_unschedule();
}
void
rump_pub_lwproc_releaselwp(void)
{
rump_schedule();
rump_lwproc_releaselwp();
rump_unschedule();
}
struct lwp *
rump_pub_lwproc_curlwp(void)
{
struct lwp * rv;
rump_schedule();
rv = rump_lwproc_curlwp();
rump_unschedule();
return rv;
}
void
rump_pub_lwproc_sysent_usenative(void)
{
rump_schedule();
rump_lwproc_sysent_usenative();
rump_unschedule();
}
void
rump_pub_allbetsareoff_setid(pid_t arg1, int arg2)
{
rump_schedule();
rump_allbetsareoff_setid(arg1, arg2);
rump_unschedule();
}
int
rump_pub_etfs_register(const char *arg1, const char *arg2, enum rump_etfs_type arg3)
{
int rv;
rump_schedule();
rv = rump_etfs_register(arg1, arg2, arg3);
rump_unschedule();
return rv;
}
int
rump_pub_etfs_register_withsize(const char *arg1, const char *arg2, enum rump_etfs_type arg3, uint64_t arg4, uint64_t arg5)
{
int rv;
rump_schedule();
rv = rump_etfs_register_withsize(arg1, arg2, arg3, arg4, arg5);
rump_unschedule();
return rv;
}
int
rump_pub_etfs_remove(const char *arg1)
{
int rv;
rump_schedule();
rv = rump_etfs_remove(arg1);
rump_unschedule();
return rv;
}
|
01a29752617a879374850e27fcbdce3e5a81b7ff
|
03666e5f961946fc1a0ac67781ac1425562ef0d7
|
/src/operators/IndexSelect/avtIndexSelectFilter.C
|
aa7c5eadbd5eacd8ea522fdd6a2e41a76ed728c6
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
visit-dav/visit
|
e9f81b4d4b9b9930a0db9d5282cd1bcabf465e2e
|
601ae46e0bef2e18425b482a755d03490ade0493
|
refs/heads/develop
| 2023-09-06T08:19:38.397058
| 2023-09-05T21:29:32
| 2023-09-05T21:29:32
| 165,565,988
| 335
| 120
|
BSD-3-Clause
| 2023-09-14T00:53:37
| 2019-01-13T23:27:26
|
C
|
UTF-8
|
C
| false
| false
| 65,577
|
c
|
avtIndexSelectFilter.C
|
// Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
// Project developers. See the top-level LICENSE file for dates and other
// details. No copyright assignment is required to contribute to VisIt.
// ************************************************************************* //
// File: avtIndexSelectFilter.C
// ************************************************************************* //
#include <avtIndexSelectFilter.h>
#include <visit-config.h> // For LIB_VERSION_LE
#include <avtExecutionManager.h>
#include <vtkCell.h>
#include <vtkCellArray.h>
#include <vtkCellData.h>
#include <vtkCharArray.h>
#include <vtkDataSetReader.h>
#include <vtkDataSetRemoveGhostCells.h>
#include <vtkDataSetWriter.h>
#include <vtkMaskPoints.h>
#include <vtkIntArray.h>
#include <vtkPointData.h>
#include <vtkPolyData.h>
#include <vtkRectilinearGrid.h>
#include <vtkStructuredGrid.h>
#include <vtkUnsignedIntArray.h>
#include <vtkUnstructuredGrid.h>
#include <vtkVisItExtractGrid.h>
#include <vtkVisItExtractRectilinearGrid.h>
#include <vtkVisItUtility.h>
#include <avtCallback.h>
#include <avtDatasetExaminer.h>
#include <avtLogicalSelection.h>
#include <avtSILNamespace.h>
#include <avtSILRestrictionTraverser.h>
#include <avtOriginatingSource.h>
#include <avtParallel.h>
#include <DebugStream.h>
#include <ImproperUseException.h>
#include <InvalidSetException.h>
#include <InvalidCategoryException.h>
#include <InvalidVariableException.h>
#ifdef PARALLEL
#include <mpi.h>
#endif
#include <string>
#include <vector>
using std::string;
using std::vector;
// ****************************************************************************
// Method: avtIndexSelectFilter constructor
//
// Programmer: childs -- generated by xml2info
// Creation: Wed Jun 5 09:09:11 PDT 2002
//
// Modifications:
//
// Mark C. Miller, Tue Sep 28 19:57:42 PDT 2004
// Added data selection id
//
// Kathleen Bonnell, Tue May 10 11:19:24 PDT 2005
// Use VisIt versions of vtkExtractGrid and vtkExtractRectilinearGrid,
// they have been modified to correctly handle cell data when VOI is
// along max boundary.
//
// Kathleen Bonnell, Mon Jan 30 15:10:26 PST 2006
// Add vtkMaskPoints for a points filter.
//
// Kathleen Bonnell, Thu Jun 7 14:37:49 PDT 2007
// Added groupCategory.
//
// Kathleen Bonnell, Thu Jun 21 16:31:59 PDT 2007
// Added amrLevel, amrMesh.
//
// Kathleen Biagas, Wed Jan 11 13:58:31 PST 2012
// Turn on SingleCellPerVertex for vtkMaskPoints.
//
// Kathleen Biagas, Tue Jun 9 09:31:15 MST 2015
// Added globalDims, lspace.
//
// Alister Maguire, Wed Oct 26 11:34:15 PDT 2016
// Removed curvilinearFilter, rectilinearFilter,
// and pointsFilter for thread safety. They now
// exist as stack variables in ExecuteData.
//
// ****************************************************************************
avtIndexSelectFilter::avtIndexSelectFilter() : lspace()
{
haveIssuedWarning = false;
selID = -1;
amrLevel = -1;
amrMesh = false;
groupCategory = false;
for (int i = 0; i < 3; ++i)
{
globalDims[i] = INT_MAX;
globalDims[i+3] = -1;
}
}
// ****************************************************************************
// Method: avtIndexSelectFilter destructor
//
// Programmer: childs -- generated by xml2info
// Creation: Wed Jun 5 09:09:11 PDT 2002
//
// Modifications:
// Kathleen Bonnell, Mon Jan 30 15:10:26 PST 2006
// Delete vtkMaskPoints.
//
// Alister Maguire, Wed Oct 26 11:34:15 PDT 2016
// Removed curvilinearFilter, rectilinearFilter,
// and pointsFilter for thread safety. They now
// exist as stack variables in ExecuteData.
//
// ****************************************************************************
avtIndexSelectFilter::~avtIndexSelectFilter()
{
}
// ****************************************************************************
// Method: avtIndexSelectFilter::Create
//
// Programmer: childs -- generated by xml2info
// Creation: Wed Jun 5 09:09:11 PDT 2002
//
// ****************************************************************************
avtFilter *
avtIndexSelectFilter::Create()
{
return new avtIndexSelectFilter();
}
// ****************************************************************************
// Method: avtIndexSelectFilter::SetAtts
//
// Purpose:
// Sets the state of the filter based on the attribute object.
//
// Arguments:
// a The attributes to use.
//
// Programmer: childs -- generated by xml2info
// Creation: Wed Jun 5 09:09:11 PDT 2002
//
// ****************************************************************************
void
avtIndexSelectFilter::SetAtts(const AttributeGroup *a)
{
atts = *(const IndexSelectAttributes*)a;
}
// ****************************************************************************
// Method: avtIndexSelectFilter::PrepareFilters
//
// Purpose:
// Prepares the curvilinear and rectilinear filters for the current
// dataset. This can take into account block indices.
//
// Programmer: Hank Childs
// Creation: June 25, 2002
//
// Modifications:
// Kathleen Bonnell, Wed Sep 8 09:36:30 PDT 2004
// Always Set IncludeBoundary to true for filters, so they can handle
// modulo prolbems (eg sample rate of 3, but dimension is 10).
//
// Kathleen Bonnell, Wed Jul 20 11:39:34 PDT 2005
// Don't subtract 1 from the groupIndices.
//
// Kathleen Bonnell, Mon Jan 30 15:10:26 PST 2006
// Setup vtkMaskPoints.
//
// Kathleen Bonnell, Thu Jun 7 14:37:49 PDT 2007
// Use groupCategory insead of WhichData == OneGroup.
//
// Kathleen Bonnell, Thu Jun 21 16:31:59 PDT 2007
// Added int* amri arg. Read from this arg if AMR mesh in order to
// determine how to correctly compute min/max.
//
// Alister Maguire, Mon Oct 24 12:25:39 PDT 2016
// Added vtkVisItExtgracGrid, vtkVisItExtgractRectilinearGrid,
// and vtkMaskPoints arguments for thread safety.
//
// ****************************************************************************
void
avtIndexSelectFilter::PrepareFilters(int groupIndices[3], int *amri,
                               vtkVisItExtractGrid *curvilinearFilter,
                               vtkVisItExtractRectilinearGrid *rectilinearFilter,
                               vtkMaskPoints *pointsFilter)
{
    //
    // The base index only matters when index selecting by group; otherwise
    // every block is treated as starting at the origin.
    //
    int bi[3] = { 0, 0, 0 };
    if (groupCategory)
    {
        for (int i = 0; i < 3; i++)
            bi[i] = groupIndices[i];
    }

    int minmax[6] = {atts.GetXMin(), atts.GetXMax(), atts.GetYMin(),
                     atts.GetYMax(), atts.GetZMin(), atts.GetZMax()};

    //
    // For AMR meshes the user-specified indices are relative to one level,
    // so scale them into the resolution of the current patch.  amri[3]
    // selects the direction: 0 means multiply (refine), anything else
    // means divide (coarsen, rounding the max up by one).
    //
    if (amrMesh && groupCategory)
    {
        int OP = amri[3];
        for (int i = 0; i < 3; i++)
        {
            if (amri[i] > 1)
            {
                if (OP == 0)
                {
                    minmax[i*2]   = minmax[i*2]*amri[i];
                    minmax[i*2+1] = minmax[i*2+1]*amri[i];
                }
                else
                {
                    minmax[i*2] = minmax[i*2]/amri[i];
                    if (minmax[i*2+1] != -1)
                        minmax[i*2+1] = minmax[i*2+1]/amri[i] + 1;
                }
            }
        }
    }

    //
    // Build the volume of interest.  Axes beyond the selected dimensionality
    // are left wide open; a negative max means "to the end" (sentinel
    // 1000000), and mins are clamped at zero after removing the base index.
    //
    bool axisActive[3];
    axisActive[0] = true;
    axisActive[1] = (atts.GetDim() == IndexSelectAttributes::TwoD ||
                     atts.GetDim() == IndexSelectAttributes::ThreeD);
    axisActive[2] = (atts.GetDim() == IndexSelectAttributes::ThreeD);

    int voi[6];
    for (int i = 0; i < 3; i++)
    {
        if (axisActive[i])
        {
            voi[i*2]   = (minmax[i*2] - bi[i] < 0 ? 0 : minmax[i*2] - bi[i]);
            voi[i*2+1] = (minmax[i*2+1] < 0 ? 1000000 : minmax[i*2+1] - bi[i]);
        }
        else
        {
            voi[i*2]   = 0;
            voi[i*2+1] = 1000000;
        }
    }
    curvilinearFilter->SetVOI(voi);
    rectilinearFilter->SetVOI(voi);

    int sampleRate[3];
    sampleRate[0] = atts.GetXIncr();
    sampleRate[1] = atts.GetYIncr();
    sampleRate[2] = atts.GetZIncr();
    curvilinearFilter->SetSampleRate(sampleRate);
    rectilinearFilter->SetSampleRate(sampleRate);

    // Point meshes are handled through vtkMaskPoints, which only honors
    // the X-axis portion of the selection.
    pointsFilter->SetOnRatio(sampleRate[0]);
    pointsFilter->SetOffset(voi[0]);
    if (voi[1] != 1000000)
    {
        int maxpts = (voi[1] - voi[0]) / sampleRate[0];
        pointsFilter->SetMaximumNumberOfPoints(maxpts);
    }
    else
    {
        pointsFilter->SetMaximumNumberOfPoints(VTK_INT_MAX);
    }

    // Always include the boundary so modulo effects (e.g. a sample rate of
    // 3 on a dimension of 10) do not drop the final plane of nodes.
    curvilinearFilter->SetIncludeBoundary(1);
    rectilinearFilter->SetIncludeBoundary(1);
}
// ****************************************************************************
// Method: avtIndexSelectFilter::Equivalent
//
// Purpose:
// Returns true if creating a new avtIndexSelectFilter with the given
// parameters would result in an equivalent avtIndexSelectFilter.
//
// Programmer: childs -- generated by xml2info
// Creation: Wed Jun 5 09:09:11 PDT 2002
//
// ****************************************************************************
bool
avtIndexSelectFilter::Equivalent(const AttributeGroup *a)
{
return (atts == *(IndexSelectAttributes*)a);
}
// ****************************************************************************
// Method: avtIndexSelectFilter::ExecuteData
//
// Purpose:
// Sends the specified input and output through the IndexSelect filter.
//
// Arguments:
// in_dr The input data representation.
//
// Returns: The output data representation.
//
// Programmer: childs -- generated by xml2info
// Creation: Wed Jun 5 09:09:11 PDT 2002
//
// Modifications:
//
// Hank Childs, Wed Jun 19 09:47:01 PDT 2002
// Fix stupid crash when applying to unstructured grids.
//
// Hank Childs, Tue Jun 25 18:22:35 PDT 2002
// Use group indices when calculating indices.
//
// Hank Childs, Wed Oct 2 10:13:26 PDT 2002
// Added support for meshes that have already been broken up.
//
// Hank Childs, Mon Dec 9 08:53:15 PST 2002
// Account for potentially changing format of avtOriginalCellNumbers.
//
// Kathleen Bonnell, Fri Dec 13 16:41:12 PST 2002
// Use NewInstance instead of MakeObject in order to match vtk's new api.
//
// Jeremy Meredith, Fri Jan 30 17:45:47 PST 2004
// Added code to preserve dataset type if the input was polydata.
//
// Hank Childs, Fri Aug 27 15:25:22 PDT 2004
// Rename ghost data array.
//
// Mark C. Miller, Tue Sep 28 19:57:42 PDT 2004
// Added code to bypass the operator if the selection is applied by a
// plugin
//
// Kathleen Bonnell, Fri Feb 18 09:41:16 PST 2005
// Account for the fact the vtkExtractGrid and vtkRectilinearExtractGrid
// may return 'empty' datasets, so we want to return a NULL dataset.
//
// Kathleen Bonnell, Fri Aug 19 15:45:46 PDT 2005
// Remove ghost nodes array, ensure ghost zones are skipped in
// 'zones not preserved' case. Removed retrieval and use of
// 'avtRealDims'. Its retrieval was incorrect so it was never ever used.
//
// Kathleen Bonnell, Mon Jan 30 15:10:26 PST 2006
// Use vtkMaskPoints for point meshes.
//
// Kathleen Bonnell, Wed May 17 10:46:58 PDT 2006
// Remove call to SetSource(NULL) as it now removes information necessary
// for the dataset.
//
// Kathleen Bonnell, Thu Jun 7 14:37:49 PDT 2007
// Use groupCategory insead of WhichData == OneGroup.
//
// Kathleen Bonnell, Thu Jun 21 16:31:59 PDT 2007
// If this is an AMR mesh, retrieve AMR indices and don't remove ghost zones.
//
// Kathleen Bonnell, Tue Feb 22 11:44:48 PST 2011
// Added code to convert vtkPolyVertex cells returned from vtkMaskPoints
// to vtkVertex cells, as filters down the pipeline may not be able to
// handle vtkPolyVertexCells.
//
// Kathleen Biagas, Wed Jan 11 13:59:21 PST 2012
// Remove hack that converts vtkPolyVertex cells to vtkVertex cells.
//
// Eric Brugger, Mon Jul 28 15:33:52 PDT 2014
// Modified the class to work with avtDataRepresentation.
//
// Eric Brugger, Fri Sep 26 08:48:14 PDT 2014
// I modified the routine to return a NULL in the case where it previously
// returned an avtDataRepresentation with a NULL vtkDataSet.
//
// Kathleen Biagas, Tue Jun 9 09:34:17 MST 2015
// Move Replicate (wrap) to PostExecute, collect globalDims here instead.
//
// Alister Maguire, Tue Nov 8 12:44:55 PST 2016
// Changed curvilinearFilter, rectilinearFilter, and pointsFilter to stack
// variables for thread safety; added amrMesh for thread safety; added
// mutex locks where necessary.
//
// ****************************************************************************
avtDataRepresentation *
avtIndexSelectFilter::ExecuteData(avtDataRepresentation *in_dr)
{
    // Per-call VTK sub-filters (stack locals, per the 2016 thread-safety
    // changes noted in the header).
    // NOTE(review): the early 'return in_dr;' / 'return NULL;' paths below
    // bypass the Delete() calls at the bottom of this method, leaking these
    // three filters -- confirm and clean up if not intentional.
    vtkVisItExtractGrid *curvilinearFilter =
        vtkVisItExtractGrid::New();
    vtkVisItExtractRectilinearGrid *rectilinearFilter =
        vtkVisItExtractRectilinearGrid::New();
    vtkMaskPoints *pointsFilter = vtkMaskPoints::New();
    pointsFilter->GenerateVerticesOn();
    pointsFilter->RandomModeOff();
    pointsFilter->SingleVertexPerCellOn();
    //
    //
    // Get the VTK data set.
    //
    vtkDataSet *in_ds = in_dr->GetDataVTK();
    vtkDataSet *out_ds = NULL;
    int topoDim = GetInput()->GetInfo().GetAttributes().
        GetTopologicalDimension();
    // NOTE(review): this local shadows the 'amrMesh' member assigned in
    // ModifyContract -- presumably intentional for thread safety; confirm.
    bool amrMesh = (GetInput()->GetInfo().GetAttributes().GetMeshType() ==
        AVT_AMR_MESH);
    // Logical origin of this block within the whole mesh, if supplied.
    vtkIntArray *bi_arr = vtkIntArray::SafeDownCast(
        in_ds->GetFieldData()->GetArray("base_index"));
    //
    // If the selection this filter exists to create has already been handled,
    // then we can skip execution
    //
    if (GetInput()->GetInfo().GetAttributes().GetSelectionApplied(selID))
    {
        debug1 << "Bypassing IndexSelect operator because database plugin "
            "claims to have applied the selection already" << endl;
        out_ds = in_ds;
    }
    else if (GetInput()->GetInfo().GetValidity().GetZonesPreserved())
    {
        //
        // We have the normal case -- a structured mesh that we are going to
        // index select.
        //
        vtkDataSet *ds = NULL;
        //
        // All of our indices are incorrect if we leave the ghost zones in --
        // we can also have some weird phenomenon where boundaries are missing
        // between blocks.
        //
        vtkDataSetRemoveGhostCells *removeGhostCells = NULL;
        if (!amrMesh &&
            in_ds->GetCellData()->GetArray("avtGhostZones"))
        {
            removeGhostCells = vtkDataSetRemoveGhostCells::New();
            removeGhostCells->SetInputData(in_ds);
            //
            // There is something buggy about the extents when this filter is
            // used for repeated executions. Just force the execution now.
            //
            removeGhostCells->Update();
            // NOTE(review): 'ds' from NewInstance() is never Delete()d on
            // this branch -- appears to leak one dataset copy per call.
            ds = removeGhostCells->GetOutput()->NewInstance();
            ds->ShallowCopy(removeGhostCells->GetOutput());
        }
        else
        {
            ds = in_ds;
        }
        in_ds->GetPointData()->RemoveArray("avtGhostNodes");
        //
        // The indices should reflect the "base_index"'s, so dummy one up if
        // we don't have one.
        //
        int ind[3] = { 0, 0, 0 };
        if (bi_arr != NULL)
        {
            ind[0] = bi_arr->GetValue(0);
            ind[1] = bi_arr->GetValue(1);
            ind[2] = bi_arr->GetValue(2);
        }
        // AMR refinement factors for the current patch, needed when index
        // selecting by group (level) on an AMR mesh.
        int *amri = NULL;
        if (amrMesh && groupCategory)
        {
            vtkIntArray *amrdims = (vtkIntArray *)
                in_ds->GetFieldData()->GetArray("avtAMRDimensions");
            if (amrdims == NULL)
            {
                if (!haveIssuedWarning)
                {
                    avtCallback::IssueWarning("An internal error occurred and "
                        "the index select operator was not applied.");
                    VisitMutexLock("avtIndexSelectFilter::ThreadSafeExecuteData");
                    haveIssuedWarning = true;
                    VisitMutexUnlock("avtIndexSelectFilter::ThreadSafeExecuteData");
                }
                return in_dr;
            }
            else
            {
                amri = (int*)amrdims->GetVoidPointer(0);
            }
        }
        // Configure the VOI/stride on all three sub-filters for this block.
        PrepareFilters(ind, amri, curvilinearFilter, rectilinearFilter,
                       pointsFilter);
        // Dispatch to the sub-filter matching the dataset type.
        vtkDataSet *rv = NULL;
        int dstype = ds->GetDataObjectType();
        if (dstype == VTK_STRUCTURED_GRID)
        {
            curvilinearFilter->SetInputData(ds);
            curvilinearFilter->Update();
            rv = curvilinearFilter->GetOutput();
        }
        else if (dstype == VTK_RECTILINEAR_GRID)
        {
            rectilinearFilter->SetInputData(ds);
            rectilinearFilter->Update();
            rv = rectilinearFilter->GetOutput();
        }
        else if (topoDim == 0 &&
                 (dstype == VTK_POLY_DATA || dstype == VTK_UNSTRUCTURED_GRID))
        {
            pointsFilter->SetInputData(ds);
            pointsFilter->Update();
            rv = pointsFilter->GetOutput();
        }
        else
        {
            if (!haveIssuedWarning)
            {
                avtCallback::IssueWarning("The index select operator was "
                          "applied to a non-structured mesh.  It is not "
                          "being applied.");
                VisitMutexLock("avtIndexSelectFilter::ThreadSafeExecuteData");
                haveIssuedWarning = true;
                VisitMutexUnlock("avtIndexSelectFilter::ThreadSafeExecuteData");
            }
            return in_dr;
        }
        if (removeGhostCells != NULL)
        {
            removeGhostCells->Delete();
        }
        // The extraction filters may legitimately produce an empty dataset;
        // signal that with a NULL return.
        if (rv->GetNumberOfPoints() <= 0 || rv->GetNumberOfCells() <= 0)
        {
            return NULL;
        }
        // Take a shallow copy so the output outlives the sub-filters.
        out_ds = (vtkDataSet *) rv->NewInstance();
        out_ds->ShallowCopy(rv);
    }
    else
    {
        //
        // The dataset has been changed before it got here -- most likely it
        // was material selected.  We should have passed enough clues
        // downstream to figure out what happened.
        //
        vtkUnsignedIntArray *origZones = (vtkUnsignedIntArray *)
                                              in_ds->GetCellData()->
                                              GetArray("avtOriginalCellNumbers");
        if (origZones == NULL)
        {
            if (!haveIssuedWarning)
            {
                avtCallback::IssueWarning("An internal error occurred and the "
                                          "index select operator was not "
                                          "applied.");
                VisitMutexLock("avtIndexSelectFilter::ThreadSafeExecuteData");
                haveIssuedWarning = true;
                VisitMutexUnlock("avtIndexSelectFilter::ThreadSafeExecuteData");
            }
            return in_dr;
        }
        vtkUnsignedIntArray *dims = (vtkUnsignedIntArray *)
                                  in_ds->GetFieldData()->GetArray(
                                           "avtOriginalStructuredDimensions");
        if (dims == NULL)
        {
            if (!haveIssuedWarning)
            {
                avtCallback::IssueWarning("An internal error occurred and the "
                                          "index select operator was not "
                                          "applied.");
                VisitMutexLock("avtIndexSelectFilter::ThreadSafeExecuteData");
                haveIssuedWarning = true;
                VisitMutexUnlock("avtIndexSelectFilter::ThreadSafeExecuteData");
            }
            return in_dr;
        }
        // Original node-dimensions of the structured mesh before it was
        // turned into an unstructured one.
        int d[3];
        d[0] = dims->GetValue(0);
        d[1] = dims->GetValue(1);
        d[2] = dims->GetValue(2);
        int base[3] = { 0, 0, 0 };
        if (groupCategory)
        {
            if (bi_arr != NULL)
            {
                base[0] = bi_arr->GetValue(0);
                base[1] = bi_arr->GetValue(1);
                base[2] = bi_arr->GetValue(2);
            }
        }
        vtkDataArray *ghosts = in_ds->GetCellData()->GetArray("avtGhostZones");
        unsigned char *gz = NULL;
        if (ghosts)
            gz = (unsigned char *)ghosts->GetVoidPointer(0);
        //
        // We should have everything lined up now -- we know what the original
        // indexing of the structured mesh was and what the base index is.
        //
        vtkUnstructuredGrid *out_ug = vtkUnstructuredGrid::New();
        vtkPoints *p1 = vtkVisItUtility::GetPoints(in_ds);
        out_ug->SetPoints(p1);
        p1->Delete();
        out_ug->GetPointData()->PassData(in_ds->GetPointData());
        vtkCellData *out_cd = out_ug->GetCellData();
        vtkCellData *in_cd = in_ds->GetCellData();
        out_cd->CopyAllocate(in_cd);
        int ncells = in_ds->GetNumberOfCells();
        out_ug->Allocate(ncells);
        int out_cell = 0;
        // Negative maxima mean "no upper bound" (10000000 sentinel).
        int xmin = atts.GetXMin();
        int xmax = atts.GetXMax();
        xmax = (xmax < 0 ? 10000000 : xmax);
        int ymin = atts.GetYMin();
        int ymax = atts.GetYMax();
        ymax = (ymax < 0 ? 10000000 : ymax);
        int zmin = atts.GetZMin();
        int zmax = atts.GetZMax();
        zmax = (zmax < 0 ? 10000000 : zmax);
        // The original cell number lives in the last component of the
        // avtOriginalCellNumbers array (format changed historically).
        int ncomps = origZones->GetNumberOfComponents();
        int comp = ncomps-1;
        // Walk every cell, reconstruct its (x,y,z) logical index from the
        // original cell id, and keep it only if it falls inside the range.
        for (int i = 0 ; i < ncells ; i++)
        {
            int cell_id = (int) origZones->GetComponent(i, comp);
            int x = cell_id % (d[0]-1);
            int y = (cell_id / (d[0]-1)) % (d[1]-1);
            int z = cell_id / ((d[0]-1)*(d[1]-1));
            x += base[0];
            y += base[1];
            z += base[2];
            if (x < xmin || x >= xmax)
            {
                continue;
            }
            if (atts.GetDim() == IndexSelectAttributes::TwoD ||
                atts.GetDim() == IndexSelectAttributes::ThreeD)
            {
                if (y < ymin || y >= ymax)
                    continue;
            }
            if (atts.GetDim() == IndexSelectAttributes::ThreeD)
            {
                if (z < zmin || z >= zmax)
                    continue;
            }
            // only allow AMR ghosts to be part of the output.
            if (gz && !(gz[i] == 0 || gz[i] == 8))
                continue;
            out_cd->CopyData(in_cd, i, out_cell++);
            vtkCell *cell = in_ds->GetCell(i);
            vtkIdList *list = cell->GetPointIds();
            out_ug->InsertNextCell(in_ds->GetCellType(i), list);
        }
        out_ds = out_ug;
        if (out_ds->GetNumberOfCells() <= 0)
        {
            out_ds->Delete();
            return NULL;
        }
        //
        // If we had poly data input, we want poly data output.  The VTK filter
        // only returns unstructured grids, so convert that now.
        //
        if (in_ds->GetDataObjectType() == VTK_POLY_DATA)
        {
            vtkUnstructuredGrid *ugrid = (vtkUnstructuredGrid *) out_ds;
            vtkPolyData *out_pd = vtkPolyData::New();
            out_pd->SetPoints(ugrid->GetPoints());
            out_pd->GetPointData()->ShallowCopy(ugrid->GetPointData());
            out_pd->GetCellData()->ShallowCopy(ugrid->GetCellData());
            int ncells = ugrid->GetNumberOfCells();
            out_pd->Allocate(ncells);
            for (int i = 0 ; i < ncells ; i++)
            {
                int celltype = ugrid->GetCellType(i);
                vtkIdType npts;
#if LIB_VERSION_LE(VTK,8,1,0)
                vtkIdType *pts;
#else
                const vtkIdType *pts;
#endif
                ugrid->GetCellPoints(i, npts, pts);
                out_pd->InsertNextCell(celltype, npts, pts);
            }
            out_ds->Delete();
            out_ds = out_pd;
        }
    }
    // Record success under the lock so PostExecute can adjust the
    // topological dimension exactly once.
    VisitMutexLock("avtIndexSelectFilter::ExecuteData");
    atLeastOneThreadSuccessfullyExecuted = true;
    VisitMutexUnlock("avtIndexSelectFilter::ExecuteData");
    // When wrapping is requested, accumulate the global logical extents of
    // this block; the actual replication happens later in PostExecute.
    if ((atts.GetXWrap() || atts.GetYWrap() || atts.GetZWrap()) &&
        topoDim > 0 &&
        (in_ds->GetDataObjectType() == VTK_STRUCTURED_GRID ||
         in_ds->GetDataObjectType() == VTK_RECTILINEAR_GRID))
    {
        // In the multi-block case, this doesn't work if sil-selection
        // has been applied, because only the currently plotted domains
        // are used to calculate the maximums... is this okay?
        int lo[3] = {0, 0, 0};
        int hi[3] = {0, 0, 0};
        int dims[3] = {0, 0, 0};
        if (out_ds->GetDataObjectType() == VTK_STRUCTURED_GRID)
        {
            vtkStructuredGrid::SafeDownCast(out_ds)->GetDimensions(dims);
        }
        else if (out_ds->GetDataObjectType() == VTK_RECTILINEAR_GRID)
        {
            vtkRectilinearGrid::SafeDownCast(out_ds)->GetDimensions(dims);
        }
        if (bi_arr)
        {
            lo[0] = bi_arr->GetValue(0);
            lo[1] = bi_arr->GetValue(1);
            lo[2] = bi_arr->GetValue(2);
        }
        for (int i = 0; i < 3; ++i)
        {
            hi[i] = lo[i] + dims[i] -1;
            if (lo[i] < globalDims[i])
                globalDims[i] = lo[i];
            if (hi[i] > globalDims[i+3])
                globalDims[i+3] = hi[i];
        }
    }
    avtDataRepresentation *out_dr = new avtDataRepresentation(out_ds,
        in_dr->GetDomain(), in_dr->GetLabel());
    if (out_ds != in_ds)
        out_ds->Delete();
    if (curvilinearFilter != NULL)
    {
        curvilinearFilter->Delete();
        curvilinearFilter = NULL;
    }
    if (rectilinearFilter != NULL)
    {
        rectilinearFilter->Delete();
        rectilinearFilter = NULL;
    }
    if (pointsFilter != NULL)
    {
        pointsFilter->Delete();
        pointsFilter = NULL;
    }
    return out_dr;
}
// ****************************************************************************
// Method: avtIndexSelectFilter::ModifyContract
//
// Purpose:
// Restricts the SIL to the domains requested by the user.
//
// Programmer: Hank Childs
// Creation: June 5, 2002
//
// Modifications:
//
// Hank Childs, Sun Jun 16 20:50:53 PDT 2002
// Add support for non 0-origin blocks.
//
// Hank Childs, Mon Sep 30 17:23:33 PDT 2002
// Add support for index selecting after a destructive operation.
//
// Hank Childs, Mon Dec 2 09:59:56 PST 2002
// Account for changing interface for SIL restriction.
//
// Hank Childs, Thu Aug 14 07:44:49 PDT 2003
// Also request the structured indices if we are specifically told to do
// interface reconstruction.
//
// Mark C. Miller, Tue Sep 28 19:57:42 PDT 2004
// Added code to build a data selection
//
// Kathleen Bonnell, Tue Nov 16 16:13:08 PST 2004
// Gracefully handle domainIndex that is out-of-range, and issue warning.
// Also, use domainIndex when determining chunk for trav.GetMaterials, when
// appropriate.
//
// Kathleen Bonnell, Thu Jul 21 07:52:49 PDT 2005
// For OneGroup selection, determine domains belonging to that group,
// and restrict by domains to ensure that sets that were previously
// turned off remains so. When determining if StructuredIndices are
// required, ensure checking is done only on domains needed by this filter.
//
// Kathleen Bonnell, Thu Aug 4 15:47:59 PDT 2005
// Request original node numbers when required.
//
// Jeremy Meredith, Wed Aug 24 13:37:07 PDT 2005
// Added support for group origin.
//
// Kathleen Bonnell, Thu Jun 7 14:37:49 PDT 2007
// Modified how SIL selection is done, based on new atts.
//
// Kathleen Bonnell, Thu Jun 21 16:31:59 PDT 2007
// If this is an AMR mesh and category is group, don't restrict the SIL
// and also request AMR indices.
//
// Hank Childs, Fri Oct 26 16:40:57 PDT 2007
// Correct some SIL handling. Compact SIL attributes are only defined
// over a subset of the nodes in the SIL graph; comparing them without
// re-indexing ... which was being done previously ... led to incorrect
// results.
//
// Hank Childs, Wed Jan 9 16:10:33 PST 2008
// Beef up logic to handle species selection. We were turning those sets
// off, which resulted in all zeroes.
//
// Dave Bremer, Thu Jan 31 17:52:55 PST 2008
// Small tweak to guard against a case in which the RealMapsOut are
// requested from an avtSILSet, but the set goes out of scope and its maps
// out are deleted before this method is done using them.
//
// Hank Childs, Mon Dec 14 16:55:10 PST 2009
// Add support for new SIL interface.
//
// ****************************************************************************
avtContract_p
avtIndexSelectFilter::ModifyContract(avtContract_p spec)
{
    avtContract_p rv = new avtContract(spec);

    amrMesh = GetInput()->GetInfo().GetAttributes().GetMeshType() == AVT_AMR_MESH;
    // AMR group (level) selection is handled via AMR indices rather than
    // by restricting the SIL.
    bool skipSILRestriction = amrMesh && groupCategory;

    if (!atts.GetUseWholeCollection() && !skipSILRestriction)
    {
        std::string category = atts.GetCategoryName();
        std::string subset = atts.GetSubsetName();
        avtSILRestriction_p silr = spec->GetDataRequest()->GetRestriction();
        avtSILRestriction_p old_values = new avtSILRestriction(silr);
        avtSILRestrictionTraverser trav(old_values);

        int collectionID = silr->GetCollectionIndex(category, silr->GetTopSet());
        int setID = silr->GetSetIndex(subset, collectionID);
        if (trav.UsesSetData(setID) == NoneUsed)
        {
            EXCEPTION1(InvalidSetException, subset.c_str());
        }
        TRY
        {
            // If we've got species info, we need to maintain that.
            // So see which species are on.
            vector<int>  species;
            vector<bool> setState;
            int topset = silr->GetTopSet();
            avtSILSet_p pTopset = silr->GetSILSet(topset);
            const vector<int> &mapsOut = pTopset->GetRealMapsOut();
            avtSILCollection_p speciesColl = NULL;
            for (size_t i = 0 ; i < mapsOut.size() ; i++)
            {
                avtSILCollection_p coll = silr->GetSILCollection(mapsOut[i]);
                if (coll->GetRole() == SIL_SPECIES)
                {
                    speciesColl = coll;
                }
            }
            if (*speciesColl != NULL)
            {
                for (int i = 0 ; i < speciesColl->GetNumberOfSubsets() ; i++)
                {
                    // BUGFIX: record the species set ids alongside their
                    // on/off states.  Previously only setState was filled,
                    // so 'species' stayed empty and the restore loop below
                    // never ran, losing the species selection.
                    species.push_back(speciesColl->GetSubset(i));
                    setState.push_back(trav.UsesData(speciesColl->GetSubset(i)));
                }
            }
            // End logic for seeing which species is on.

            // Restrict the SIL to the single requested set.
            silr = rv->GetDataRequest()->GetRestriction();
            silr->TurnOffAll();
            silr->TurnOnSet(setID);

            // We've just turned on an entire set, but some parts
            // (materials) may have been turned off before, so ensure
            // that remains the case.
            int numSets = silr->GetNumSets();
            for (int i = 0; i < numSets ; i++)
            {
                if (setID == i)
                    continue;
                if (trav.UsesSetData(i) == NoneUsed)
                    silr->TurnOffSet(i);
            }

            // Turn sets back on if species are on.
            for (size_t i = 0 ; i < species.size() ; i++)
                if (setState[i])
                    silr->TurnOnSet(species[i]);
        }
        CATCH(InvalidVariableException)
        {
            // If for some reason the GetSetIndex fails.
            RETHROW;
        }
        ENDTRY
    }

    // For AMR group selection, request the indices of the chosen level.
    if (amrMesh && groupCategory && amrLevel != -1)
    {
        rv->GetDataRequest()->SetNeedAMRIndices(amrLevel);
    }

    //
    // Decide whether we need the original structured indices: always when
    // zones are not preserved or MIR will run; otherwise only when material
    // selection will destroy zones.
    //
    if (!GetInput()->GetInfo().GetValidity().GetZonesPreserved())
    {
        rv->GetDataRequest()->SetNeedStructuredIndices(true);
    }
    else if (rv->GetDataRequest()->MustDoMaterialInterfaceReconstruction())
    {
        rv->GetDataRequest()->SetNeedStructuredIndices(true);
    }
    else
    {
        bool needSI = false;
        avtSILRestriction_p silr =rv->GetDataRequest()->GetRestriction();
        avtSILRestrictionTraverser trav(silr);
        if (atts.GetUseWholeCollection())
        {
            intVector chunks;
            trav.GetDomainList(chunks);
            for (size_t i = 0; i < chunks.size(); i++)
            {
                bool hasMats = false;
                trav.GetMaterials(chunks[i], hasMats);
                needSI |= hasMats;
            }
        }
        else
        {
            needSI = !trav.UsesAllMaterials();
        }
        if (needSI)
        {
            rv->GetDataRequest()->SetNeedStructuredIndices(true);
        }
    }

    //
    // Indicate this operator's data selection
    //
    avtLogicalSelection *sel = new avtLogicalSelection;
    switch (atts.GetDim())
    {
        case IndexSelectAttributes::OneD:   sel->SetNDims(1); break;
        case IndexSelectAttributes::TwoD:   sel->SetNDims(2); break;
        case IndexSelectAttributes::ThreeD: sel->SetNDims(3); break;
    }

    int vec[3];
    vec[0] = atts.GetXMin();
    vec[1] = atts.GetYMin();
    vec[2] = atts.GetZMin();
    sel->SetStarts(vec);

    // IndexSelect is nodal based and is inclusive. As such, the user
    // can have the min == max. Which would give a slice from a volume.
    // avtLogicalSelection's stops are nodal and are inclusive
    // also, we need to deal with using '-1' to mean 'max'
    if (atts.GetXMax() == -1)
        vec[0] = -1;
    else
        vec[0] = atts.GetXMax();

    if (atts.GetYMax() == -1)
        vec[1] = -1;
    else
        vec[1] = atts.GetYMax();

    if (atts.GetZMax() == -1)
        vec[2] = -1;
    else
        vec[2] = atts.GetZMax();
    sel->SetStops(vec);

    vec[0] = atts.GetXIncr();
    vec[1] = atts.GetYIncr();
    vec[2] = atts.GetZIncr();
    sel->SetStrides(vec);

    // Remember the selection id so ExecuteData can detect whether the
    // database plugin applied it for us.
    selID = rv->GetDataRequest()->AddDataSelection(sel);

    // Node pick needs original node numbers downstream of this operator.
    if (rv->GetDataRequest()->MayRequireNodes())
        rv->GetDataRequest()->TurnNodeNumbersOn();

    return rv;
}
// ****************************************************************************
// Method: avtIndexSelectFilter::PreExecute
//
// Purpose:
// Called before Execute, which in turn calls ExecuteData.
//
// Programmer: Hank Childs
// Creation: June 29, 2002
//
// ****************************************************************************
void
avtIndexSelectFilter::PreExecute(void)
{
    avtPluginDataTreeIterator::PreExecute();
    atLeastOneThreadSuccessfullyExecuted = false;

    // Once zones have been destroyed upstream (e.g. by material selection),
    // the cell-by-cell fallback in ExecuteData can only honor a stride of 1,
    // so warn the user if a larger increment was requested.
    bool zonesPreserved =
        GetInput()->GetInfo().GetValidity().GetZonesPreserved();
    bool unitStride = (atts.GetXIncr() == 1 && atts.GetYIncr() == 1 &&
                       atts.GetZIncr() == 1);
    if (!zonesPreserved && !unitStride)
    {
        avtCallback::IssueWarning("The data was already modified "
                  "before the index select operator was applied."
                  " It is only possible to do increments of 1.");
    }
}
// ****************************************************************************
// Method: avtIndexSelectFilter::PostExecute
//
// Purpose:
// Called after Execute (which in turn called ExecuteData).
//
// Programmer: Hank Childs
// Creation: June 29, 2002
//
// ****************************************************************************
void
avtIndexSelectFilter::PostExecute(void)
{
    avtPluginDataTreeIterator::PostExecute();

    int topoDim =
        GetInput()->GetInfo().GetAttributes().GetTopologicalDimension();
    int dstype = GetInput()->GetInfo().GetAttributes().GetMeshType();

    // If the initial mesh is unstructured then we are cheating and
    // processing an unstructured mesh, so skip checking for a topology
    // dimension reduction.
    if (topoDim > 0 && dstype != AVT_UNSTRUCTURED_MESH &&
        atLeastOneThreadSuccessfullyExecuted)
    {
        int newdim =
            GetInput()->GetInfo().GetAttributes().GetTopologicalDimension();
        int dim = atts.GetDim();

        if (dim != IndexSelectAttributes::OneD &&
            dim != IndexSelectAttributes::TwoD &&
            dim != IndexSelectAttributes::ThreeD)
        {
            EXCEPTION0(ImproperUseException);
        }

        // Each axis whose min equals its max collapses one topological
        // dimension (e.g. a 3D slab with zmin == zmax becomes a slice).
        // An N-D selection only examines its first N axes.
        if (dim == IndexSelectAttributes::ThreeD &&
            atts.GetZMin() == atts.GetZMax())
            --newdim;
        if (dim != IndexSelectAttributes::OneD &&
            atts.GetYMin() == atts.GetYMax())
            --newdim;
        if (atts.GetXMin() == atts.GetXMax())
            --newdim;

        if (newdim < 0)
            newdim = 0;
        GetOutput()->GetInfo().GetAttributes().SetTopologicalDimension(newdim);
    }

    if (GetOutput()->GetInfo().GetAttributes().GetTopologicalDimension() > 0 &&
        (atts.GetXWrap() || atts.GetYWrap() || atts.GetZWrap()))
    {
        // Wrapping replicates the first slice as the last, which changes
        // the spatial extents, so recompute them from the actual output.
        Replicate();
        avtDataAttributes &outAtts = GetOutput()->GetInfo().GetAttributes();
        outAtts.GetOriginalSpatialExtents()->Clear();
        outAtts.GetDesiredSpatialExtents()->Clear();
        outAtts.GetActualSpatialExtents()->Clear();
        double bounds[6];
        avtDataset_p ds = GetTypedOutput();
        avtDatasetExaminer::GetSpatialExtents(ds, bounds);
        outAtts.GetThisProcsOriginalSpatialExtents()->Set(bounds);
    }
}
// ****************************************************************************
// Method: avtIndexSelectFilter::ReleaseData
//
// Purpose:
// Releases the problem size data associated with this filter.
//
// Programmer: Hank Childs
// Creation: September 10, 2002
//
// Modifications:
//
// Hank Childs, Fri Mar 4 08:12:25 PST 2005
// Do not set outputs of filters to NULL, since this will prevent them
// from re-executing correctly in DLB-mode.
//
// Hank Childs, Fri Mar 11 07:37:05 PST 2005
// Fix non-problem size leak introduced with last fix.
//
// Kathleen Bonnell, Mon Jan 30 15:10:26 PST 2006
// Handle vtkMaskPoints.
//
// ****************************************************************************
void
avtIndexSelectFilter::ReleaseData(void)
{
    // Let the base class release its problem-size data first, then drop
    // the cached per-block logical spaces gathered for wrapping.
    avtPluginDataTreeIterator::ReleaseData();
    lspace.clear();
}
// ****************************************************************************
// Method: avtIndexSelectFilter::UpdateDataObjectInfo
//
// Purpose:
// Indicates that original nodes are required for Pick, and that
// original zones cannot be used with Pick.
//
// Programmer: Kathleen Bonnell
// Creation: August 4, 2005
//
// Modifications:
// Kathleen Bonnell, Mon May 1 08:57:41 PDT 2006
// Changed call from OrigNodes to OrigElements, indicating that either
// nodes or zones are required, or both.
//
// Hank Childs, Sat Mar 3 16:28:16 PST 2007
// Put in about data attributes that we removed ghost data.
//
// Kathleen Biagas, Tue Apr 1 14:17:22 PDT 2014
// Invalidate zones.
//
// ****************************************************************************
void
avtIndexSelectFilter::UpdateDataObjectInfo(void)
{
//
// Node Pick returns wrong results on an Index selected plot unless it has
// original node numbers. (The points are not transformed, but their
// numbering is probably different. So set a flag that specifies that
// they are needed for pick.
//
GetOutput()->GetInfo().GetAttributes().SetOrigElementsRequiredForPick(true);
//
// Zone Pick CANNOT use Original zone numbers on an Index selected plot
// because it may be the case that MANY original zones map to a SINGLE
// current zone. So set a flag specifying that the original zones
// array CANNOT be used with pick.
//
GetOutput()->GetInfo().GetAttributes().SetCanUseOrigZones(false);
GetOutput()->GetInfo().GetAttributes().SetContainsGhostZones(AVT_NO_GHOSTS);
//
// Indicate zone numbering probably changed
//
GetOutput()->GetInfo().GetValidity().InvalidateZones();
}
// ****************************************************************************
// Method: avtIndexSelectFilter::FilterUnderstandsTransformedRectMesh
//
// Purpose:
// If this filter returns true, this means that it correctly deals
// with rectilinear grids having an implied transform set in the
// data attributes. It can do this conditionally if desired.
//
// Arguments:
// none
//
// Programmer: Jeremy Meredith
// Creation: February 15, 2007
//
// ****************************************************************************
bool
avtIndexSelectFilter::FilterUnderstandsTransformedRectMesh()
{
    // Index selection operates purely in logical (i,j,k) space, so an
    // implied transform on a rectilinear mesh does not affect it.
    const bool understandsTransform = true;
    return understandsTransform;
}
// ****************************************************************************
// Method: avtIndexSelectFilter::VerifyInput
//
// Purpose:
// Throw an exception if user-selected domain is out of range.
//
// Programmer: Kathleen Bonnell
// Creation: June 7, 2007
//
// Modifications:
// Kathleen Bonnell, Thu Jun 21 16:31:59 PDT 2007
// Determine amrLevel during set validation.
//
// Eric Brugger, Wed Dec 3 08:23:58 PST 2008
//    I modified the routine to set groupCategory to true when "Use Whole
//    Collection" was set, so that it would index select based on the
// whole mesh (using base_index) and not on a per block basis.
//
// Hank Childs, Mon Dec 14 16:55:10 PST 2009
// Updated for new SIL interface.
//
// ****************************************************************************
void
avtIndexSelectFilter::VerifyInput()
{
    // "Use Whole Collection" (or the special "Whole" subset) means we index
    // select against the whole mesh via base_index, i.e. by group.
    if (atts.GetUseWholeCollection() || atts.GetSubsetName() == "Whole")
    {
        groupCategory = true;
        return;
    }

    std::string categoryName = atts.GetCategoryName();
    std::string subsetName = atts.GetSubsetName();
    avtSILRestriction_p silr = GetOriginatingSource()->
                               GetFullDataRequest()->GetRestriction();
    int setID, collectionID;
    TRY
    {
        collectionID = silr->GetCollectionIndex(categoryName,
                                                silr->GetTopSet());
        setID = silr->GetSetIndex(subsetName, collectionID);
        avtSILCollection_p coll = silr->GetSILCollection(collectionID);
        if (coll->GetRole() != SIL_DOMAIN && coll->GetRole() != SIL_BLOCK)
        {
            //
            // May occur if user types in a category name.
            //
            EXCEPTION1(InvalidCategoryException, categoryName.c_str());
        }

        // Confirm the set actually belongs to this collection; for block
        // collections, remember its position as the AMR level.
        bool validSet = false;
        int nEls = coll->GetNumberOfSubsets();
        for (int i = 0; i < nEls && !validSet; i++)
        {
            if (setID == coll->GetSubset(i))
            {
                validSet = true;
                if (coll->GetRole() == SIL_BLOCK)
                    amrLevel = i;
            }
        }
        if (!validSet)
        {
            //
            // May occur if user types in a set name.
            //
            EXCEPTION2(InvalidSetException, categoryName.c_str(),
                       subsetName.c_str());
        }
        if (coll->GetRole() == SIL_BLOCK)
            groupCategory = true;
    }
    CATCH(InvalidVariableException)
    {
        //
        // SIL could not match category name or subset name to an id.
        //
        RETHROW;
    }
    ENDTRY
}
// Map an axis index (0/1/2 -> X/Y/Z) onto the matching coordinate array
// of a rectilinear grid; any other index yields a null pointer.
vtkDataArray *avtIndexSelectFilter::GetCoordinates( vtkRectilinearGrid *grid,
                                                    unsigned int coor)
{
    switch (coor)
    {
      case 0:  return grid->GetXCoordinates();
      case 1:  return grid->GetYCoordinates();
      case 2:  return grid->GetZCoordinates();
      default: return 0;
    }
}
// Install 'coordinates' as the X/Y/Z coordinate array of a rectilinear
// grid, selected by axis index 0/1/2; other indices are silently ignored.
void avtIndexSelectFilter::SetCoordinates( vtkRectilinearGrid *grid,
                                           vtkDataArray *coordinates,
                                           unsigned int coor)
{
    switch (coor)
    {
      case 0: grid->SetXCoordinates(coordinates); break;
      case 1: grid->SetYCoordinates(coordinates); break;
      case 2: grid->SetZCoordinates(coordinates); break;
      default: break;
    }
}
// ****************************************************************************
// Method: avtIndexSelectFilter::Replicate
//
// Purpose:
// Replicates the first slice to the last slice for wrapping.
//
// Programmer: Hank Childs/Allen Sanderson
// Creation: April 14, 2010
//
// Modifications:
// Kathleen Biagas, Tue Aug 21 16:14:59 MST 2012
// Preserve coordinate type.
//
//    Kathleen Biagas, Tue June 9 09:36:27 MST 2015
//    Changed signature.
//
// ****************************************************************************
void
avtIndexSelectFilter::Replicate()
{
    avtDataset_p ds = GetTypedOutput();
#ifdef PARALLEL
    // Unify the global logical extents across ranks: element-wise min of
    // the three lower bounds, element-wise max of the three upper bounds.
    int gd[6];
    MPI_Allreduce(&globalDims[0], &gd[0], 3, MPI_INT, MPI_MIN, VISIT_MPI_COMM);
    MPI_Allreduce(&globalDims[3], &gd[3], 3, MPI_INT, MPI_MAX, VISIT_MPI_COMM);
    for (int i = 0; i < 6; ++i)
        globalDims[i] = gd[i];
#endif
    int myRank = PAR_Rank();
    lspace.clear();
    int nblocks = 0;
    vtkDataSet **blocks = ds->GetDataTree()->GetAllLeaves(nblocks);
    vector<int> domainIds;
    vector<string> labels;
    ds->GetDataTree()->GetAllDomainIds(domainIds);
    ds->GetDataTree()->GetAllLabels(labels);
    bool wrap[3] = {atts.GetXWrap(), atts.GetYWrap(), atts.GetZWrap()};
    bool haveNewOutput = false;
    int dataObjectType = -1;
    bool okay = true;
    // Pass 1: rectilinear blocks can be wrapped locally right away;
    // structured blocks only record their logical extents (LogicalSpaces)
    // so that matching min/max slices can be paired up later.
    for (int n = 0; n < nblocks && okay; ++n)
    {
        vtkDataSet *ds = blocks[n];
        dataObjectType = ds->GetDataObjectType();
        int dims[3];
        // "base_index" gives this block's logical origin in the whole mesh;
        // without it we cannot place the block, so bail out.
        vtkIntArray *bi = vtkIntArray::SafeDownCast(
                                  ds->GetFieldData()->GetArray("base_index"));
        if (!bi)
        {
            okay = false;
            break;
        }
        int *mins = (int*)bi->GetVoidPointer(0);
        int max[3];
        if (dataObjectType == VTK_RECTILINEAR_GRID)
        {
            vtkRectilinearGrid *rgrid = vtkRectilinearGrid::SafeDownCast(ds);
            rgrid->GetDimensions(dims);
            bool doReplicate = false;
            for (int i = 0; i < 3; ++i)
            {
                max[i] = mins[i]+dims[i] -1;
                // Replicate only if this block touches the global max
                // boundary in a wrapped direction.
                doReplicate |= (wrap[i] && max[i] == globalDims[i+3]);
            }
            if (doReplicate)
            {
                blocks[n] = Replicate(rgrid, wrap, dims, max);
                haveNewOutput = true;
            }
        }
        else if (dataObjectType == VTK_STRUCTURED_GRID)
        {
            vtkStructuredGrid::SafeDownCast(ds)->GetDimensions(dims);
            avtIndexSelectFilter::LogicalSpaces ls;
            for (int i = 0; i < 3; ++i)
            {
                max[i] = mins[i]+dims[i] -1;
            }
            ls.SetMins(mins);
            ls.SetMaxs(max);
            ls.rank = myRank;
            ls.block = domainIds[n];
            lspace.push_back(ls);
        }
        else
        {
            // Only rectilinear and structured grids are supported.
            okay = false;
            break;
        }
    }
#ifdef PARALLEL
    okay = (bool) UnifyMaximumValue((int)okay);
    dataObjectType = UnifyMaximumValue(dataObjectType);
#endif
    if (!okay)
    {
        lspace.clear();
        return;
    }
    if (dataObjectType == VTK_STRUCTURED_GRID)
    {
#ifdef PARALLEL
        // Gather every rank's LogicalSpaces records onto rank 0 so that
        // the min/max slice matching below can be done globally.
        int tags[2];
        GetUniqueMessageTags(tags, 2);
        int mpiNBTag = tags[0];
        int mpiDataTag = tags[1];
        vector<avtIndexSelectFilter::LogicalSpaces> maxSpaces;
        vector<avtIndexSelectFilter::LogicalSpaces> minSpaces;
        if (myRank == 0)
        {
            MPI_Status stat;
            MPI_Status stat2;
            for (int i = 1; i < PAR_Size(); ++i)
            {
                int nb;
                MPI_Recv(&nb, 1, MPI_INT, MPI_ANY_SOURCE, mpiNBTag,
                         VISIT_MPI_COMM, &stat);
                for (int j = 0; j < nb; ++j)
                {
                    // 8 ints per record: rank, block, mins[3], maxs[3]
                    // (relies on LogicalSpaces' member layout).
                    int data[8];
                    MPI_Recv(data, 8, MPI_INT, stat.MPI_SOURCE,
                             mpiDataTag, VISIT_MPI_COMM, &stat2);
                    avtIndexSelectFilter::LogicalSpaces ls;
                    ls.rank = data[0];
                    ls.block = data[1];
                    ls.SetMins(&data[2]);
                    ls.SetMaxs(&data[5]);
                    lspace.push_back(ls);
                }
            }
        }
        else
        {
            MPI_Send(&nblocks, 1, MPI_INT, 0, mpiNBTag, VISIT_MPI_COMM);
            for (int i = 0; i < nblocks; ++i)
            {
                MPI_Send(&(lspace[i].rank), 8, MPI_INT, 0, mpiDataTag,
                          VISIT_MPI_COMM);
            }
        }
        Barrier();
#endif
        // For each wrapped direction, rank 0 pairs every block that sits on
        // the global max boundary with the block holding the corresponding
        // global-min slice (same extents in the other two directions).
        for (int i = 0; i < 3 && okay; ++i)
        {
            if(!wrap[i])
                continue;
            if (myRank == 0)
            {
                for (size_t j = 0; j < lspace.size(); ++j)
                {
                    if (lspace[j].MatchesMaxAt(i, globalDims[i+3]))
                    {
                        int findMin[3];
                        for (int k = 0; k < 3; ++k)
                        {
                            if (k == i)
                                findMin[k] = globalDims[i];
                            else
                                findMin[k] = lspace[j].mins[k];
                        }
                        for (size_t k = 0; k < lspace.size(); ++k)
                        {
                            if (lspace[k].MatchesMins(findMin))
                            {
#ifdef PARALLEL
                                // Defer: the pair may live on other ranks.
                                maxSpaces.push_back(lspace[j]);
                                minSpaces.push_back(lspace[k]);
#else
                                // Serial: both blocks are local, wrap now.
                                int maxBlock = lspace[j].block;
                                int minBlock = lspace[k].block;
                                blocks[maxBlock] = Replicate(i, blocks[minBlock],
                                                             blocks[maxBlock]);
                                haveNewOutput = true;
#endif
                                break;
                            }
                        }
                    }
                }
            } // PAR_Rank
#ifdef PARALLEL
            Barrier();
            // Broadcast the matched pairs so every rank knows whether it
            // owns the min side, the max side, both, or neither.
            int numMatches = (int)maxSpaces.size();
            BroadcastInt(numMatches);
            maxSpaces.resize(numMatches);
            minSpaces.resize(numMatches);
            for (int j = 0; j < numMatches; ++j)
            {
                BroadcastIntArray(&(maxSpaces[j].rank), 8);
                BroadcastIntArray(&(minSpaces[j].rank), 8);
            }
            for (size_t j = 0; j < maxSpaces.size(); ++j)
            {
                int tags[2];
                GetUniqueMessageTags(tags, 2);
                int mpiSizeTag = tags[0];
                int mpiDSTag = tags[1];
                size_t maxBlock = 0;
                size_t minBlock = 0;
                if (myRank == maxSpaces[j].rank && myRank == minSpaces[j].rank)
                {
                    // Both min and max blocks reside on this processor
                    for (size_t k = 0; k < domainIds.size(); ++k)
                    {
                        if (domainIds[k] == maxSpaces[j].block)
                            maxBlock = k;
                        if (domainIds[k] == minSpaces[j].block)
                            minBlock = k;
                    }
                    blocks[maxBlock] = Replicate(i,
                                                 blocks[minBlock],
                                                 blocks[maxBlock]);
                    haveNewOutput = true;
                }
                else if (myRank == maxSpaces[j].rank)
                {
                    // Need to recv the vtkDataSet from proc with min;
                    // the dataset is shipped as a serialized VTK stream.
                    for (size_t k = 0; k < domainIds.size(); ++k)
                    {
                        if (domainIds[k] == maxSpaces[j].block)
                            maxBlock = k;
                    }
                    MPI_Status stat;
                    int size;
                    vtkDataSetReader *reader = vtkDataSetReader::New();
                    reader->ReadFromInputStringOn();
                    MPI_Recv(&size, 1, MPI_INT, minSpaces[j].rank, mpiSizeTag,
                             VISIT_MPI_COMM, &stat);
                    char *str = new char[size];
                    MPI_Recv(str, size, MPI_CHAR, minSpaces[j].rank, mpiDSTag,
                             VISIT_MPI_COMM, &stat);
                    vtkCharArray *charArray = vtkCharArray::New();
                    charArray->SetArray((char*)str, size, 1);
                    reader->SetInputArray(charArray);
                    reader->Update();
                    // Do the replication
                    blocks[maxBlock] = Replicate(i,
                                                 reader->GetOutput(),
                                                 blocks[maxBlock]);
                    haveNewOutput = true;
                    // cleanup
                    delete [] str;
                    reader->Delete();
                    charArray->Delete();
                }
                else if (myRank == minSpaces[j].rank)
                {
                    for (size_t k = 0; k < domainIds.size(); ++k)
                    {
                        if (domainIds[k] == minSpaces[j].block)
                            minBlock = k;
                    }
                    // Need to send the vtkDataSet to proc with max
                    vtkDataSetWriter *writer = vtkDataSetWriter::New();
                    writer->WriteToOutputStringOn();
                    writer->SetFileTypeToBinary();
                    writer->SetInputData(blocks[minBlock]);
                    writer->Write();
                    int size = writer->GetOutputStringLength();
                    char *str = writer->RegisterAndGetOutputString();
                    MPI_Send(&size, 1, MPI_INT, maxSpaces[j].rank, mpiSizeTag,
                             VISIT_MPI_COMM);
                    MPI_Send(str, size, MPI_CHAR, maxSpaces[j].rank, mpiDSTag,
                             VISIT_MPI_COMM);
                    delete [] str;
                    writer->Delete();
                }
            }
#endif
        } // wrap direction
    }
    // If any block was replaced by a replicated copy, rebuild the output
    // data tree from the (partially) new leaf array.
    if (haveNewOutput)
    {
        avtDataTree_p tree;
        if (!labels.empty())
        {
            tree = new avtDataTree(nblocks, blocks, domainIds, labels);
        }
        else
        {
            tree = new avtDataTree(nblocks, blocks, domainIds);
        }
        SetOutputDataTree(tree);
    }
}
// ****************************************************************************
// Method: avtIndexSelectFilter::Replicate
//
// Purpose:
// Replicates the first slice to the last slice for wrapping.
//
// Arguments:
// wrap The wrap direction (0->i, 1->j, 2->k).
// min_ds The dataset with the minimum slice for the wrap direction.
// max_ds The dataset with the maximum slice for the wrap direction.
//
// Programmer: Hank Childs/Allen Sanderson
// Creation: April 14, 2010
//
// Modifications:
// Kathleen Biagas, Tue Aug 21 16:14:59 MST 2012
// Preserve coordinate type.
//
// Kathleen biagas, Tue June 9 09:36:27 MST 2015
// Changed signature (to handle multi-block and parallel), this method
// now only handles vtkStructuredGrid.
//
// ****************************************************************************
vtkDataSet *
avtIndexSelectFilter::Replicate(int wrap, vtkDataSet *min_ds,
                                vtkDataSet *max_ds)
{
    // Both inputs must be structured grids; otherwise return the max
    // dataset unmodified.
    vtkStructuredGrid *min_sgrid = vtkStructuredGrid::SafeDownCast(min_ds);
    vtkStructuredGrid *max_sgrid = vtkStructuredGrid::SafeDownCast(max_ds);
    if (min_sgrid == NULL || max_sgrid == NULL)
    {
        debug3 << "IndexSelect::Replicate did not receive structured "
               << "grid input." << endl;
        return max_ds;
    }
    int dims_in[3] = {0,0,0};
    int dims_out[3] = {0,0,0};
    vtkDataSet *out_ds = max_ds;
    // Learn about the input grid; the output grows by one node plane in
    // the wrap direction.
    max_sgrid->GetDimensions(dims_in);
    dims_out[0] = dims_in[0];
    dims_out[1] = dims_in[1];
    dims_out[2] = dims_in[2];
    dims_out[wrap]++;
    vtkPoints *pts = max_sgrid->GetPoints();
    vtkPoints *min_pts = vtkStructuredGrid::SafeDownCast(min_ds)->GetPoints();
    // Preserve the coordinate type of the input points.
    vtkPoints *new_pts = vtkPoints::New(pts->GetDataType());
    vtkStructuredGrid *out_sg = vtkStructuredGrid::New();
    out_sg->SetDimensions(dims_out);
    out_sg->SetPoints(new_pts);
    new_pts->Delete();
    new_pts->SetNumberOfPoints(dims_out[0]*dims_out[1]*dims_out[2]);
    // Copy the original points over.  The modulo maps the extra slice
    // (index == dims_in) back to index 0, and those points are taken from
    // the min-side dataset so that the mesh wraps around.
    for (int i=0; i<dims_out[0]; ++i)
    {
        int i0 = i % dims_in[0];
        for (int j=0; j<dims_out[1]; ++j)
        {
            int j0 = j % dims_in[1];
            for (int k=0; k<dims_out[2]; ++k)
            {
                int k0 = k % dims_in[2];
                int idx_in = k0*dims_in[1]*dims_in[0] + j0*dims_in[0] + i0;
                int idx_out = k*dims_out[1]*dims_out[0] + j*dims_out[0] + i;
                if (i < dims_in[0] && j < dims_in[1] && k < dims_in[2])
                    new_pts->SetPoint(idx_out, pts->GetPoint(idx_in));
                else
                    new_pts->SetPoint(idx_out, min_pts->GetPoint(idx_in));
            }
        }
    }
    // NOTE(review): out_ds still equals max_ds at this point, so this
    // Delete() never fires; it appears to be defensive leftover code.
    if( out_ds != max_ds )
        out_ds->Delete();
    out_ds = out_sg;
    // Replicate point/cell arrays to match the enlarged dimensions.
    CopyData(max_ds, out_ds, dims_in, dims_out);
    return out_ds;
}
// ****************************************************************************
// Method: avtIndexSelectFilter::Replicate
//
// Purpose:
// Replicates the first slice to the last slice for wrapping.
//
// Arguments:
// rgrid The input dataset.
// wrap Whether or not to wrap in i,j,k.
// in_dims The dimensions of the input grid.
//
// Programmer: Hank Childs/Allen Sanderson
// Creation: April 14, 2010
//
// Modifications:
// Kathleen Biagas, Tue Aug 21 16:14:59 MST 2012
// Preserve coordinate type.
//
// Kathleen Biagas, Tue June 9 09:36:27 MST 2015
// Extracted from original method to only handle rectilinear grid.
//
//    Kathleen Biagas, Wed June 10 10:45:43 MST 2015
//    Fixed logic so that wrapping for a given direction doesn't happen
//    if the input's max dims do not match the global max dims.
//
// ****************************************************************************
vtkDataSet *
avtIndexSelectFilter::Replicate(vtkRectilinearGrid *rgrid, bool wrap[3],
                                int dims_in[3], int max[3])
{
    vtkRectilinearGrid *out = rgrid;
    // The input is only deep-copied the first time a dimension actually
    // needs wrapping; if nothing wraps, the original grid is returned.
    bool haveCopied = false;
    int dims_out[3] = {dims_in[0], dims_in[1], dims_in[2] };
    // Do each dimension individually.
    for( unsigned int d=0; d<3; ++d )
    {
        if( !wrap[d] )
            continue;
        // does this dataset have the max dims?
        if (max[d] != globalDims[d+3])
            continue;
        if (!haveCopied)
        {
            // Preserve the concrete grid type of the input.
            out = rgrid->NewInstance();
            out->DeepCopy(rgrid);
            haveCopied = true;
        }
        dims_out[d]++;
        // Append one coordinate, extrapolated from the spacing of the last
        // two existing coordinates.
        // NOTE(review): assumes dims_in[d] >= 2 (reads index dims_in[d]-2).
        vtkDataArray *coor = GetCoordinates(out, d);
        double lastVal = coor->GetTuple1(dims_in[d]-1);
        double prevVal = coor->GetTuple1(dims_in[d]-2);
        coor->InsertNextTuple1(lastVal + (lastVal-prevVal));
    }
    if (haveCopied)
    {
        out->SetDimensions(dims_out);
        // Replicate point/cell arrays to match the enlarged dimensions.
        CopyData(rgrid, out, dims_in, dims_out);
    }
    return out;
}
// ****************************************************************************
// Method: avtIndexSelectFilter::CopyData
//
// Purpose:
// Copies Point and Cell Data from input to output datasets, utilizing
// input and output structured dimensions.
//
// Arguments:
// in_ds The input dataset.
// out_ds The output dataset.
// in_dims Dimensions for input dataset.
// out_dims Dimensions for output dataset.
//
// Programmer: Kathleen Biagas
// Creation: June 9, 2015
//
// Modifications:
//
// ****************************************************************************
// Copy point- and cell-centered arrays from in_ds to out_ds, where out_ds
// may be larger than in_ds in any direction.  Output entries beyond the
// input extent wrap back (via modulo) to the start of the input, which is
// what replication of the first slice onto the last requires.
void
avtIndexSelectFilter::CopyData(vtkDataSet *in_ds, vtkDataSet *out_ds, int dims_in[3], int dims_out[3])
{
    // Point-centered data: one tuple per node.
    vtkPointData *srcPD = in_ds->GetPointData();
    vtkPointData *dstPD = out_ds->GetPointData();
    dstPD->CopyAllocate(srcPD, dims_out[0]*dims_out[1]*dims_out[2]);
    for (int i = 0; i < dims_out[0]; ++i)
    {
        int si = i % dims_in[0];
        for (int j = 0; j < dims_out[1]; ++j)
        {
            int sj = j % dims_in[1];
            for (int k = 0; k < dims_out[2]; ++k)
            {
                int sk = k % dims_in[2];
                int srcIdx = (sk*dims_in[1] + sj)*dims_in[0] + si;
                int dstIdx = (k*dims_out[1] + j)*dims_out[0] + i;
                dstPD->CopyData(srcPD, srcIdx, dstIdx);
            }
        }
    }
    // Cell-centered data: cell counts are node counts minus one, clamped
    // to at least 1 so degenerate (flat) dimensions still work.
    int cellsIn[3], cellsOut[3];
    for (int d = 0; d < 3; ++d)
    {
        cellsIn[d]  = (dims_in[d]-1  < 1) ? 1 : dims_in[d]-1;
        cellsOut[d] = (dims_out[d]-1 < 1) ? 1 : dims_out[d]-1;
    }
    vtkCellData *dstCD = out_ds->GetCellData();
    vtkCellData *srcCD = in_ds->GetCellData();
    dstCD->CopyAllocate(srcCD, cellsOut[0]*cellsOut[1]*cellsOut[2]);
    for (int i = 0; i < cellsOut[0]; ++i)
    {
        int si = i % cellsIn[0];
        for (int j = 0; j < cellsOut[1]; ++j)
        {
            int sj = j % cellsIn[1];
            for (int k = 0; k < cellsOut[2]; ++k)
            {
                int sk = k % cellsIn[2];
                int srcIdx = (sk*cellsIn[1] + sj)*cellsIn[0] + si;
                int dstIdx = (k*cellsOut[1] + j)*cellsOut[0] + i;
                dstCD->CopyData(srcCD, srcIdx, dstIdx);
            }
        }
    }
}
// ****************************************************************************
//  Method: avtIndexSelectFilter::LogicalSpaces constructor
//
//  Purpose: Helper class for the 'wrap' option.  Initializes all logical
//           extents to -1 (i.e. not yet assigned).
//
//  Programmer: Kathleen Biagas
//  Creation:   June 9, 2015
//
// ****************************************************************************
avtIndexSelectFilter::LogicalSpaces::LogicalSpaces()
{
    for (int d = 0; d < 3; ++d)
    {
        mins[d] = -1;
        maxs[d] = -1;
    }
}
// ****************************************************************************
//  Method: avtIndexSelectFilter::LogicalSpaces destructor
//
//  Programmer: Kathleen Biagas
//  Creation:   June 9, 2015
//
// ****************************************************************************
avtIndexSelectFilter::LogicalSpaces::~LogicalSpaces()
{
    // Nothing to release: members are plain ints/int arrays.
}
// ****************************************************************************
//  Method: avtIndexSelectFilter::LogicalSpaces::SetMins
//
//  Purpose:
//    Copies the three minimum logical extents from the passed array.
//
//  Arguments:
//    _min      The minimum values (must hold at least 3 ints).
//
//  Programmer: Kathleen Biagas
//  Creation:   June 9, 2015
//
// ****************************************************************************
void
avtIndexSelectFilter::LogicalSpaces::SetMins(int *_min)
{
    for (int d = 0; d < 3; ++d)
        mins[d] = _min[d];
}
// ****************************************************************************
//  Method: avtIndexSelectFilter::LogicalSpaces::SetMaxs
//
//  Purpose:
//    Copies the three maximum logical extents from the passed array.
//
//  Arguments:
//    _max      The maximum values (must hold at least 3 ints).
//
//  Programmer: Kathleen Biagas
//  Creation:   June 9, 2015
//
// ****************************************************************************
void
avtIndexSelectFilter::LogicalSpaces::SetMaxs(int *_max)
{
    for (int d = 0; d < 3; ++d)
        maxs[d] = _max[d];
}
// ****************************************************************************
//  Method: avtIndexSelectFilter::LogicalSpaces::HasMinAt
//
//  Returns:
//    True if 'idx' is a valid dimension (0..2) and the minimum extent in
//    that dimension equals the passed value.
//
//  Arguments:
//    idx       The dimension index.
//    _min      The minimum value to be matched.
//
//  Programmer: Kathleen Biagas
//  Creation:   June 9, 2015
//
// ****************************************************************************
bool
avtIndexSelectFilter::LogicalSpaces::HasMinAt(int idx, int _min)
{
    if (idx < 0 || idx >= 3)
        return false;
    return mins[idx] == _min;
}
// ****************************************************************************
//  Method: avtIndexSelectFilter::LogicalSpaces::MatchesMins
//
//  Returns:
//    True if all three minimum extents equal the passed values.
//
//  Arguments:
//    _min      The three minimum values to match.
//
//  Programmer: Kathleen Biagas
//  Creation:   June 9, 2015
//
// ****************************************************************************
bool
avtIndexSelectFilter::LogicalSpaces::MatchesMins(int _min[3])
{
    for (int d = 0; d < 3; ++d)
    {
        if (mins[d] != _min[d])
            return false;
    }
    return true;
}
// ****************************************************************************
//  Method: avtIndexSelectFilter::LogicalSpaces::MatchesMaxAt
//
//  Returns:
//    True if 'idx' is a valid dimension (0..2) and the maximum extent in
//    that dimension equals the passed value.
//
//  Arguments:
//    idx       The dimension index.
//    _max      The maximum value to be matched.
//
//  Programmer: Kathleen Biagas
//  Creation:   June 9, 2015
//
// ****************************************************************************
bool
avtIndexSelectFilter::LogicalSpaces::MatchesMaxAt(int idx, int _max)
{
    if (idx < 0 || idx >= 3)
        return false;
    return maxs[idx] == _max;
}
|
97ec7f0ad7938c2f66e2170fb8db20e14cf40ac2
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/python/scipy/py2/scipy/special/cephes/psi.c
|
ce7a722bf8f8afd725ecacfc43e54deb031ba6c8
|
[
"Python-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Qhull",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
C
| false
| false
| 4,723
|
c
|
psi.c
|
/* psi.c
*
* Psi (digamma) function
*
*
* SYNOPSIS:
*
* double x, y, psi();
*
* y = psi( x );
*
*
* DESCRIPTION:
*
* d -
* psi(x) = -- ln | (x)
* dx
*
* is the logarithmic derivative of the gamma function.
* For integer x,
* n-1
* -
* psi(n) = -EUL + > 1/k.
* -
* k=1
*
* This formula is used for 0 < n <= 10. If x is negative, it
* is transformed to a positive argument by the reflection
* formula psi(1-x) = psi(x) + pi cot(pi x).
* For general positive x, the argument is made greater than 10
* using the recurrence psi(x+1) = psi(x) + 1/x.
* Then the following asymptotic expansion is applied:
*
* inf. B
* - 2k
* psi(x) = log(x) - 1/2x - > -------
* - 2k
* k=1 2k x
*
* where the B2k are Bernoulli numbers.
*
* ACCURACY:
* Relative error (except absolute when |psi| < 1):
* arithmetic domain # trials peak rms
* IEEE 0,30 30000 1.3e-15 1.4e-16
* IEEE -30,0 40000 1.5e-15 2.2e-16
*
* ERROR MESSAGES:
* message condition value returned
* psi singularity x integer <=0 NPY_INFINITY
*/
/*
* Cephes Math Library Release 2.8: June, 2000
* Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier
*/
/*
* Code for the rational approximation on [1, 2] is:
*
* (C) Copyright John Maddock 2006.
* Use, modification and distribution are subject to the
* Boost Software License, Version 1.0. (See accompanying file
* LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
*/
#include "mconf.h"
/* Polynomial coefficients (in z = 1/x^2) for the asymptotic tail of psi(x),
 * derived from the Bernoulli-number series in the header comment above.
 * Evaluated by polevl() in psi_asy(). */
static double A[] = {
    8.33333333333333333333E-2,
    -2.10927960927960927961E-2,
    7.57575757575757575758E-3,
    -4.16666666666666666667E-3,
    3.96825396825396825397E-3,
    -8.33333333333333333333E-3,
    8.33333333333333333333E-2
};
static double digamma_imp_1_2(double x)
{
    /*
     * Rational approximation of digamma on [1, 2], taken from Boost:
     *
     *     digamma(x) = (x - root) * (Y + R(x - 1))
     *
     * where root is the positive zero of digamma, Y is a constant, and R
     * is a rational minimax fit optimized for low absolute error relative
     * to Y.  The root is split into three parts (root1 + root2 + root3)
     * so x - root keeps full precision near the zero.
     *
     * Maximum Deviation Found:               1.466e-18
     * At double precision, max error found:  2.452e-17
     */
    static const float Y = 0.99558162689208984f;
    static const double root1 = 1569415565.0 / 1073741824.0;
    static const double root2 = (381566830.0 / 1073741824.0) / 1073741824.0;
    static const double root3 = 0.9016312093258695918615325266959189453125e-19;
    static double P[] = {
        -0.0020713321167745952,
        -0.045251321448739056,
        -0.28919126444774784,
        -0.65031853770896507,
        -0.32555031186804491,
        0.25479851061131551
    };
    static double Q[] = {
        -0.55789841321675513e-6,
        0.0021284987017821144,
        0.054151797245674225,
        0.43593529692665969,
        1.4606242909763515,
        2.0767117023730469,
        1.0
    };
    double dist, ratio, xm1;

    /* Subtract the root piecewise to avoid cancellation error. */
    dist = x - root1;
    dist -= root2;
    dist -= root3;
    xm1 = x - 1.0;
    ratio = polevl(xm1, P, 5) / polevl(xm1, Q, 6);
    return dist * Y + dist * ratio;
}
/* Asymptotic expansion of psi for large positive x:
 * psi(x) ~ log(x) - 1/(2x) - sum(B2k / (2k * x^(2k))).
 * For x >= 1e17 the polynomial tail is below double precision and skipped. */
static double psi_asy(double x)
{
    double zsq, tail;

    if (x >= 1.0e17) {
        tail = 0.0;
    }
    else {
        zsq = 1.0 / (x * x);
        tail = zsq * polevl(zsq, A, 6);
    }
    return log(x) - (0.5 / x) - tail;
}
/* Digamma function psi(x) = d/dx ln(Gamma(x)).
 *
 * Handles NaN/infinities and poles first, reflects negative arguments via
 * psi(1-x) = psi(x) + pi*cot(pi*x), sums the exact series for small
 * positive integers, shifts general x into [1, 2] with the recurrence
 * psi(x+1) = psi(x) + 1/x, and uses the asymptotic series for large x.
 * The order of the reductions below is precision-sensitive. */
double psi(double x)
{
    double y = 0.0;
    double q, r;
    int i, n;
    if (npy_isnan(x)) {
        return x;
    }
    else if (x == NPY_INFINITY) {
        return x;
    }
    else if (x == -NPY_INFINITY) {
        return NPY_NAN;
    }
    else if (x == 0) {
        /* Pole at 0: signed infinity matching the approach direction. */
        mtherr("psi", SING);
        return npy_copysign(NPY_INFINITY, -x);
    }
    else if (x < 0.0) {
        /* argument reduction before evaluating tan(pi * x) */
        r = modf(x, &q);
        if (r == 0.0) {
            /* Pole at every negative integer. */
            mtherr("psi", SING);
            return NPY_NAN;
        }
        /* Reflection: psi(1-x) = psi(x) + pi*cot(pi*x). */
        y = -NPY_PI / tan(NPY_PI * r);
        x = 1.0 - x;
    }
    /* check for positive integer up to 10 */
    if ((x <= 10.0) && (x == floor(x))) {
        /* psi(n) = -EULER + sum_{k=1}^{n-1} 1/k, exact for small n. */
        n = (int)x;
        for (i = 1; i < n; i++) {
            y += 1.0 / i;
        }
        y -= NPY_EULER;
        return y;
    }
    /* use the recurrence relation to move x into [1, 2] */
    if (x < 1.0) {
        y -= 1.0 / x;
        x += 1.0;
    }
    else if (x < 10.0) {
        while (x > 2.0) {
            x -= 1.0;
            y += 1.0 / x;
        }
    }
    if ((1.0 <= x) && (x <= 2.0)) {
        /* Rational approximation on the reduced interval. */
        y += digamma_imp_1_2(x);
        return y;
    }
    /* x is large, use the asymptotic series */
    y += psi_asy(x);
    return y;
}
|
7f84c68774a0158409063abdd2464d72c68325da
|
8a51a96f61699f0318315ccc89cef39f6866f2b5
|
/src/include/executor/hashjoin.h
|
cb2a2cde8a8e9749160e24396dfd95ba6bb4e478
|
[
"PostgreSQL"
] |
permissive
|
postgres/postgres
|
979febf2b41c00090d1256228f768f33e7ef3b6f
|
b5934bfd6071fed3a38cea0cfaa93afda63d9c0c
|
refs/heads/master
| 2023-08-31T00:10:01.373472
| 2023-08-30T23:07:48
| 2023-08-30T23:07:48
| 927,442
| 13,691
| 4,807
|
NOASSERTION
| 2023-09-09T13:59:15
| 2010-09-21T11:35:45
|
C
|
UTF-8
|
C
| false
| false
| 15,681
|
h
|
hashjoin.h
|
/*-------------------------------------------------------------------------
*
* hashjoin.h
* internal structures for hash joins
*
*
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/executor/hashjoin.h
*
*-------------------------------------------------------------------------
*/
#ifndef HASHJOIN_H
#define HASHJOIN_H
#include "nodes/execnodes.h"
#include "port/atomics.h"
#include "storage/barrier.h"
#include "storage/buffile.h"
#include "storage/lwlock.h"
/* ----------------------------------------------------------------
* hash-join hash table structures
*
* Each active hashjoin has a HashJoinTable structure, which is
* palloc'd in the executor's per-query context. Other storage needed for
* each hashjoin is kept in child contexts, three for each hashjoin:
* - HashTableContext (hashCxt): the parent hash table storage context
* - HashSpillContext (spillCxt): storage for temp files buffers
* - HashBatchContext (batchCxt): storage for a batch in serial hash join
*
* The hashtable contexts are made children of the per-query context, ensuring
* that they will be discarded at end of statement even if the join is
* aborted early by an error. (Likewise, any temporary files we make will
* be cleaned up by the virtual file manager in event of an error.)
*
* Storage that should live through the entire join is allocated from the
* "hashCxt" (mainly the hashtable's metadata). Also, the "hashCxt" context is
* the parent of "spillCxt" and "batchCxt". It makes it easy and fast to
* release the storage when we don't need it anymore.
*
* Data associated with temp files is allocated in the "spillCxt" context
* which lives for the duration of the entire join as batch files'
* creation and usage may span batch execution. These files are
* explicitly destroyed by calling BufFileClose() when the code is done
* with them. The aim of this context is to help accounting for the
* memory allocated for temp files and their buffers.
*
* Finally, data used only during a single batch's execution is allocated
* in the "batchCxt". By resetting the batchCxt at the end of each batch,
* we free all the per-batch storage reliably and without tedium.
*
* During first scan of inner relation, we get its tuples from executor.
* If nbatch > 1 then tuples that don't belong in first batch get saved
* into inner-batch temp files. The same statements apply for the
* first scan of the outer relation, except we write tuples to outer-batch
* temp files. After finishing the first scan, we do the following for
* each remaining batch:
* 1. Read tuples from inner batch file, load into hash buckets.
* 2. Read tuples from outer batch file, match to hash buckets and output.
*
* It is possible to increase nbatch on the fly if the in-memory hash table
* gets too big. The hash-value-to-batch computation is arranged so that this
* can only cause a tuple to go into a later batch than previously thought,
* never into an earlier batch. When we increase nbatch, we rescan the hash
* table and dump out any tuples that are now of a later batch to the correct
* inner batch file. Subsequently, while reading either inner or outer batch
* files, we might find tuples that no longer belong to the current batch;
* if so, we just dump them out to the correct batch file.
* ----------------------------------------------------------------
*/
/* these are in nodes/execnodes.h: */
/* typedef struct HashJoinTupleData *HashJoinTuple; */
/* typedef struct HashJoinTableData *HashJoinTable; */
/*
 * One in-memory hash table entry.  The bucket chain is linked either by
 * plain pointers (private hash table) or by dsa_pointers (shared memory,
 * for Parallel Hash) -- hence the union.
 */
typedef struct HashJoinTupleData
{
	/* link to next tuple in same bucket */
	union
	{
		struct HashJoinTupleData *unshared;
		dsa_pointer shared;
	}			next;
	uint32		hashvalue;		/* tuple's hash code */
	/* Tuple data, in MinimalTuple format, follows on a MAXALIGN boundary */
} HashJoinTupleData;
#define HJTUPLE_OVERHEAD MAXALIGN(sizeof(HashJoinTupleData))
#define HJTUPLE_MINTUPLE(hjtup) \
((MinimalTuple) ((char *) (hjtup) + HJTUPLE_OVERHEAD))
/*
* If the outer relation's distribution is sufficiently nonuniform, we attempt
* to optimize the join by treating the hash values corresponding to the outer
* relation's MCVs specially. Inner relation tuples matching these hash
* values go into the "skew" hashtable instead of the main hashtable, and
* outer relation tuples with these hash values are matched against that
* table instead of the main one. Thus, tuples with these hash values are
* effectively handled as part of the first batch and will never go to disk.
* The skew hashtable is limited to SKEW_HASH_MEM_PERCENT of the total memory
* allowed for the join; while building the hashtables, we decrease the number
* of MCVs being specially treated if needed to stay under this limit.
*
* Note: you might wonder why we look at the outer relation stats for this,
* rather than the inner. One reason is that the outer relation is typically
* bigger, so we get more I/O savings by optimizing for its most common values.
* Also, for similarly-sized relations, the planner prefers to put the more
* uniformly distributed relation on the inside, so we're more likely to find
* interesting skew in the outer relation.
*/
/* One skew bucket: inner tuples whose hash value matches one of the outer
 * relation's MCV hash values (see the explanation above). */
typedef struct HashSkewBucket
{
	uint32		hashvalue;		/* common hash value */
	HashJoinTuple tuples;		/* linked list of inner-relation tuples */
} HashSkewBucket;
#define SKEW_BUCKET_OVERHEAD MAXALIGN(sizeof(HashSkewBucket))
#define INVALID_SKEW_BUCKET_NO (-1)
#define SKEW_HASH_MEM_PERCENT 2
#define SKEW_MIN_OUTER_FRACTION 0.01
/*
* To reduce palloc overhead, the HashJoinTuples for the current batch are
* packed in 32kB buffers instead of pallocing each tuple individually.
*/
/* Header of one 32kB tuple-storage chunk; tuple bytes follow the header. */
typedef struct HashMemoryChunkData
{
	int			ntuples;		/* number of tuples stored in this chunk */
	size_t		maxlen;			/* size of the chunk's tuple buffer */
	size_t		used;			/* number of buffer bytes already used */

	/* pointer to the next chunk (linked list); union mirrors
	 * HashJoinTupleData.next for private vs. shared (DSA) linkage */
	union
	{
		struct HashMemoryChunkData *unshared;
		dsa_pointer shared;
	}			next;

	/*
	 * The chunk's tuple buffer starts after the HashMemoryChunkData struct,
	 * at offset HASH_CHUNK_HEADER_SIZE (which must be maxaligned).  Note that
	 * that offset is not included in "maxlen" or "used".
	 */
} HashMemoryChunkData;

typedef struct HashMemoryChunkData *HashMemoryChunk;
#define HASH_CHUNK_SIZE (32 * 1024L)
#define HASH_CHUNK_HEADER_SIZE MAXALIGN(sizeof(HashMemoryChunkData))
#define HASH_CHUNK_DATA(hc) (((char *) (hc)) + HASH_CHUNK_HEADER_SIZE)
/* tuples exceeding HASH_CHUNK_THRESHOLD bytes are put in their own chunk */
#define HASH_CHUNK_THRESHOLD (HASH_CHUNK_SIZE / 4)
/*
* For each batch of a Parallel Hash Join, we have a ParallelHashJoinBatch
* object in shared memory to coordinate access to it. Since they are
* followed by variable-sized objects, they are arranged in contiguous memory
* but not accessed directly as an array.
*/
typedef struct ParallelHashJoinBatch
{
	dsa_pointer buckets;		/* array of hash table buckets */
	Barrier		batch_barrier;	/* synchronization for joining this batch */

	dsa_pointer chunks;			/* chunks of tuples loaded */
	size_t		size;			/* size of buckets + chunks in memory */
	size_t		estimated_size; /* size of buckets + chunks while writing */
	size_t		ntuples;		/* number of tuples loaded */
	size_t		old_ntuples;	/* number of tuples before repartitioning */
	bool		space_exhausted;	/* batch hit its memory budget?  (cf.
									 * ParallelHashGrowth) */
	bool		skip_unmatched; /* whether to abandon unmatched scan */

	/*
	 * Variable-sized SharedTuplestore objects follow this struct in memory.
	 * See the accessor macros below.
	 */
} ParallelHashJoinBatch;
/* Accessor for inner batch tuplestore following a ParallelHashJoinBatch. */
#define ParallelHashJoinBatchInner(batch) \
((SharedTuplestore *) \
((char *) (batch) + MAXALIGN(sizeof(ParallelHashJoinBatch))))
/* Accessor for outer batch tuplestore following a ParallelHashJoinBatch. */
#define ParallelHashJoinBatchOuter(batch, nparticipants) \
((SharedTuplestore *) \
((char *) ParallelHashJoinBatchInner(batch) + \
MAXALIGN(sts_estimate(nparticipants))))
/* Total size of a ParallelHashJoinBatch and tuplestores. */
#define EstimateParallelHashJoinBatch(hashtable) \
(MAXALIGN(sizeof(ParallelHashJoinBatch)) + \
MAXALIGN(sts_estimate((hashtable)->parallel_state->nparticipants)) * 2)
/* Accessor for the nth ParallelHashJoinBatch given the base. */
#define NthParallelHashJoinBatch(base, n) \
((ParallelHashJoinBatch *) \
((char *) (base) + \
EstimateParallelHashJoinBatch(hashtable) * (n)))
/*
* Each backend requires a small amount of per-batch state to interact with
* each ParallelHashJoinBatch.
*/
/* One backend's private view of a single ParallelHashJoinBatch. */
typedef struct ParallelHashJoinBatchAccessor
{
	ParallelHashJoinBatch *shared;	/* pointer to shared state */

	/* Per-backend partial counters to reduce contention. */
	size_t		preallocated;	/* pre-allocated space for this backend */
	size_t		ntuples;		/* number of tuples */
	size_t		size;			/* size of partition in memory */
	size_t		estimated_size; /* size of partition on disk */
	size_t		old_ntuples;	/* how many tuples before repartitioning? */
	bool		at_least_one_chunk; /* has this backend allocated a chunk? */
	bool		outer_eof;		/* has this process hit end of batch? */
	bool		done;			/* flag to remember that a batch is done */
	SharedTuplestoreAccessor *inner_tuples;
	SharedTuplestoreAccessor *outer_tuples;
} ParallelHashJoinBatchAccessor;
/*
* While hashing the inner relation, any participant might determine that it's
* time to increase the number of buckets to reduce the load factor or batches
* to reduce the memory size. This is indicated by setting the growth flag to
* these values.
*/
/* (Stored in ParallelHashJoinState.growth, which is protected by its lock.) */
typedef enum ParallelHashGrowth
{
	/* The current dimensions are sufficient. */
	PHJ_GROWTH_OK,
	/* The load factor is too high, so we need to add buckets. */
	PHJ_GROWTH_NEED_MORE_BUCKETS,
	/* The memory budget would be exhausted, so we need to repartition. */
	PHJ_GROWTH_NEED_MORE_BATCHES,
	/* Repartitioning didn't help last time, so don't try to do that again. */
	PHJ_GROWTH_DISABLED
} ParallelHashGrowth;
/*
* The shared state used to coordinate a Parallel Hash Join. This is stored
* in the DSM segment.
*/
typedef struct ParallelHashJoinState
{
	dsa_pointer batches;		/* array of ParallelHashJoinBatch */
	dsa_pointer old_batches;	/* previous generation during repartition */
	int			nbatch;			/* number of batches now */
	int			old_nbatch;		/* previous number of batches */
	int			nbuckets;		/* number of buckets */
	ParallelHashGrowth growth;	/* control batch/bucket growth */
	dsa_pointer chunk_work_queue;	/* chunk work queue */
	int			nparticipants;	/* number of participating backends */
	size_t		space_allowed;	/* memory budget (cf. PHJ_GROWTH_NEED_MORE_BATCHES) */
	size_t		total_tuples;	/* total number of inner tuples */
	LWLock		lock;			/* lock protecting the above */

	Barrier		build_barrier;	/* synchronization for the build phases */
	Barrier		grow_batches_barrier;	/* for PHJ_GROW_BATCHES_* phases */
	Barrier		grow_buckets_barrier;	/* for PHJ_GROW_BUCKETS_* phases */
	pg_atomic_uint32 distributor;	/* counter for load balancing */

	SharedFileSet fileset;		/* space for shared temporary files */
} ParallelHashJoinState;
/* The phases for building batches, used by build_barrier. */
#define PHJ_BUILD_ELECT 0
#define PHJ_BUILD_ALLOCATE 1
#define PHJ_BUILD_HASH_INNER 2
#define PHJ_BUILD_HASH_OUTER 3
#define PHJ_BUILD_RUN 4
#define PHJ_BUILD_FREE 5
/* The phases for probing each batch, used by for batch_barrier. */
#define PHJ_BATCH_ELECT 0
#define PHJ_BATCH_ALLOCATE 1
#define PHJ_BATCH_LOAD 2
#define PHJ_BATCH_PROBE 3
#define PHJ_BATCH_SCAN 4
#define PHJ_BATCH_FREE 5
/* The phases of batch growth while hashing, for grow_batches_barrier. */
#define PHJ_GROW_BATCHES_ELECT 0
#define PHJ_GROW_BATCHES_REALLOCATE 1
#define PHJ_GROW_BATCHES_REPARTITION 2
#define PHJ_GROW_BATCHES_DECIDE 3
#define PHJ_GROW_BATCHES_FINISH 4
#define PHJ_GROW_BATCHES_PHASE(n) ((n) % 5) /* circular phases */
/* The phases of bucket growth while hashing, for grow_buckets_barrier. */
#define PHJ_GROW_BUCKETS_ELECT 0
#define PHJ_GROW_BUCKETS_REALLOCATE 1
#define PHJ_GROW_BUCKETS_REINSERT 2
#define PHJ_GROW_BUCKETS_PHASE(n) ((n) % 3) /* circular phases */
/*
 * Per-backend state for a hash join's in-memory hash table.  For
 * Parallel Hash, the bucket array and tuples live in per-query DSA
 * memory (see the buckets union) and coordination happens through
 * parallel_state; otherwise all storage is backend-private.
 */
typedef struct HashJoinTableData
{
	int nbuckets; /* # buckets in the in-memory hash table */
	int log2_nbuckets; /* its log2 (nbuckets must be a power of 2) */
	int nbuckets_original; /* # buckets when starting the first hash */
	int nbuckets_optimal; /* optimal # buckets (per batch) */
	int log2_nbuckets_optimal; /* log2(nbuckets_optimal) */
	/* buckets[i] is head of list of tuples in i'th in-memory bucket */
	union
	{
		/* unshared array is per-batch storage, as are all the tuples */
		struct HashJoinTupleData **unshared;
		/* shared array is per-query DSA area, as are all the tuples */
		dsa_pointer_atomic *shared;
	} buckets;
	bool keepNulls; /* true to store unmatchable NULL tuples */
	bool skewEnabled; /* are we using skew optimization? */
	HashSkewBucket **skewBucket; /* hashtable of skew buckets */
	int skewBucketLen; /* size of skewBucket array (a power of 2!) */
	int nSkewBuckets; /* number of active skew buckets */
	int *skewBucketNums; /* array indexes of active skew buckets */
	int nbatch; /* number of batches */
	int curbatch; /* current batch #; 0 during 1st pass */
	int nbatch_original; /* nbatch when we started inner scan */
	int nbatch_outstart; /* nbatch when we started outer scan */
	bool growEnabled; /* flag to shut off nbatch increases */
	double totalTuples; /* # tuples obtained from inner plan */
	double partialTuples; /* # tuples obtained from inner plan by me */
	double skewTuples; /* # tuples inserted into skew tuples */
	/*
	 * These arrays are allocated for the life of the hash join, but only if
	 * nbatch > 1. A file is opened only when we first write a tuple into it
	 * (otherwise its pointer remains NULL). Note that the zero'th array
	 * elements never get used, since we will process rather than dump out any
	 * tuples of batch zero.
	 */
	BufFile **innerBatchFile; /* buffered virtual temp file per batch */
	BufFile **outerBatchFile; /* buffered virtual temp file per batch */
	/*
	 * Info about the datatype-specific hash functions for the datatypes being
	 * hashed. These are arrays of the same length as the number of hash join
	 * clauses (hash keys).
	 */
	FmgrInfo *outer_hashfunctions; /* lookup data for hash functions */
	FmgrInfo *inner_hashfunctions; /* lookup data for hash functions */
	bool *hashStrict; /* is each hash join operator strict? */
	Oid *collations; /* collation to hash with, per hash key */
	Size spaceUsed; /* memory space currently used by tuples */
	Size spaceAllowed; /* upper limit for space used */
	Size spacePeak; /* peak space used */
	Size spaceUsedSkew; /* skew hash table's current space usage */
	Size spaceAllowedSkew; /* upper limit for skew hashtable */
	MemoryContext hashCxt; /* context for whole-hash-join storage */
	MemoryContext batchCxt; /* context for this-batch-only storage */
	MemoryContext spillCxt; /* context for spilling to temp files */
	/* used for dense allocation of tuples (into linked chunks) */
	HashMemoryChunk chunks; /* one list for the whole batch */
	/* Shared and private state for Parallel Hash. */
	HashMemoryChunk current_chunk; /* this backend's current chunk */
	dsa_area *area; /* DSA area to allocate memory from */
	ParallelHashJoinState *parallel_state; /* shared coordination state (Parallel Hash only) */
	ParallelHashJoinBatchAccessor *batches; /* this backend's batch accessors (Parallel Hash) */
	dsa_pointer current_chunk_shared; /* DSA address corresponding to current_chunk */
} HashJoinTableData;
#endif /* HASHJOIN_H */
|
78a8bcf418622a73e0f83f9d1c8eb33d68790081
|
28d0f8c01599f8f6c711bdde0b59f9c2cd221203
|
/sys/dev/ic/sl811hs.c
|
88d835518594bf64c1b334e7536fd3abd9de165a
|
[] |
no_license
|
NetBSD/src
|
1a9cbc22ed778be638b37869ed4fb5c8dd616166
|
23ee83f7c0aea0777bd89d8ebd7f0cde9880d13c
|
refs/heads/trunk
| 2023-08-31T13:24:58.105962
| 2023-08-27T15:50:47
| 2023-08-27T15:50:47
| 88,439,547
| 656
| 348
| null | 2023-07-20T20:07:24
| 2017-04-16T20:03:43
| null |
UTF-8
|
C
| false
| false
| 97,915
|
c
|
sl811hs.c
|
/* $NetBSD: sl811hs.c,v 1.112 2022/05/03 20:52:32 andvar Exp $ */
/*
* Not (c) 2007 Matthew Orgass
* This file is public domain, meaning anyone can make any use of part or all
* of this file including copying into other works without credit. Any use,
* modified or not, is solely the responsibility of the user. If this file is
* part of a collection then use in the collection is governed by the terms of
* the collection.
*/
/*
* Cypress/ScanLogic SL811HS/T USB Host Controller
* Datasheet, Errata, and App Note available at www.cypress.com
*
* Uses: Ratoc CFU1U PCMCIA USB Host Controller, Nereid X68k USB HC, ISA
* HCs. The Ratoc CFU2 uses a different chip.
*
* This chip puts the serial in USB. It implements USB by means of an eight
* bit I/O interface. It can be used for ISA, PCMCIA/CF, parallel port,
* serial port, or any eight bit interface. It has 256 bytes of memory, the
* first 16 of which are used for register access. There are two sets of
* registers for sending individual bus transactions. Because USB is polled,
* this organization means that some amount of card access must often be made
when devices are attached, even when they are not directly being used.
* A per-ms frame interrupt is necessary and many devices will poll with a
* per-frame bulk transfer.
*
* It is possible to write a little over two bytes to the chip (auto
* incremented) per full speed byte time on the USB. Unfortunately,
* auto-increment does not work reliably so write and bus speed is
* approximately the same for full speed devices.
*
* In addition to the 240 byte packet size limit for isochronous transfers,
* this chip has no means of determining the current frame number other than
* getting all 1ms SOF interrupts, which is not always possible even on a fast
* system. Isochronous transfers guarantee that transfers will never be
* retried in a later frame, so this can cause problems with devices beyond
* the difficulty in actually performing the transfer most frames. I tried
implementing isoc transfers and was able to play CD-derived audio via an
* iMic on a 2GHz PC, however it would still be interrupted at times and
* once interrupted, would stay out of sync. All isoc support has been
* removed.
*
* BUGS: all chip revisions have problems with low speed devices through hubs.
* The chip stops generating SOF with hubs that send SE0 during SOF. See
* comment in dointr(). All performance enhancing features of this chip seem
* not to work properly, most confirmed buggy in errata doc.
*
*/
/*
* The hard interrupt is the main entry point. Start, callbacks, and repeat
* are the only others called frequently.
*
* Since this driver attaches to pcmcia, card removal at any point should be
* expected and not cause panics or infinite loops.
*/
/*
* XXX TODO:
* copy next output packet while transferring
* usb suspend
* could keep track of known values of all buffer space?
* combined print/log function for errors
*
* ub_usepolling support is untested and may not work
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sl811hs.c,v 1.112 2022/05/03 20:52:32 andvar Exp $");
#ifdef _KERNEL_OPT
#include "opt_slhci.h"
#include "opt_usb.h"
#endif
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/gcq.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usbhist.h>
#include <dev/usb/usb_mem.h>
#include <dev/usb/usbdevs.h>
#include <dev/usb/usbroothub.h>
#include <dev/ic/sl811hsreg.h>
#include <dev/ic/sl811hsvar.h>
#define Q_CB 0 /* Control/Bulk */
#define Q_NEXT_CB 1
#define Q_MAX_XFER Q_CB
#define Q_CALLBACKS 2
#define Q_MAX Q_CALLBACKS
#define F_AREADY (0x00000001)
#define F_BREADY (0x00000002)
#define F_AINPROG (0x00000004)
#define F_BINPROG (0x00000008)
#define F_LOWSPEED (0x00000010)
#define F_UDISABLED (0x00000020) /* Consider disabled for USB */
#define F_NODEV (0x00000040)
#define F_ROOTINTR (0x00000080)
#define F_REALPOWER (0x00000100) /* Actual power state */
#define F_POWER (0x00000200) /* USB reported power state */
#define F_ACTIVE (0x00000400)
#define F_CALLBACK (0x00000800) /* Callback scheduled */
#define F_SOFCHECK1 (0x00001000)
#define F_SOFCHECK2 (0x00002000)
#define F_CRESET (0x00004000) /* Reset done not reported */
#define F_CCONNECT (0x00008000) /* Connect change not reported */
#define F_RESET (0x00010000)
#define F_ISOC_WARNED (0x00020000)
#define F_LSVH_WARNED (0x00040000)
#define F_DISABLED (F_NODEV|F_UDISABLED)
#define F_CHANGE (F_CRESET|F_CCONNECT)
#ifdef SLHCI_TRY_LSVH
unsigned int slhci_try_lsvh = 1;
#else
unsigned int slhci_try_lsvh = 0;
#endif
#define ADR 0
#define LEN 1
#define PID 2
#define DEV 3
#define STAT 2
#define CONT 3
#define A 0
#define B 1
static const uint8_t slhci_tregs[2][4] =
{{SL11_E0ADDR, SL11_E0LEN, SL11_E0PID, SL11_E0DEV },
{SL11_E1ADDR, SL11_E1LEN, SL11_E1PID, SL11_E1DEV }};
#define PT_ROOT_CTRL 0
#define PT_ROOT_INTR 1
#define PT_CTRL_SETUP 2
#define PT_CTRL_DATA 3
#define PT_CTRL_STATUS 4
#define PT_INTR 5
#define PT_BULK 6
#define PT_MAX 6
#ifdef SLHCI_DEBUG
#define SLHCI_MEM_ACCOUNTING
#endif
/*
* Maximum allowable reserved bus time. Since intr/isoc transfers have
* unconditional priority, this is all that ensures control and bulk transfers
* get a chance. It is a single value for all frames since all transfers can
* use multiple consecutive frames if an error is encountered. Note that it
* is not really possible to fill the bus with transfers, so this value should
* be on the low side. Defaults to giving a warning unless SLHCI_NO_OVERTIME
* is defined. Full time is 12000 - END_BUSTIME.
*/
#ifndef SLHCI_RESERVED_BUSTIME
#define SLHCI_RESERVED_BUSTIME 5000
#endif
/*
* Rate for "exceeds reserved bus time" warnings (default) or errors.
* Warnings only happen when an endpoint open causes the time to go above
* SLHCI_RESERVED_BUSTIME, not if it is already above.
*/
#ifndef SLHCI_OVERTIME_WARNING_RATE
#define SLHCI_OVERTIME_WARNING_RATE { 60, 0 } /* 60 seconds */
#endif
static const struct timeval reserved_warn_rate = SLHCI_OVERTIME_WARNING_RATE;
/*
* For EOF, the spec says 42 bit times, plus (I think) a possible hub skew of
* 20 bit times. By default leave 66 bit times to start the transfer beyond
* the required time. Units are full-speed bit times (a bit over 5us per 64).
* Only multiples of 64 are significant.
*/
#define SLHCI_STANDARD_END_BUSTIME 128
#ifndef SLHCI_EXTRA_END_BUSTIME
#define SLHCI_EXTRA_END_BUSTIME 0
#endif
#define SLHCI_END_BUSTIME (SLHCI_STANDARD_END_BUSTIME+SLHCI_EXTRA_END_BUSTIME)
/*
* This is an approximation of the USB worst-case timings presented on p. 54 of
* the USB 1.1 spec translated to full speed bit times.
* FS = full speed with handshake, FSII = isoc in, FSIO = isoc out,
* FSI = isoc (worst case), LS = low speed
*/
#define SLHCI_FS_CONST 114
#define SLHCI_FSII_CONST 92
#define SLHCI_FSIO_CONST 80
#define SLHCI_FSI_CONST 92
#define SLHCI_LS_CONST 804
#ifndef SLHCI_PRECICE_BUSTIME
/*
* These values are < 3% too high (compared to the multiply and divide) for
* max sized packets.
*/
#define SLHCI_FS_DATA_TIME(len) (((u_int)(len)<<3)+(len)+((len)>>1))
#define SLHCI_LS_DATA_TIME(len) (((u_int)(len)<<6)+((u_int)(len)<<4))
#else
#define SLHCI_FS_DATA_TIME(len) (56*(len)/6)
#define SLHCI_LS_DATA_TIME(len) (449*(len)/6)
#endif
/*
* Set SLHCI_WAIT_SIZE to the desired maximum size of single FS transfer
* to poll for after starting a transfer. 64 gets all full speed transfers.
* Note that even if 0 polling will occur if data equal or greater than the
* transfer size is copied to the chip while the transfer is in progress.
* Setting SLHCI_WAIT_TIME to -12000 will disable polling.
*/
#ifndef SLHCI_WAIT_SIZE
#define SLHCI_WAIT_SIZE 8
#endif
#ifndef SLHCI_WAIT_TIME
#define SLHCI_WAIT_TIME (SLHCI_FS_CONST + \
SLHCI_FS_DATA_TIME(SLHCI_WAIT_SIZE))
#endif
const int slhci_wait_time = SLHCI_WAIT_TIME;
#ifndef SLHCI_MAX_RETRIES
#define SLHCI_MAX_RETRIES 3
#endif
/* Check IER values for corruption after this many unrecognized interrupts. */
#ifndef SLHCI_IER_CHECK_FREQUENCY
#ifdef SLHCI_DEBUG
#define SLHCI_IER_CHECK_FREQUENCY 1
#else
#define SLHCI_IER_CHECK_FREQUENCY 100
#endif
#endif
/* Note that buffer points to the start of the buffer for this transfer. */
/*
 * Per-pipe software state.  The embedded usbd_pipe must stay the first
 * member so that usbd_pipe pointers can be cast to slhci_pipe pointers
 * (see SLHCI_PIPE2SPIPE below).
 */
struct slhci_pipe {
	struct usbd_pipe pipe;
	struct usbd_xfer *xfer; /* xfer in progress */
	uint8_t *buffer; /* I/O buffer (if needed) */
	struct gcq ap; /* All pipes */
	struct gcq to; /* Timeout list */
	struct gcq xq; /* Xfer queues */
	unsigned int pflags; /* Pipe flags */
#define PF_GONE (0x01) /* Pipe is on disabled device */
#define PF_TOGGLE (0x02) /* Data toggle status */
#define PF_LS (0x04) /* Pipe is low speed */
#define PF_PREAMBLE (0x08) /* Needs preamble */
	Frame to_frame; /* Frame number for timeout */
	Frame frame; /* Frame number for intr xfer */
	Frame lastframe; /* Previous frame number for intr */
	uint16_t bustime; /* Worst case bus time usage */
	/* newbustime/newlen index 0 = short data, 1 = full/ctrl data
	 * (precomputed in slhci_start to avoid work in the interrupt). */
	uint16_t newbustime[2]; /* new bustimes (see index below) */
	uint8_t tregs[4]; /* ADR, LEN, PID, DEV */
	uint8_t newlen[2]; /* 0 = short data, 1 = ctrl data */
	uint8_t newpid; /* for ctrl */
	uint8_t wantshort; /* last xfer must be short */
	uint8_t control; /* Host control register settings */
	uint8_t nerrs; /* Current number of errors */
	uint8_t ptype; /* Pipe type (PT_* value) */
};
#define SLHCI_BUS2SC(bus) ((bus)->ub_hcpriv)
#define SLHCI_PIPE2SC(pipe) SLHCI_BUS2SC((pipe)->up_dev->ud_bus)
#define SLHCI_XFER2SC(xfer) SLHCI_BUS2SC((xfer)->ux_bus)
#define SLHCI_PIPE2SPIPE(pipe) ((struct slhci_pipe *)(pipe))
#define SLHCI_XFER2SPIPE(xfer) SLHCI_PIPE2SPIPE((xfer)->ux_pipe)
#define SLHCI_XFER_TYPE(x) (SLHCI_XFER2SPIPE(xfer)->ptype)
#ifdef SLHCI_PROFILE_TRANSFER
#if defined(__mips__)
/*
* MIPS cycle counter does not directly count cpu cycles but is a different
* fraction of cpu cycles depending on the cpu.
*/
typedef uint32_t cc_type;
#define CC_TYPE_FMT "%u"
#define slhci_cc_set(x) __asm volatile ("mfc0 %[cc], $9\n\tnop\n\tnop\n\tnop" \
: [cc] "=r"(x))
#elif defined(__i386__)
typedef uint64_t cc_type;
#define CC_TYPE_FMT "%llu"
#define slhci_cc_set(x) __asm volatile ("rdtsc" : "=A"(x))
#else
#error "SLHCI_PROFILE_TRANSFER not implemented on this MACHINE_ARCH (see sys/dev/ic/sl811hs.c)"
#endif
struct slhci_cc_time {
cc_type start;
cc_type stop;
unsigned int miscdata;
};
#ifndef SLHCI_N_TIMES
#define SLHCI_N_TIMES 200
#endif
struct slhci_cc_times {
struct slhci_cc_time times[SLHCI_N_TIMES];
int current;
int wraparound;
};
static struct slhci_cc_times t_ab[2];
static struct slhci_cc_times t_abdone;
static struct slhci_cc_times t_copy_to_dev;
static struct slhci_cc_times t_copy_from_dev;
static struct slhci_cc_times t_intr;
static struct slhci_cc_times t_lock;
static struct slhci_cc_times t_delay;
static struct slhci_cc_times t_hard_int;
static struct slhci_cc_times t_callback;
static inline void
start_cc_time(struct slhci_cc_times *times, unsigned int misc) {
times->times[times->current].miscdata = misc;
slhci_cc_set(times->times[times->current].start);
}
static inline void
stop_cc_time(struct slhci_cc_times *times) {
slhci_cc_set(times->times[times->current].stop);
if (++times->current >= SLHCI_N_TIMES) {
times->current = 0;
times->wraparound = 1;
}
}
void slhci_dump_cc_times(int);
/*
 * Dump one of the cycle-counter sample rings to the console.  n selects
 * the ring via the table below; an out-of-range n dumps ring 0, matching
 * the old switch's default case.  After the ring has wrapped, the older
 * samples (current + 1 .. N - 1) are printed first.
 * NOTE(review): when wrapped, the slot at index `current` is skipped —
 * behavior preserved from the original; confirm whether that is intended.
 */
void
slhci_dump_cc_times(int n) {
	static const struct {
		const char *heading;	/* printed verbatim, includes ":\n" */
		struct slhci_cc_times *times;
	} dumps[] = {
		{ "USBA start transfer to intr:\n", &t_ab[A] },
		{ "USBB start transfer to intr:\n", &t_ab[B] },
		{ "abdone:\n", &t_abdone },
		{ "copy to device:\n", &t_copy_to_dev },
		{ "copy from device:\n", &t_copy_from_dev },
		{ "intr to intr:\n", &t_intr },
		{ "lock to release:\n", &t_lock },
		{ "delay time:\n", &t_delay },
		{ "hard interrupt enter to exit:\n", &t_hard_int },
		{ "callback:\n", &t_callback },
	};
	struct slhci_cc_times *times;
	int i;

	if (n < 0 || n >= (int)(sizeof(dumps) / sizeof(dumps[0])))
		n = 0;
	printf("%s", dumps[n].heading);
	times = dumps[n].times;

	if (times->wraparound)
		for (i = times->current + 1; i < SLHCI_N_TIMES; i++)
			printf("start " CC_TYPE_FMT " stop " CC_TYPE_FMT
			    " difference %8i miscdata %#x\n",
			    times->times[i].start, times->times[i].stop,
			    (int)(times->times[i].stop -
			    times->times[i].start), times->times[i].miscdata);
	for (i = 0; i < times->current; i++)
		printf("start " CC_TYPE_FMT " stop " CC_TYPE_FMT
		    " difference %8i miscdata %#x\n", times->times[i].start,
		    times->times[i].stop, (int)(times->times[i].stop -
		    times->times[i].start), times->times[i].miscdata);
}
#else
#define start_cc_time(x, y)
#define stop_cc_time(x)
#endif /* SLHCI_PROFILE_TRANSFER */
typedef usbd_status (*LockCallFunc)(struct slhci_softc *, struct slhci_pipe
*, struct usbd_xfer *);
struct usbd_xfer * slhci_allocx(struct usbd_bus *, unsigned int);
void slhci_freex(struct usbd_bus *, struct usbd_xfer *);
static void slhci_get_lock(struct usbd_bus *, kmutex_t **);
usbd_status slhci_transfer(struct usbd_xfer *);
usbd_status slhci_start(struct usbd_xfer *);
usbd_status slhci_root_start(struct usbd_xfer *);
usbd_status slhci_open(struct usbd_pipe *);
static int slhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
void *, int);
/*
* slhci_supported_rev, slhci_preinit, slhci_attach, slhci_detach,
* slhci_activate
*/
void slhci_abort(struct usbd_xfer *);
void slhci_close(struct usbd_pipe *);
void slhci_clear_toggle(struct usbd_pipe *);
void slhci_poll(struct usbd_bus *);
void slhci_done(struct usbd_xfer *);
void slhci_void(void *);
/* lock entry functions */
#ifdef SLHCI_MEM_ACCOUNTING
void slhci_mem_use(struct usbd_bus *, int);
#endif
void slhci_reset_entry(void *);
usbd_status slhci_lock_call(struct slhci_softc *, LockCallFunc,
struct slhci_pipe *, struct usbd_xfer *);
void slhci_start_entry(struct slhci_softc *, struct slhci_pipe *);
void slhci_callback_entry(void *arg);
void slhci_do_callback(struct slhci_softc *, struct usbd_xfer *);
/* slhci_intr */
void slhci_main(struct slhci_softc *);
/* in lock functions */
static void slhci_write(struct slhci_softc *, uint8_t, uint8_t);
static uint8_t slhci_read(struct slhci_softc *, uint8_t);
static void slhci_write_multi(struct slhci_softc *, uint8_t, uint8_t *, int);
static void slhci_read_multi(struct slhci_softc *, uint8_t, uint8_t *, int);
static void slhci_waitintr(struct slhci_softc *, int);
static int slhci_dointr(struct slhci_softc *);
static void slhci_abdone(struct slhci_softc *, int);
static void slhci_tstart(struct slhci_softc *);
static void slhci_dotransfer(struct slhci_softc *);
static void slhci_callback(struct slhci_softc *);
static void slhci_enter_xfer(struct slhci_softc *, struct slhci_pipe *);
static void slhci_enter_xfers(struct slhci_softc *);
static void slhci_queue_timed(struct slhci_softc *, struct slhci_pipe *);
static void slhci_xfer_timer(struct slhci_softc *, struct slhci_pipe *);
static void slhci_callback_schedule(struct slhci_softc *);
static void slhci_do_callback_schedule(struct slhci_softc *);
#if 0
void slhci_pollxfer(struct slhci_softc *, struct usbd_xfer *); /* XXX */
#endif
static usbd_status slhci_do_poll(struct slhci_softc *, struct slhci_pipe *,
struct usbd_xfer *);
static usbd_status slhci_lsvh_warn(struct slhci_softc *, struct slhci_pipe *,
struct usbd_xfer *);
static usbd_status slhci_isoc_warn(struct slhci_softc *, struct slhci_pipe *,
struct usbd_xfer *);
static usbd_status slhci_open_pipe(struct slhci_softc *, struct slhci_pipe *,
struct usbd_xfer *);
static usbd_status slhci_close_pipe(struct slhci_softc *, struct slhci_pipe *,
struct usbd_xfer *);
static usbd_status slhci_do_abort(struct slhci_softc *, struct slhci_pipe *,
struct usbd_xfer *);
static usbd_status slhci_halt(struct slhci_softc *, struct slhci_pipe *,
struct usbd_xfer *);
static void slhci_intrchange(struct slhci_softc *, uint8_t);
static void slhci_drain(struct slhci_softc *);
static void slhci_reset(struct slhci_softc *);
static int slhci_reserve_bustime(struct slhci_softc *, struct slhci_pipe *,
int);
static void slhci_insert(struct slhci_softc *);
static usbd_status slhci_clear_feature(struct slhci_softc *, unsigned int);
static usbd_status slhci_set_feature(struct slhci_softc *, unsigned int);
static void slhci_get_status(struct slhci_softc *, usb_port_status_t *);
#define SLHCIHIST_FUNC() USBHIST_FUNC()
#define SLHCIHIST_CALLED() USBHIST_CALLED(slhcidebug)
#ifdef SLHCI_DEBUG
static int slhci_memtest(struct slhci_softc *);
void slhci_log_buffer(struct usbd_xfer *);
void slhci_log_req(usb_device_request_t *);
void slhci_log_dumpreg(void);
void slhci_log_xfer(struct usbd_xfer *);
void slhci_log_spipe(struct slhci_pipe *);
void slhci_print_intr(void);
void slhci_log_sc(void);
void slhci_log_slreq(struct slhci_pipe *);
/* Constified so you can read the values from ddb */
const int SLHCI_D_TRACE = 0x0001;
const int SLHCI_D_MSG = 0x0002;
const int SLHCI_D_XFER = 0x0004;
const int SLHCI_D_MEM = 0x0008;
const int SLHCI_D_INTR = 0x0010;
const int SLHCI_D_SXFER = 0x0020;
const int SLHCI_D_ERR = 0x0080;
const int SLHCI_D_BUF = 0x0100;
const int SLHCI_D_SOFT = 0x0200;
const int SLHCI_D_WAIT = 0x0400;
const int SLHCI_D_ROOT = 0x0800;
/* SOF/NAK alone normally ignored, SOF also needs D_INTR */
const int SLHCI_D_SOF = 0x1000;
const int SLHCI_D_NAK = 0x2000;
int slhcidebug = 0x1cbc; /* 0xc8c; */ /* 0xffff; */ /* 0xd8c; */
SYSCTL_SETUP(sysctl_hw_slhci_setup, "sysctl hw.slhci setup")
{
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;
	int err;

	/* Parent node: hw.slhci. */
	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "slhci",
	    SYSCTL_DESCR("slhci global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	/* hw.slhci.debug: read-write mask controlling debugging printfs. */
	if (err == 0)
		err = sysctl_createv(clog, 0, &rnode, &cnode,
		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
		    "debug", SYSCTL_DESCR("Enable debugging output"),
		    NULL, 0, &slhcidebug, sizeof(slhcidebug),
		    CTL_CREATE, CTL_EOL);

	if (err != 0)
		aprint_error("%s: sysctl_createv failed (err = %d)\n",
		    __func__, err);
}
struct slhci_softc *ssc;
#define SLHCI_DEXEC(x, y) do { if ((slhcidebug & SLHCI_ ## x)) { y; } \
} while (/*CONSTCOND*/ 0)
#define DDOLOG(f, a, b, c, d) do { KERNHIST_LOG(usbhist, f, a, b, c, d); \
} while (/*CONSTCOND*/0)
#define DLOG(x, f, a, b, c, d) SLHCI_DEXEC(x, DDOLOG(f, a, b, c, d))
/*
* DDOLOGBUF logs a buffer up to 8 bytes at a time. No identifier so that we
* can make it a real function.
*/
/*
 * Log a buffer eight bytes per history entry, packing byte pairs into
 * 16-bit words.  A real function (not a macro) so no identifier prefix
 * appears in the log; the switch handles the one-to-seven byte tail.
 */
static void
DDOLOGBUF(uint8_t *buf, unsigned int length)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	int i;

	/* Full groups of eight bytes. */
	for (i = 0; i + 8 <= length; i += 8)
		DDOLOG("%.4x %.4x %.4x %.4x", (buf[i] << 8) | buf[i+1],
		    (buf[i+2] << 8) | buf[i+3], (buf[i+4] << 8) | buf[i+5],
		    (buf[i+6] << 8) | buf[i+7]);

	/* Remaining tail, if any. */
	switch (length - i) {
	case 7:
		DDOLOG("%.4x %.4x %.4x %.2x", (buf[i] << 8) | buf[i+1],
		    (buf[i+2] << 8) | buf[i+3], (buf[i+4] << 8) | buf[i+5],
		    buf[i+6]);
		break;
	case 6:
		DDOLOG("%.4x %.4x %.4x", (buf[i] << 8) | buf[i+1],
		    (buf[i+2] << 8) | buf[i+3], (buf[i+4] << 8) | buf[i+5], 0);
		break;
	case 5:
		DDOLOG("%.4x %.4x %.2x", (buf[i] << 8) | buf[i+1],
		    (buf[i+2] << 8) | buf[i+3], buf[i+4], 0);
		break;
	case 4:
		DDOLOG("%.4x %.4x", (buf[i] << 8) | buf[i+1],
		    (buf[i+2] << 8) | buf[i+3], 0,0);
		break;
	case 3:
		DDOLOG("%.4x %.2x", (buf[i] << 8) | buf[i+1], buf[i+2], 0,0);
		break;
	case 2:
		DDOLOG("%.4x", (buf[i] << 8) | buf[i+1], 0,0,0);
		break;
	case 1:
		DDOLOG("%.2x", buf[i], 0,0,0);
		break;
	default:
		break;
	}
}
#define DLOGBUF(x, b, l) SLHCI_DEXEC(x, DDOLOGBUF(b, l))
#define DDOLOGCTRL(x) do { \
DDOLOG("CTRL suspend=%jd", !!((x) & SL11_CTRL_SUSPEND), 0, 0, 0); \
DDOLOG("CTRL ls =%jd jk =%jd reset =%jd sof =%jd", \
!!((x) & SL11_CTRL_LOWSPEED), !!((x) & SL11_CTRL_JKSTATE), \
!!((x) & SL11_CTRL_RESETENGINE), !!((x) & SL11_CTRL_ENABLESOF));\
} while (0)
#define DDOLOGISR(r) do { \
DDOLOG("ISR data =%jd det/res=%jd insert =%jd sof =%jd", \
!!((r) & SL11_ISR_DATA), !!((r) & SL11_ISR_RESUME), \
!!((r) & SL11_ISR_INSERT), !!!!((r) & SL11_ISR_SOF)); \
DDOLOG("ISR babble =%jd usbb =%jd usba =%jd", \
!!((r) & SL11_ISR_BABBLE), !!((r) & SL11_ISR_USBB), \
!!((r) & SL11_ISR_USBA), 0); \
} while (0)
#define DDOLOGIER(r) do { \
DDOLOG("IER det/res=%d insert =%d sof =%d", \
!!((r) & SL11_IER_RESUME), \
!!((r) & SL11_IER_INSERT), !!!!((r) & SL11_IER_SOF), 0); \
DDOLOG("IER babble =%d usbb =%d usba =%d", \
!!((r) & SL11_IER_BABBLE), !!((r) & SL11_IER_USBB), \
!!((r) & SL11_IER_USBA), 0); \
} while (0)
#define DDOLOGSTATUS(s) do { \
DDOLOG("STAT stall =%d nak =%d overflow =%d setup =%d", \
!!((s) & SL11_EPSTAT_STALL), !!((s) & SL11_EPSTAT_NAK), \
!!((s) & SL11_EPSTAT_OVERFLOW), !!((s) & SL11_EPSTAT_SETUP)); \
DDOLOG("STAT sequence=%d timeout =%d error =%d ack =%d", \
!!((s) & SL11_EPSTAT_SEQUENCE), !!((s) & SL11_EPSTAT_TIMEOUT), \
!!((s) & SL11_EPSTAT_ERROR), !!((s) & SL11_EPSTAT_ACK)); \
} while (0)
#define DDOLOGEPCTRL(r) do { \
DDOLOG("CTRL preamble=%d toggle =%d sof =%d iso =%d", \
!!((r) & SL11_EPCTRL_PREAMBLE), !!((r) & SL11_EPCTRL_DATATOGGLE),\
!!((r) & SL11_EPCTRL_SOF), !!((r) & SL11_EPCTRL_ISO)); \
DDOLOG("CTRL out =%d enable =%d arm =%d", \
!!((r) & SL11_EPCTRL_DIRECTION), \
!!((r) & SL11_EPCTRL_ENABLE), !!((r) & SL11_EPCTRL_ARM), 0); \
} while (0)
#define DDOLOGEPSTAT(r) do { \
DDOLOG("STAT stall =%d nak =%d overflow =%d setup =%d", \
!!((r) & SL11_EPSTAT_STALL), !!((r) & SL11_EPSTAT_NAK), \
!!((r) & SL11_EPSTAT_OVERFLOW), !!((r) & SL11_EPSTAT_SETUP)); \
DDOLOG("STAT sequence=%d timeout =%d error =%d ack =%d", \
!!((r) & SL11_EPSTAT_SEQUENCE), !!((r) & SL11_EPSTAT_TIMEOUT), \
!!((r) & SL11_EPSTAT_ERROR), !!((r) & SL11_EPSTAT_ACK)); \
} while (0)
#else /* now !SLHCI_DEBUG */
#define slhcidebug 0
#define slhci_log_spipe(spipe) ((void)0)
#define slhci_log_xfer(xfer) ((void)0)
#define SLHCI_DEXEC(x, y) ((void)0)
#define DDOLOG(f, a, b, c, d) ((void)0)
#define DLOG(x, f, a, b, c, d) ((void)0)
#define DDOLOGBUF(b, l) ((void)0)
#define DLOGBUF(x, b, l) ((void)0)
#define DDOLOGCTRL(x) ((void)0)
#define DDOLOGISR(r) ((void)0)
#define DDOLOGIER(r) ((void)0)
#define DDOLOGSTATUS(s) ((void)0)
#define DDOLOGEPCTRL(r) ((void)0)
#define DDOLOGEPSTAT(r) ((void)0)
#endif /* SLHCI_DEBUG */
#ifdef DIAGNOSTIC
#define LK_SLASSERT(exp, sc, spipe, xfer, ext) do { \
if (!(exp)) { \
printf("%s: assertion %s failed line %u function %s!" \
" halted\n", SC_NAME(sc), #exp, __LINE__, __func__);\
slhci_halt(sc, spipe, xfer); \
ext; \
} \
} while (/*CONSTCOND*/0)
#define UL_SLASSERT(exp, sc, spipe, xfer, ext) do { \
if (!(exp)) { \
printf("%s: assertion %s failed line %u function %s!" \
" halted\n", SC_NAME(sc), #exp, __LINE__, __func__); \
slhci_lock_call(sc, &slhci_halt, spipe, xfer); \
ext; \
} \
} while (/*CONSTCOND*/0)
#else
#define LK_SLASSERT(exp, sc, spipe, xfer, ext) ((void)0)
#define UL_SLASSERT(exp, sc, spipe, xfer, ext) ((void)0)
#endif
/*
 * Bus-level methods registered with usbd.  The soft interrupt entry is
 * stubbed with slhci_void; see the comments at the top of the file for
 * how work is actually driven from the hard interrupt.
 */
const struct usbd_bus_methods slhci_bus_methods = {
	.ubm_open = slhci_open,
	.ubm_softint = slhci_void,
	.ubm_dopoll = slhci_poll,
	.ubm_allocx = slhci_allocx,
	.ubm_freex = slhci_freex,
	.ubm_getlock = slhci_get_lock,
	.ubm_rhctrl = slhci_roothub_ctrl,
};
/* Methods for regular (non-root-hub) device pipes. */
const struct usbd_pipe_methods slhci_pipe_methods = {
	.upm_transfer = slhci_transfer,
	.upm_start = slhci_start,
	.upm_abort = slhci_abort,
	.upm_close = slhci_close,
	.upm_cleartoggle = slhci_clear_toggle,
	.upm_done = slhci_done,
};
/*
 * Methods for the root hub's pipes.  Start differs from regular pipes
 * and close is a no-op (slhci_void, cast to match the signature).
 */
const struct usbd_pipe_methods slhci_root_methods = {
	.upm_transfer = slhci_transfer,
	.upm_start = slhci_root_start,
	.upm_abort = slhci_abort,
	.upm_close = (void (*)(struct usbd_pipe *))slhci_void, /* XXX safe? */
	.upm_cleartoggle = slhci_clear_toggle,
	.upm_done = slhci_done,
};
/* Queue inlines */
#define GOT_FIRST_TO(tvar, t) \
GCQ_GOT_FIRST_TYPED(tvar, &(t)->to, struct slhci_pipe, to)
#define FIND_TO(var, t, tvar, cond) \
GCQ_FIND_TYPED(var, &(t)->to, tvar, struct slhci_pipe, to, cond)
#define FOREACH_AP(var, t, tvar) \
GCQ_FOREACH_TYPED(var, &(t)->ap, tvar, struct slhci_pipe, ap)
#define GOT_FIRST_TIMED_COND(tvar, t, cond) \
GCQ_GOT_FIRST_COND_TYPED(tvar, &(t)->timed, struct slhci_pipe, xq, cond)
#define GOT_FIRST_CB(tvar, t) \
GCQ_GOT_FIRST_TYPED(tvar, &(t)->q[Q_CB], struct slhci_pipe, xq)
#define DEQUEUED_CALLBACK(tvar, t) \
GCQ_DEQUEUED_FIRST_TYPED(tvar, &(t)->q[Q_CALLBACKS], struct slhci_pipe, xq)
#define FIND_TIMED(var, t, tvar, cond) \
GCQ_FIND_TYPED(var, &(t)->timed, tvar, struct slhci_pipe, xq, cond)
#define DEQUEUED_WAITQ(tvar, sc) \
GCQ_DEQUEUED_FIRST_TYPED(tvar, &(sc)->sc_waitq, struct slhci_pipe, xq)
/* Append spipe to the softc's wait queue. */
static inline void
enter_waitq(struct slhci_softc *sc, struct slhci_pipe *spipe)
{
	gcq_insert_tail(&sc->sc_waitq, &spipe->xq);
}
/* Append spipe to transfer queue i (a Q_* index). */
static inline void
enter_q(struct slhci_transfers *t, struct slhci_pipe *spipe, int i)
{
	gcq_insert_tail(&t->q[i], &spipe->xq);
}
/* Append spipe to the callback queue. */
static inline void
enter_callback(struct slhci_transfers *t, struct slhci_pipe *spipe)
{
	gcq_insert_tail(&t->q[Q_CALLBACKS], &spipe->xq);
}
/* Append spipe to the list of all pipes. */
static inline void
enter_all_pipes(struct slhci_transfers *t, struct slhci_pipe *spipe)
{
	gcq_insert_tail(&t->ap, &spipe->ap);
}
/* Start out of lock functions. */
struct usbd_xfer *
slhci_allocx(struct usbd_bus *bus, unsigned int nframes)
{
SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
struct usbd_xfer *xfer;
xfer = kmem_zalloc(sizeof(*xfer), KM_SLEEP);
DLOG(D_MEM, "allocx %#jx", (uintptr_t)xfer, 0,0,0);
#ifdef SLHCI_MEM_ACCOUNTING
slhci_mem_use(bus, 1);
#endif
#ifdef DIAGNOSTIC
if (xfer != NULL)
xfer->ux_state = XFER_BUSY;
#endif
return xfer;
}
/*
 * usbd bus method: free an xfer allocated by slhci_allocx.  With
 * DIAGNOSTIC, an xfer that is neither marked busy nor never-started
 * indicates state corruption: report it and halt the controller rather
 * than freeing.
 */
void
slhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	DLOG(D_MEM, "freex xfer %#jx spipe %#jx",
	    (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe,0,0);
#ifdef SLHCI_MEM_ACCOUNTING
	slhci_mem_use(bus, -1);
#endif
#ifdef DIAGNOSTIC
	if (xfer->ux_state == XFER_BUSY ||
	    xfer->ux_status == USBD_NOT_STARTED) {
		xfer->ux_state = XFER_FREE;
	} else {
		struct slhci_softc *sc = SLHCI_BUS2SC(bus);

		printf("%s: slhci_freex: xfer=%p not busy, %#08x halted\n",
		    SC_NAME(sc), xfer, xfer->ux_state);
		DDOLOG("xfer=%p not busy, %#08x halted\n", xfer,
		    xfer->ux_state, 0, 0);
		slhci_lock_call(sc, &slhci_halt, NULL, NULL);
		return;
	}
#endif
	kmem_free(xfer, sizeof(*xfer));
}
/* usbd bus method: hand the bus lock (sc_lock) back to usbd. */
static void
slhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
{
	struct slhci_softc *sc;

	sc = SLHCI_BUS2SC(bus);
	*lock = &sc->sc_lock;
}
usbd_status
slhci_transfer(struct usbd_xfer *xfer)
{
SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
usbd_status error;
DLOG(D_TRACE, "transfer type %jd xfer %#jx spipe %#jx ",
SLHCI_XFER_TYPE(xfer), (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe,
0);
/* Pipe isn't running, so start it first. */
error = xfer->ux_pipe->up_methods->upm_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
return error;
}
/* It is not safe for start to return anything other than USBD_INPROG. */
usbd_status
slhci_start(struct usbd_xfer *xfer)
{
SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
struct slhci_softc *sc = SLHCI_XFER2SC(xfer);
struct usbd_pipe *pipe = xfer->ux_pipe;
struct slhci_pipe *spipe = SLHCI_PIPE2SPIPE(pipe);
struct slhci_transfers *t = &sc->sc_transfers;
usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
unsigned int max_packet;
KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
max_packet = UGETW(ed->wMaxPacketSize);
DLOG(D_TRACE, "transfer type %jd start xfer %#jx spipe %#jx length %jd",
spipe->ptype, (uintptr_t)xfer, (uintptr_t)spipe, xfer->ux_length);
/* root transfers use slhci_root_start */
KASSERT(spipe->xfer == NULL); /* not SLASSERT */
xfer->ux_actlen = 0;
xfer->ux_status = USBD_IN_PROGRESS;
spipe->xfer = xfer;
spipe->nerrs = 0;
spipe->frame = t->frame;
spipe->control = SL11_EPCTRL_ARM_ENABLE;
spipe->tregs[DEV] = pipe->up_dev->ud_addr;
spipe->tregs[PID] = spipe->newpid = UE_GET_ADDR(ed->bEndpointAddress)
| (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? SL11_PID_IN :
SL11_PID_OUT);
spipe->newlen[0] = xfer->ux_length % max_packet;
spipe->newlen[1] = uimin(xfer->ux_length, max_packet);
if (spipe->ptype == PT_BULK || spipe->ptype == PT_INTR) {
if (spipe->pflags & PF_TOGGLE)
spipe->control |= SL11_EPCTRL_DATATOGGLE;
spipe->tregs[LEN] = spipe->newlen[1];
if (spipe->tregs[LEN])
spipe->buffer = xfer->ux_buf;
else
spipe->buffer = NULL;
spipe->lastframe = t->frame;
if (spipe->ptype == PT_INTR) {
spipe->frame = spipe->lastframe +
spipe->pipe.up_interval;
}
#if defined(DEBUG) || defined(SLHCI_DEBUG)
if (__predict_false(spipe->ptype == PT_INTR &&
xfer->ux_length > spipe->tregs[LEN])) {
printf("%s: Long INTR transfer not supported!\n",
SC_NAME(sc));
DDOLOG("Long INTR transfer not supported!", 0, 0, 0, 0);
xfer->ux_status = USBD_INVAL;
}
#endif
} else {
/* ptype may be currently set to any control transfer type. */
SLHCI_DEXEC(D_TRACE, slhci_log_xfer(xfer));
/* SETUP contains IN/OUT bits also */
spipe->tregs[PID] |= SL11_PID_SETUP;
spipe->tregs[LEN] = 8;
spipe->buffer = (uint8_t *)&xfer->ux_request;
DLOGBUF(D_XFER, spipe->buffer, spipe->tregs[LEN]);
spipe->ptype = PT_CTRL_SETUP;
spipe->newpid &= ~SL11_PID_BITS;
if (xfer->ux_length == 0 ||
(xfer->ux_request.bmRequestType & UT_READ))
spipe->newpid |= SL11_PID_IN;
else
spipe->newpid |= SL11_PID_OUT;
}
if (xfer->ux_flags & USBD_FORCE_SHORT_XFER &&
spipe->tregs[LEN] == max_packet &&
(spipe->newpid & SL11_PID_BITS) == SL11_PID_OUT)
spipe->wantshort = 1;
else
spipe->wantshort = 0;
/*
* The goal of newbustime and newlen is to avoid bustime calculation
* in the interrupt. The calculations are not too complex, but they
* complicate the conditional logic somewhat and doing them all in the
* same place shares constants. Index 0 is "short length" for bulk and
* ctrl data and 1 is "full length" for ctrl data (bulk/intr are
* already set to full length).
*/
if (spipe->pflags & PF_LS) {
/*
* Setting PREAMBLE for directly connected LS devices will
* lock up the chip.
*/
if (spipe->pflags & PF_PREAMBLE)
spipe->control |= SL11_EPCTRL_PREAMBLE;
if (max_packet <= 8) {
spipe->bustime = SLHCI_LS_CONST +
SLHCI_LS_DATA_TIME(spipe->tregs[LEN]);
spipe->newbustime[0] = SLHCI_LS_CONST +
SLHCI_LS_DATA_TIME(spipe->newlen[0]);
spipe->newbustime[1] = SLHCI_LS_CONST +
SLHCI_LS_DATA_TIME(spipe->newlen[1]);
} else
xfer->ux_status = USBD_INVAL;
} else {
UL_SLASSERT(pipe->up_dev->ud_speed == USB_SPEED_FULL, sc,
spipe, xfer, return USBD_IN_PROGRESS);
if (max_packet <= SL11_MAX_PACKET_SIZE) {
spipe->bustime = SLHCI_FS_CONST +
SLHCI_FS_DATA_TIME(spipe->tregs[LEN]);
spipe->newbustime[0] = SLHCI_FS_CONST +
SLHCI_FS_DATA_TIME(spipe->newlen[0]);
spipe->newbustime[1] = SLHCI_FS_CONST +
SLHCI_FS_DATA_TIME(spipe->newlen[1]);
} else
xfer->ux_status = USBD_INVAL;
}
/*
* The datasheet incorrectly indicates that DIRECTION is for
* "transmit to host". It is for OUT and SETUP. The app note
* describes its use correctly.
*/
if ((spipe->tregs[PID] & SL11_PID_BITS) != SL11_PID_IN)
spipe->control |= SL11_EPCTRL_DIRECTION;
slhci_start_entry(sc, spipe);
return USBD_IN_PROGRESS;
}
/*
 * Start a root-hub interrupt transfer: simply park the xfer in
 * t->rootintr so port-change events can complete it later.
 * Always returns USBD_IN_PROGRESS.
 */
usbd_status
slhci_root_start(struct usbd_xfer *xfer)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_pipe *spipe __diagused = SLHCI_PIPE2SPIPE(xfer->ux_pipe);
    struct slhci_softc *sc = SLHCI_XFER2SC(xfer);
    struct slhci_transfers *tr = &sc->sc_transfers;

    LK_SLASSERT(spipe != NULL && xfer != NULL, sc, spipe, xfer, return
        USBD_CANCELLED);

    DLOG(D_TRACE, "transfer type %jd start",
        SLHCI_XFER_TYPE(xfer), 0, 0, 0);

    KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));
    KASSERT(spipe->ptype == PT_ROOT_INTR);
    KASSERT(tr->rootintr == NULL);

    /* Only one pending root interrupt transfer at a time. */
    tr->rootintr = xfer;
    xfer->ux_status = USBD_IN_PROGRESS;

    return USBD_IN_PROGRESS;
}
/*
 * Pipe open method: classify the pipe (root control/interrupt, or device
 * control/interrupt/bulk), record low-speed and preamble requirements,
 * validate the max packet size against the chip's limits, and initialize
 * the slhci_pipe queue links.  Isochronous pipes are rejected with a
 * warning; low-speed devices behind a hub are rejected unless
 * slhci_try_lsvh is set (both known-problematic on this chip).
 */
usbd_status
slhci_open(struct usbd_pipe *pipe)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct usbd_device *dev;
    struct slhci_softc *sc;
    struct slhci_pipe *spipe;
    usb_endpoint_descriptor_t *ed;
    unsigned int max_packet, pmaxpkt;
    uint8_t rhaddr;

    dev = pipe->up_dev;
    sc = SLHCI_PIPE2SC(pipe);
    spipe = SLHCI_PIPE2SPIPE(pipe);
    ed = pipe->up_endpoint->ue_edesc;
    rhaddr = dev->ud_bus->ub_rhaddr;

    DLOG(D_TRACE, "slhci_open(addr=%jd,ep=%jd,rootaddr=%jd)",
        dev->ud_addr, ed->bEndpointAddress, rhaddr, 0);

    spipe->pflags = 0;
    spipe->frame = 0;
    spipe->lastframe = 0;
    spipe->xfer = NULL;
    spipe->buffer = NULL;

    gcq_init(&spipe->ap);
    gcq_init(&spipe->to);
    gcq_init(&spipe->xq);

    /*
     * The endpoint descriptor will not have been set up yet in the case
     * of the standard control pipe, so the max packet checks are also
     * necessary in start.
     */

    max_packet = UGETW(ed->wMaxPacketSize);

    if (dev->ud_speed == USB_SPEED_LOW) {
        spipe->pflags |= PF_LS;
        /* LS device not directly attached to the root: needs preamble. */
        if (dev->ud_myhub->ud_addr != rhaddr) {
            spipe->pflags |= PF_PREAMBLE;
            if (!slhci_try_lsvh)
                return slhci_lock_call(sc, &slhci_lsvh_warn,
                    spipe, NULL);
        }
        pmaxpkt = 8; /* low-speed limit */
    } else
        pmaxpkt = SL11_MAX_PACKET_SIZE;

    if (max_packet > pmaxpkt) {
        DLOG(D_ERR, "packet too large! size %jd spipe %#jx", max_packet,
            (uintptr_t)spipe, 0,0);
        return USBD_INVAL;
    }

    if (dev->ud_addr == rhaddr) {
        /* Root hub endpoints are handled without touching hardware. */
        switch (ed->bEndpointAddress) {
        case USB_CONTROL_ENDPOINT:
            spipe->ptype = PT_ROOT_CTRL;
            pipe->up_interval = 0;
            pipe->up_methods = &roothub_ctrl_methods;
            break;
        case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
            spipe->ptype = PT_ROOT_INTR;
            pipe->up_interval = 1;
            pipe->up_methods = &slhci_root_methods;
            break;
        default:
            printf("%s: Invalid root endpoint!\n", SC_NAME(sc));
            DDOLOG("Invalid root endpoint", 0, 0, 0, 0);
            return USBD_INVAL;
        }
        return USBD_NORMAL_COMPLETION;
    } else {
        switch (ed->bmAttributes & UE_XFERTYPE) {
        case UE_CONTROL:
            spipe->ptype = PT_CTRL_SETUP;
            pipe->up_interval = 0;
            break;
        case UE_INTERRUPT:
            spipe->ptype = PT_INTR;
            if (pipe->up_interval == USBD_DEFAULT_INTERVAL)
                pipe->up_interval = ed->bInterval;
            break;
        case UE_ISOCHRONOUS:
            /* Isochronous transfers are not supported by this driver. */
            return slhci_lock_call(sc, &slhci_isoc_warn, spipe,
                NULL);
        case UE_BULK:
            spipe->ptype = PT_BULK;
            pipe->up_interval = 0;
            break;
        }

        DLOG(D_MSG, "open pipe type %jd interval %jd", spipe->ptype,
            pipe->up_interval, 0,0);

        pipe->up_methods = __UNCONST(&slhci_pipe_methods);

        return slhci_lock_call(sc, &slhci_open_pipe, spipe, NULL);
    }
}
/*
 * Return nonzero if the chip revision code is one this driver supports
 * (SL811HS revisions 1.2 through 1.5).
 */
int
slhci_supported_rev(uint8_t rev)
{
    if (rev < SLTYPE_SL811HS_R12)
        return 0;
    return rev <= SLTYPE_SL811HS_R15;
}
/*
 * Must be called before the ISR is registered.  Interrupts can be shared so
 * slhci_intr could be called as soon as the ISR is registered.
 * Note max_current argument is actual current, but stored as current/2
 */
/*
 * One-time soft-state initialization: locks, transfer queues, bus-space
 * access parameters, and the (clamped) per-port current budget.
 *
 * pow         - optional board power-control callback (may be NULL)
 * iot/ioh     - bus-space mapping of the chip's two registers
 * max_current - port current limit in mA; clamped to the USB max of 500
 * stride      - byte offset between the address and data registers
 */
void
slhci_preinit(struct slhci_softc *sc, PowerFunc pow, bus_space_tag_t iot,
    bus_space_handle_t ioh, uint16_t max_current, uint32_t stride)
{
    struct slhci_transfers *t;
    int i;

    t = &sc->sc_transfers;

#ifdef SLHCI_DEBUG
    ssc = sc;
#endif
    mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
    mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);

    /* sc->sc_ier = 0; */
    /* t->rootintr = NULL; */
    t->flags = F_NODEV|F_UDISABLED;
    /* INT_MAX means "no transfer completion pending" for waitintr. */
    t->pend = INT_MAX;
    KASSERT(slhci_wait_time != INT_MAX);
    t->len[0] = t->len[1] = -1;
    if (max_current > 500)
        max_current = 500;
    /* Stored in units of 2 mA, matching the hub descriptor encoding. */
    t->max_current = (uint8_t)(max_current / 2);
    sc->sc_enable_power = pow;
    sc->sc_iot = iot;
    sc->sc_ioh = ioh;
    sc->sc_stride = stride;

    KASSERT(Q_MAX+1 == sizeof(t->q) / sizeof(t->q[0]));

    for (i = 0; i <= Q_MAX; i++)
        gcq_init_head(&t->q[i]);
    gcq_init_head(&t->timed);
    gcq_init_head(&t->to);
    gcq_init_head(&t->ap);
    gcq_init_head(&sc->sc_waitq);
}
/*
 * Attach the controller: probe and validate the chip revision, optionally
 * run a memory test, set up the reset callout and callback soft interrupt,
 * fill in the usbd_bus methods, and attach the USB bus (usb/uhub) child.
 *
 * Returns 0 on success, -1 on failure (unsupported chip, bus error, or
 * child attach failure).  slhci_preinit must have been called first.
 */
int
slhci_attach(struct slhci_softc *sc)
{
    struct slhci_transfers *t;
    const char *rev;

    t = &sc->sc_transfers;

    /* Detect and check the controller type */
    t->sltype = SL11_GET_REV(slhci_read(sc, SL11_REV));

    /* SL11H not supported */
    if (!slhci_supported_rev(t->sltype)) {
        if (t->sltype == SLTYPE_SL11H)
            printf("%s: SL11H unsupported or bus error!\n",
                SC_NAME(sc));
        else
            printf("%s: Unknown chip revision!\n", SC_NAME(sc));
        return -1;
    }

#ifdef SLHCI_DEBUG
    if (slhci_memtest(sc)) {
        printf("%s: memory/bus error!\n", SC_NAME(sc));
        return -1;
    }
#endif

    callout_init(&sc->sc_timer, CALLOUT_MPSAFE);
    callout_setfunc(&sc->sc_timer, slhci_reset_entry, sc);

    /*
     * It is not safe to call the soft interrupt directly as
     * usb_schedsoftintr does in the ub_usepolling case (due to locking).
     */
    sc->sc_cb_softintr = softint_establish(SOFTINT_NET,
        slhci_callback_entry, sc);

    if (t->sltype == SLTYPE_SL811HS_R12)
        rev = "(rev 1.2)";
    else if (t->sltype == SLTYPE_SL811HS_R14)
        rev = "(rev 1.4 or 1.5)";
    else
        rev = "(unknown revision)";

    aprint_normal("%s: ScanLogic SL811HS/T USB Host Controller %s\n",
        SC_NAME(sc), rev);

    aprint_normal("%s: Max Current %u mA (value by code, not by probe)\n",
        SC_NAME(sc), t->max_current * 2);

#if defined(SLHCI_DEBUG) || defined(SLHCI_NO_OVERTIME) || \
    defined(SLHCI_TRY_LSVH) || defined(SLHCI_PROFILE_TRANSFER)
    aprint_normal("%s: driver options:"
#ifdef SLHCI_DEBUG
        " SLHCI_DEBUG"
#endif
#ifdef SLHCI_TRY_LSVH
        " SLHCI_TRY_LSVH"
#endif
#ifdef SLHCI_NO_OVERTIME
        " SLHCI_NO_OVERTIME"
#endif
#ifdef SLHCI_PROFILE_TRANSFER
        " SLHCI_PROFILE_TRANSFER"
#endif
        "\n", SC_NAME(sc));
#endif
    sc->sc_bus.ub_revision = USBREV_1_1;
    sc->sc_bus.ub_methods = __UNCONST(&slhci_bus_methods);
    sc->sc_bus.ub_pipesize = sizeof(struct slhci_pipe);
    sc->sc_bus.ub_usedma = false;

    /* With no power-control callback, power is assumed always on. */
    if (!sc->sc_enable_power)
        t->flags |= F_REALPOWER;

    t->flags |= F_ACTIVE;

    /* Attach usb and uhub. */
    sc->sc_child = config_found(SC_DEV(sc), &sc->sc_bus, usbctlprint,
        CFARGS_NONE);

    if (!sc->sc_child)
        return -1;
    else
        return 0;
}
/*
 * Detach the controller: wait out any in-flight reset callout or callback
 * soft interrupt, tear down the soft interrupt and locks, then detach the
 * child bus.  The controller must already be halted (F_ACTIVE clear) and
 * no further bus access may occur.  Returns config_detach's status.
 */
int
slhci_detach(struct slhci_softc *sc, int flags)
{
    struct slhci_transfers *t;
    int ret;

    t = &sc->sc_transfers;

    /* By this point bus access is no longer allowed. */
    KASSERT(!(t->flags & F_ACTIVE));

    /*
     * To be MPSAFE is not sufficient to cancel callouts and soft
     * interrupts and assume they are dead since the code could already be
     * running or about to run.  Wait until they are known to be done.
     */
    /*
     * NOTE(review): this sleeps on &sc (address of the local pointer);
     * no wakeup() on that channel is visible here, so this is effectively
     * a 1-second (hz) polling delay per iteration.
     */
    while (t->flags & (F_RESET|F_CALLBACK))
        tsleep(&sc, PPAUSE, "slhci_detach", hz);

    softint_disestablish(sc->sc_cb_softintr);

    mutex_destroy(&sc->sc_lock);
    mutex_destroy(&sc->sc_intr_lock);

    ret = 0;
    if (sc->sc_child)
        ret = config_detach(sc->sc_child, flags);

#ifdef SLHCI_MEM_ACCOUNTING
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    if (sc->sc_mem_use) {
        printf("%s: Memory still in use after detach! mem_use (count)"
            " = %d\n", SC_NAME(sc), sc->sc_mem_use);
        DDOLOG("Memory still in use after detach! mem_use (count)"
            " = %d", sc->sc_mem_use, 0, 0, 0);
    }
#endif

    return ret;
}
/*
 * devact handler: on DVACT_DEACTIVATE halt the controller under the lock;
 * all other actions are unsupported.
 */
int
slhci_activate(device_t self, enum devact act)
{
    struct slhci_softc *sc = device_private(self);

    if (act != DVACT_DEACTIVATE)
        return EOPNOTSUPP;

    slhci_lock_call(sc, &slhci_halt, NULL, NULL);
    return 0;
}
/*
 * Abort a transfer: detach it from the controller soft state under the
 * interrupt lock, then complete it with USBD_CANCELLED.  Called with the
 * bus lock (sc_lock) held.  If the pipe soft state is missing, the xfer
 * is completed directly without touching the hardware state.
 */
void
slhci_abort(struct usbd_xfer *xfer)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_softc *sc;
    struct slhci_pipe *spipe;

    spipe = SLHCI_PIPE2SPIPE(xfer->ux_pipe);

    if (spipe == NULL)
        goto callback;

    sc = SLHCI_XFER2SC(xfer);

    KASSERT(mutex_owned(&sc->sc_lock));

    DLOG(D_TRACE, "transfer type %jd abort xfer %#jx spipe %#jx "
        " spipe->xfer %#jx", spipe->ptype, (uintptr_t)xfer,
        (uintptr_t)spipe, (uintptr_t)spipe->xfer);

    slhci_lock_call(sc, &slhci_do_abort, spipe, xfer);

callback:
    xfer->ux_status = USBD_CANCELLED;
    usb_transfer_complete(xfer);
}
/*
 * Pipe close method: perform the actual close under the interrupt lock
 * via slhci_close_pipe.
 */
void
slhci_close(struct usbd_pipe *pipe)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_softc *sc = SLHCI_PIPE2SC(pipe);
    struct slhci_pipe *spipe = SLHCI_PIPE2SPIPE(pipe);

    DLOG(D_TRACE, "transfer type %jd close spipe %#jx spipe->xfer %#jx",
        spipe->ptype, (uintptr_t)spipe, (uintptr_t)spipe->xfer, 0);

    slhci_lock_call(sc, &slhci_close_pipe, spipe, NULL);
}
/*
 * Reset a pipe's data toggle to DATA0.  Clearing the toggle while a
 * transfer is in progress indicates a driver bug, so DIAGNOSTIC kernels
 * halt the controller in that case.
 */
void
slhci_clear_toggle(struct usbd_pipe *pipe)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_pipe *spipe;

    spipe = SLHCI_PIPE2SPIPE(pipe);

    DLOG(D_TRACE, "transfer type %jd toggle spipe %#jx", spipe->ptype,
        (uintptr_t)spipe, 0, 0);

    spipe->pflags &= ~PF_TOGGLE;

#ifdef DIAGNOSTIC
    if (spipe->xfer != NULL) {
        struct slhci_softc *sc = (struct slhci_softc
            *)pipe->up_dev->ud_bus;

        printf("%s: Clear toggle on transfer in progress! halted\n",
            SC_NAME(sc));
        DDOLOG("Clear toggle on transfer in progress! halted",
            0, 0, 0, 0);
        slhci_halt(sc, NULL, NULL);
    }
#endif
}
/*
 * Bus poll method: run one polled pass of the interrupt/transfer machinery
 * under the interrupt lock.
 */
void
slhci_poll(struct usbd_bus *bus) /* XXX necessary? */
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_softc *sc = SLHCI_BUS2SC(bus);

    DLOG(D_TRACE, "slhci_poll", 0,0,0,0);

    slhci_lock_call(sc, &slhci_do_poll, NULL, NULL);
}
/* Transfer-done bus method: nothing to do for this controller. */
void
slhci_done(struct usbd_xfer *xfer)
{
}
/* No-op placeholder used where a void(void *) method is required. */
void
slhci_void(void *v) {}
/* End out of lock functions. Start lock entry functions. */
#ifdef SLHCI_MEM_ACCOUNTING
/*
 * Debug accounting: adjust the outstanding-allocation counter by val
 * (+1 on alloc, -1 on free) under the interrupt lock.  A nonzero count
 * at detach time is reported in slhci_detach.
 */
void
slhci_mem_use(struct usbd_bus *bus, int val)
{
    struct slhci_softc *sc = SLHCI_BUS2SC(bus);

    mutex_enter(&sc->sc_intr_lock);
    sc->sc_mem_use += val;
    mutex_exit(&sc->sc_intr_lock);
}
#endif
/*
 * Callout handler: perform a controller reset under the interrupt lock,
 * then arrange for callbacks to run.
 */
void
slhci_reset_entry(void *arg)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_softc *sc = arg;

    mutex_enter(&sc->sc_intr_lock);
    slhci_reset(sc);
    /*
     * Do not invoke the callback in line: another reset could arrive
     * before we finish, and the callout delay is needed for timing.
     * Re-arming the callout before returning would also break the reap
     * logic, since we could be unlocked with the reset flag clear.  The
     * callback path checks the wait queue itself.
     */
    slhci_callback_schedule(sc);
    mutex_exit(&sc->sc_intr_lock);
}
/*
 * Run lcf(sc, spipe, xfer) with the interrupt lock held, then run the main
 * transfer loop before dropping the lock.  Returns lcf's status.
 */
usbd_status
slhci_lock_call(struct slhci_softc *sc, LockCallFunc lcf, struct slhci_pipe
    *spipe, struct usbd_xfer *xfer)
{
    usbd_status status;

    mutex_enter(&sc->sc_intr_lock);
    status = (*lcf)(sc, spipe, xfer);
    slhci_main(sc);
    mutex_exit(&sc->sc_intr_lock);

    return status;
}
/*
 * Queue a pipe for transfer under the interrupt lock.  If either hardware
 * transfer slot (A/B) is busy, the pipe is parked on the wait queue and
 * picked up later; otherwise it is entered and started immediately.
 */
void
slhci_start_entry(struct slhci_softc *sc, struct slhci_pipe *spipe)
{
    struct slhci_transfers *t = &sc->sc_transfers;

    mutex_enter(&sc->sc_intr_lock);

    if (t->flags & (F_AINPROG|F_BINPROG)) {
        enter_waitq(sc, spipe);
    } else {
        slhci_enter_xfer(sc, spipe);
        slhci_dotransfer(sc);
        slhci_main(sc);
    }

    mutex_exit(&sc->sc_intr_lock);
}
/*
 * Soft-interrupt handler: run completion callbacks, and keep restarting
 * waiting transfers until the wait queue drains.  Clears F_CALLBACK so the
 * detach path can tell when this handler is done.
 */
void
slhci_callback_entry(void *arg)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_softc *sc = arg;
    struct slhci_transfers *t = &sc->sc_transfers;

    mutex_enter(&sc->sc_intr_lock);
    DLOG(D_SOFT, "callback_entry flags %#jx", t->flags, 0,0,0);

    for (;;) {
        slhci_callback(sc);

        if (gcq_empty(&sc->sc_waitq))
            break;

        slhci_enter_xfers(sc);
        slhci_dotransfer(sc);
        slhci_waitintr(sc, 0);
    }

    t->flags &= ~F_CALLBACK;
    mutex_exit(&sc->sc_intr_lock);
}
/*
 * Complete one xfer.  usb_transfer_complete must be called with sc_lock
 * (not the interrupt lock) held, so the interrupt lock is dropped and
 * re-taken around it.  Called and returns with the interrupt lock held;
 * state protected by it may change while unlocked.
 */
void
slhci_do_callback(struct slhci_softc *sc, struct usbd_xfer *xfer)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    KASSERT(mutex_owned(&sc->sc_intr_lock));

    start_cc_time(&t_callback, (u_int)xfer);
    mutex_exit(&sc->sc_intr_lock);

    mutex_enter(&sc->sc_lock);
    usb_transfer_complete(xfer);
    mutex_exit(&sc->sc_lock);

    mutex_enter(&sc->sc_intr_lock);
    stop_cc_time(&t_callback);
}
/*
 * Hard interrupt handler.  Processes chip interrupts until none remain,
 * running the main transfer loop after each pass.  Returns nonzero if any
 * interrupt was handled (for shared-interrupt dispatch).
 */
int
slhci_intr(void *arg)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_softc *sc = arg;
    int handled = 0;
    int did;

    start_cc_time(&t_hard_int, (unsigned int)arg);
    mutex_enter(&sc->sc_intr_lock);

    for (;;) {
        did = slhci_dointr(sc);
        handled |= did;
        slhci_main(sc);
        if (!did)
            break;
    }

    mutex_exit(&sc->sc_intr_lock);
    stop_cc_time(&t_hard_int);

    return handled;
}
/* called with interrupt lock only held. */
/*
 * Main transfer loop: poll-wait for pending transfer completions, run or
 * schedule completion callbacks, and restart transfers from the wait
 * queue, repeating until the wait queue is empty.
 */
void
slhci_main(struct slhci_softc *sc)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_transfers *t;

    t = &sc->sc_transfers;

    KASSERT(mutex_owned(&sc->sc_intr_lock));

waitcheck:
    slhci_waitintr(sc, slhci_wait_time);

    /*
     * The direct call is needed in the ub_usepolling and disabled cases
     * since the soft interrupt is not available.  In the disabled case,
     * this code can be reached from the usb detach, after the reaping of
     * the soft interrupt.  That test could be !F_ACTIVE, but there is no
     * reason not to make the callbacks directly in the other DISABLED
     * cases.
     */
    if ((t->flags & F_ROOTINTR) || !gcq_empty(&t->q[Q_CALLBACKS])) {
        if (__predict_false(sc->sc_bus.ub_usepolling ||
            t->flags & F_DISABLED))
            slhci_callback(sc);
        else
            slhci_callback_schedule(sc);
    }

    if (!gcq_empty(&sc->sc_waitq)) {
        slhci_enter_xfers(sc);
        slhci_dotransfer(sc);
        goto waitcheck;
    }

    DLOG(D_INTR, "... done", 0, 0, 0, 0);
}
/* End lock entry functions. Start in lock function. */
/* Register read/write routines and barriers. */
#ifdef SLHCI_BUS_SPACE_BARRIERS
#define BSB(a, b, c, d, e) bus_space_barrier(a, b, c, d, BUS_SPACE_BARRIER_ # e)
#define BSB_SYNC(a, b, c, d) bus_space_barrier(a, b, c, d, BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE)
#else /* now !SLHCI_BUS_SPACE_BARRIERS */
#define BSB(a, b, c, d, e) __USE(d)
#define BSB_SYNC(a, b, c, d)
#endif /* SLHCI_BUS_SPACE_BARRIERS */
/*
 * Write one chip register: latch the register index into the address
 * port, then write the value to the data port (at sc_stride bytes
 * offset), with write barriers between accesses when enabled.
 */
static void
slhci_write(struct slhci_softc *sc, uint8_t addr, uint8_t data)
{
    bus_space_tag_t iot = sc->sc_iot;
    bus_space_handle_t ioh = sc->sc_ioh;
    bus_size_t pdata = sc->sc_stride;
    bus_size_t psz = pdata * 2;

    bus_space_write_1(iot, ioh, 0, addr);
    BSB(iot, ioh, 0, psz, WRITE_BEFORE_WRITE);
    bus_space_write_1(iot, ioh, pdata, data);
    BSB(iot, ioh, 0, psz, WRITE_BEFORE_WRITE);
}
/*
 * Read one chip register: latch the register index into the address
 * port, then read the value from the data port (at sc_stride bytes
 * offset), with barriers between accesses when enabled.
 */
static uint8_t
slhci_read(struct slhci_softc *sc, uint8_t addr)
{
    bus_space_tag_t iot = sc->sc_iot;
    bus_space_handle_t ioh = sc->sc_ioh;
    bus_size_t pdata = sc->sc_stride;
    bus_size_t psz = pdata * 2;
    uint8_t value;

    bus_space_write_1(iot, ioh, 0, addr);
    BSB(iot, ioh, 0, psz, WRITE_BEFORE_READ);
    value = bus_space_read_1(iot, ioh, pdata);
    BSB(iot, ioh, 0, psz, READ_BEFORE_WRITE);

    return value;
}
#if 0 /* auto-increment mode broken, see errata doc */
/*
 * Disabled variants using the chip's auto-increment mode: one address
 * write followed by a multi-byte data access.  Kept for reference; the
 * errata document says auto-increment does not work.
 */
static void
slhci_write_multi(struct slhci_softc *sc, uint8_t addr, uint8_t *buf, int l)
{
    bus_size_t paddr, pdata, pst, psz;
    bus_space_tag_t iot;
    bus_space_handle_t ioh;

    paddr = pst = 0;
    pdata = sc->sc_stride;
    psz = pdata * 2;
    iot = sc->sc_iot;
    ioh = sc->sc_ioh;

    bus_space_write_1(iot, ioh, paddr, addr);
    BSB(iot, ioh, pst, psz, WRITE_BEFORE_WRITE);
    bus_space_write_multi_1(iot, ioh, pdata, buf, l);
    BSB(iot, ioh, pst, psz, WRITE_BEFORE_WRITE);
}

static void
slhci_read_multi(struct slhci_softc *sc, uint8_t addr, uint8_t *buf, int l)
{
    bus_size_t paddr, pdata, pst, psz;
    bus_space_tag_t iot;
    bus_space_handle_t ioh;

    paddr = pst = 0;
    pdata = sc->sc_stride;
    psz = pdata * 2;
    iot = sc->sc_iot;
    ioh = sc->sc_ioh;

    bus_space_write_1(iot, ioh, paddr, addr);
    BSB(iot, ioh, pst, psz, WRITE_BEFORE_READ);
    bus_space_read_multi_1(iot, ioh, pdata, buf, l);
    BSB(iot, ioh, pst, psz, READ_BEFORE_WRITE);
}
#else
/*
 * Active variants: copy l bytes to/from consecutive chip buffer addresses
 * starting at addr, one register access pair per byte (slow but safe,
 * since auto-increment mode is broken).
 */
static void
slhci_write_multi(struct slhci_softc *sc, uint8_t addr, uint8_t *buf, int l)
{
#if 1
    for (; l; addr++, buf++, l--)
        slhci_write(sc, addr, *buf);
#else
    /* Open-coded alternative that avoids the per-byte function call. */
    bus_size_t paddr, pdata, pst, psz;
    bus_space_tag_t iot;
    bus_space_handle_t ioh;

    paddr = pst = 0;
    pdata = sc->sc_stride;
    psz = pdata * 2;
    iot = sc->sc_iot;
    ioh = sc->sc_ioh;

    for (; l; addr++, buf++, l--) {
        bus_space_write_1(iot, ioh, paddr, addr);
        BSB(iot, ioh, pst, psz, WRITE_BEFORE_WRITE);
        bus_space_write_1(iot, ioh, pdata, *buf);
        BSB(iot, ioh, pst, psz, WRITE_BEFORE_WRITE);
    }
#endif
}

static void
slhci_read_multi(struct slhci_softc *sc, uint8_t addr, uint8_t *buf, int l)
{
#if 1
    for (; l; addr++, buf++, l--)
        *buf = slhci_read(sc, addr);
#else
    /* Open-coded alternative that avoids the per-byte function call. */
    bus_size_t paddr, pdata, pst, psz;
    bus_space_tag_t iot;
    bus_space_handle_t ioh;

    paddr = pst = 0;
    pdata = sc->sc_stride;
    psz = pdata * 2;
    iot = sc->sc_iot;
    ioh = sc->sc_ioh;

    for (; l; addr++, buf++, l--) {
        bus_space_write_1(iot, ioh, paddr, addr);
        BSB(iot, ioh, pst, psz, WRITE_BEFORE_READ);
        *buf = bus_space_read_1(iot, ioh, pdata);
        BSB(iot, ioh, pst, psz, READ_BEFORE_WRITE);
    }
#endif
}
#endif
/*
 * After calling waitintr it is necessary to either call slhci_callback or
 * schedule the callback if necessary.  The callback cannot be called directly
 * from the hard interrupt since it interrupts at a high IPL and callbacks
 * can do copyout and such.
 */
/*
 * Busy-wait for a transfer completion when the remaining bus time (t->pend)
 * is within wait_time.  In polling mode the wait time is forced large
 * (12000) so every pending transfer is reaped synchronously.
 */
static void
slhci_waitintr(struct slhci_softc *sc, int wait_time)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_transfers *t;

    t = &sc->sc_transfers;

    KASSERT(mutex_owned(&sc->sc_intr_lock));

    if (__predict_false(sc->sc_bus.ub_usepolling))
        wait_time = 12000;

    /* t->pend is reset to INT_MAX once the transfer completes. */
    while (t->pend <= wait_time) {
        DLOG(D_WAIT, "waiting... frame %jd pend %jd flags %#jx",
            t->frame, t->pend, t->flags, 0);
        LK_SLASSERT(t->flags & F_ACTIVE, sc, NULL, NULL, return);
        LK_SLASSERT(t->flags & (F_AINPROG|F_BINPROG), sc, NULL, NULL,
            return);
        slhci_dointr(sc);
    }

    DLOG(D_WAIT, "... done", 0, 0, 0, 0);
}
/*
 * Process one round of chip interrupts: acknowledge the ISR bits, handle
 * insertion/removal, SOF bookkeeping (frame count, timeouts, missed
 * completion detection), and A/B transfer completion, then start any new
 * transfers.  Returns nonzero if any enabled interrupt was handled.
 *
 * Fix: the BSB_SYNC after acknowledging the ISR referenced nonexistent
 * softc members (sc->iot, sc->ioh, sc->pst, sc->psz); the fields are
 * sc_iot/sc_ioh and the offset/size convention used by slhci_read/write
 * is 0 / sc_stride * 2.  BSB_SYNC expands to nothing unless
 * SLHCI_BUS_SPACE_BARRIERS is defined, which is why this ever compiled.
 */
static int
slhci_dointr(struct slhci_softc *sc)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_transfers *t;
    struct slhci_pipe *tosp;
    uint8_t r;

    t = &sc->sc_transfers;

    KASSERT(mutex_owned(&sc->sc_intr_lock));

    if (sc->sc_ier == 0) {
        DLOG(D_INTR, "sc_ier is zero", 0, 0, 0, 0);
        return 0;
    }

    r = slhci_read(sc, SL11_ISR);

#ifdef SLHCI_DEBUG
    if (slhcidebug & SLHCI_D_INTR && r & sc->sc_ier &&
        ((r & ~(SL11_ISR_SOF|SL11_ISR_DATA)) || slhcidebug & SLHCI_D_SOF)) {
        uint8_t e, f;

        e = slhci_read(sc, SL11_IER);
        f = slhci_read(sc, SL11_CTRL);
        DDOLOG("Flags=%#x IER=%#x ISR=%#x CTRL=%#x", t->flags, e, r, f);
        DDOLOGCTRL(f);
        DDOLOGISR(r);
    }
#endif

    /*
     * check IER for corruption occasionally.  Assume that the above
     * sc_ier == 0 case works correctly.
     */
    if (__predict_false(sc->sc_ier_check++ > SLHCI_IER_CHECK_FREQUENCY)) {
        sc->sc_ier_check = 0;
        if (sc->sc_ier != slhci_read(sc, SL11_IER)) {
            printf("%s: IER value corrupted! halted\n",
                SC_NAME(sc));
            DDOLOG("IER value corrupted! halted", 0, 0, 0, 0);
            slhci_halt(sc, NULL, NULL);
            return 1;
        }
    }

    r &= sc->sc_ier;

    if (r == 0) {
        DLOG(D_INTR, "r is zero", 0, 0, 0, 0);
        return 0;
    }

    sc->sc_ier_check = 0;

    /* Acknowledge the interrupts we are about to handle. */
    slhci_write(sc, SL11_ISR, r);
    /* Fixed: was BSB_SYNC(sc->iot, sc->ioh, sc->pst, sc->psz) — those
     * members do not exist; use the same offset/size as slhci_read/write. */
    BSB_SYNC(sc->sc_iot, sc->sc_ioh, 0, sc->sc_stride * 2);

    /* If we have an insertion event we do not care about anything else. */
    if (__predict_false(r & SL11_ISR_INSERT)) {
        slhci_insert(sc);
        DLOG(D_INTR, "... done", 0, 0, 0, 0);
        return 1;
    }

    stop_cc_time(&t_intr);
    start_cc_time(&t_intr, r);

    if (r & SL11_ISR_SOF) {
        t->frame++;

        /* Transfers deferred to the next frame become runnable now. */
        gcq_merge_tail(&t->q[Q_CB], &t->q[Q_NEXT_CB]);

        /*
         * SOFCHECK flags are cleared in tstart.  Two flags are needed
         * since the first SOF interrupt processed after the transfer
         * is started might have been generated before the transfer
         * was started.
         */
        if (__predict_false(t->flags & F_SOFCHECK2 && t->flags &
            (F_AINPROG|F_BINPROG))) {
            printf("%s: Missed transfer completion. halted\n",
                SC_NAME(sc));
            DDOLOG("Missed transfer completion. halted", 0, 0, 0,
                0);
            slhci_halt(sc, NULL, NULL);
            return 1;
        } else if (t->flags & F_SOFCHECK1) {
            t->flags |= F_SOFCHECK2;
        } else
            t->flags |= F_SOFCHECK1;

        if (t->flags & F_CHANGE)
            t->flags |= F_ROOTINTR;

        /* Expire timed transfers whose deadline frame has passed. */
        while (__predict_true(GOT_FIRST_TO(tosp, t)) &&
            __predict_false(tosp->to_frame <= t->frame)) {
            tosp->xfer->ux_status = USBD_TIMEOUT;
            slhci_do_abort(sc, tosp, tosp->xfer);
            enter_callback(t, tosp);
        }

        /*
         * Start any waiting transfers right away.  If none, we will
         * start any new transfers later.
         */
        slhci_tstart(sc);
    }

    if (r & (SL11_ISR_USBA|SL11_ISR_USBB)) {
        int ab;

        if ((r & (SL11_ISR_USBA|SL11_ISR_USBB)) ==
            (SL11_ISR_USBA|SL11_ISR_USBB)) {
            if (!(t->flags & (F_AINPROG|F_BINPROG)))
                return 1; /* presume card pulled */

            LK_SLASSERT((t->flags & (F_AINPROG|F_BINPROG)) !=
                (F_AINPROG|F_BINPROG), sc, NULL, NULL, return 1);

            /*
             * This should never happen (unless card removal just
             * occurred) but appeared frequently when both
             * transfers were started at the same time and was
             * accompanied by data corruption.  It still happens
             * at times.  I have not seen data correption except
             * when the STATUS bit gets set, which now causes the
             * driver to halt, however this should still not
             * happen so the warning is kept.  See comment in
             * abdone, below.
             */
            printf("%s: Transfer reported done but not started! "
                "Verify data integrity if not detaching. "
                " flags %#x r %x\n", SC_NAME(sc), t->flags, r);

            if (!(t->flags & F_AINPROG))
                r &= ~SL11_ISR_USBA;
            else
                r &= ~SL11_ISR_USBB;
        }

        t->pend = INT_MAX;

        if (r & SL11_ISR_USBA)
            ab = A;
        else
            ab = B;

        /*
         * This happens when a low speed device is attached to
         * a hub with chip rev 1.5.  SOF stops, but a few transfers
         * still work before causing this error.
         */
        if (!(t->flags & (ab ? F_BINPROG : F_AINPROG))) {
            printf("%s: %s done but not in progress! halted\n",
                SC_NAME(sc), ab ? "B" : "A");
            DDOLOG("AB=%d done but not in progress! halted", ab,
                0, 0, 0);
            slhci_halt(sc, NULL, NULL);
            return 1;
        }

        t->flags &= ~(ab ? F_BINPROG : F_AINPROG);
        slhci_tstart(sc);
        stop_cc_time(&t_ab[ab]);
        start_cc_time(&t_abdone, t->flags);
        slhci_abdone(sc, ab);
        stop_cc_time(&t_abdone);
    }

    slhci_dotransfer(sc);

    DLOG(D_INTR, "... done", 0, 0, 0, 0);

    return 1;
}
/*
 * Handle completion of hardware transfer slot A or B: read the endpoint
 * status, classify the result (error/NAK/stall/timeout, control-stage
 * advance, data continuation, or final completion), update the pipe state
 * machine, and queue the pipe on the appropriate list (Q_CALLBACKS,
 * Q_CB, Q_NEXT_CB, or the timed queue for NAK retry).  For IN transfers,
 * the received bytes are copied out of chip memory after the next
 * transfer has been started, to overlap the slow byte-wise copy with bus
 * activity.  On invalid chip state the controller is halted.
 */
static void
slhci_abdone(struct slhci_softc *sc, int ab)
{
    SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
    struct slhci_transfers *t;
    struct slhci_pipe *spipe;
    struct usbd_xfer *xfer;
    uint8_t status, buf_start;
    uint8_t *target_buf;
    unsigned int actlen;
    int head;

    t = &sc->sc_transfers;

    KASSERT(mutex_owned(&sc->sc_intr_lock));

    DLOG(D_TRACE, "ABDONE flags %#jx", t->flags, 0,0,0);

    DLOG(D_MSG, "DONE AB=%jd spipe %#jx len %jd xfer %#jx", ab,
        t->spipe[ab], (uintptr_t)t->len[ab],
        (uintptr_t)(t->spipe[ab] ? t->spipe[ab]->xfer : NULL));

    spipe = t->spipe[ab];

    /*
     * skip this one if aborted; do not call return from the rest of the
     * function unless halting, else t->len will not be cleared.
     */
    if (spipe == NULL)
        goto done;

    t->spipe[ab] = NULL;

    xfer = spipe->xfer;

    gcq_remove(&spipe->to);

    LK_SLASSERT(xfer != NULL, sc, spipe, NULL, return);

    status = slhci_read(sc, slhci_tregs[ab][STAT]);

    /*
     * I saw no status or remaining length greater than the requested
     * length in early driver versions in circumstances I assumed caused
     * excess power draw.  I am no longer able to reproduce this when
     * causing excess power draw circumstances.
     *
     * Disabling a power check and attaching aue to a keyboard and hub
     * that is directly attached (to CFU1U, 100mA max, aue 160mA, keyboard
     * 98mA) sometimes works and sometimes fails to configure.  After
     * removing the aue and attaching a self-powered umass dvd reader
     * (unknown if it draws power from the host also) soon a single Error
     * status occurs then only timeouts.  The controller soon halts freeing
     * memory due to being ONQU instead of BUSY.  This may be the same
     * basic sequence that caused the no status/bad length errors.  The
     * umass device seems to work (better at least) with the keyboard hub
     * when not first attaching aue (tested once reading an approximately
     * 200MB file).
     *
     * Overflow can indicate that the device and host disagree about how
     * much data has been transferred.  This may indicate a problem at any
     * point during the transfer, not just when the error occurs.  It may
     * indicate data corruption.  A warning message is printed.
     *
     * Trying to use both A and B transfers at the same time results in
     * incorrect transfer completion ISR reports and the status will then
     * include SL11_EPSTAT_SETUP, which is apparently set while the
     * transfer is in progress.  I also noticed data corruption, even
     * after waiting for the transfer to complete.  The driver now avoids
     * trying to start both at the same time.
     *
     * I had accidently initialized the B registers before they were valid
     * in some driver versions.  Since every other performance enhancing
     * feature has been confirmed buggy in the errata doc, I have not
     * tried both transfers at once again with the documented
     * initialization order.
     *
     * However, I have seen this problem again ("done but not started"
     * errors), which in some cases cases the SETUP status bit to remain
     * set on future transfers.  In other cases, the SETUP bit is not set
     * and no data corruption occurs.  This occurred while using both umass
     * and aue on a powered hub (maybe triggered by some local activity
     * also) and needs several reads of the 200MB file to trigger.  The
     * driver now halts if SETUP is detected.
     */

    actlen = 0;

    if (__predict_false(!status)) {
        DDOLOG("no status! xfer %p spipe %p", xfer, spipe, 0,0);
        printf("%s: no status! halted\n", SC_NAME(sc));
        slhci_halt(sc, spipe, xfer);
        return;
    }

#ifdef SLHCI_DEBUG
    if ((slhcidebug & SLHCI_D_NAK) ||
        (status & SL11_EPSTAT_ERRBITS) != SL11_EPSTAT_NAK) {
        DDOLOG("USB Status = %#.2x", status, 0, 0, 0);
        DDOLOGSTATUS(status);
    }
#endif

    if (!(status & SL11_EPSTAT_ERRBITS)) {
        /* Successful packet: compute bytes actually transferred. */
        unsigned int cont = slhci_read(sc, slhci_tregs[ab][CONT]);
        unsigned int len = spipe->tregs[LEN];
        DLOG(D_XFER, "cont %jd len %jd", cont, len, 0, 0);
        if ((status & SL11_EPSTAT_OVERFLOW) || cont > len) {
            DDOLOG("overflow - cont %d len %d xfer->ux_length %d "
                "xfer->actlen %d", cont, len, xfer->ux_length,
                xfer->ux_actlen);
            printf("%s: overflow cont %d len %d xfer->ux_length"
                " %d xfer->ux_actlen %d\n", SC_NAME(sc), cont,
                len, xfer->ux_length, xfer->ux_actlen);
            actlen = len;
        } else {
            actlen = len - cont;
        }
        spipe->nerrs = 0;
    }

    /* Actual copyin done after starting next transfer. */
    if (actlen && (spipe->tregs[PID] & SL11_PID_BITS) == SL11_PID_IN) {
        target_buf = spipe->buffer;
        buf_start = spipe->tregs[ADR];
    } else {
        target_buf = NULL;
        buf_start = 0; /* XXX gcc uninitialized warnings */
    }

    if (status & SL11_EPSTAT_ERRBITS) {
        status &= SL11_EPSTAT_ERRBITS;
        if (status & SL11_EPSTAT_SETUP) {
            /* See the large comment above: indicates chip confusion. */
            printf("%s: Invalid controller state detected! "
                "halted\n", SC_NAME(sc));
            DDOLOG("Invalid controller state detected! "
                "halted", 0, 0, 0, 0);
            slhci_halt(sc, spipe, xfer);
            return;
        } else if (__predict_false(sc->sc_bus.ub_usepolling)) {
            head = Q_CALLBACKS;
            if (status & SL11_EPSTAT_STALL)
                xfer->ux_status = USBD_STALLED;
            else if (status & SL11_EPSTAT_TIMEOUT)
                xfer->ux_status = USBD_TIMEOUT;
            else if (status & SL11_EPSTAT_NAK)
                head = Q_NEXT_CB;
            else
                xfer->ux_status = USBD_IOERROR;
        } else if (status & SL11_EPSTAT_NAK) {
            /* NAK: retry after the pipe's interval (min 1 frame). */
            int i = spipe->pipe.up_interval;
            if (i == 0)
                i = 1;
            DDOLOG("xfer %p spipe %p NAK delay by %d", xfer, spipe,
                i, 0);
            spipe->lastframe = spipe->frame = t->frame + i;
            slhci_queue_timed(sc, spipe);
            goto queued;
        } else if (++spipe->nerrs > SLHCI_MAX_RETRIES ||
            (status & SL11_EPSTAT_STALL)) {
            DDOLOG("xfer %p spipe %p nerrs %d", xfer, spipe,
                spipe->nerrs, 0);
            if (status & SL11_EPSTAT_STALL)
                xfer->ux_status = USBD_STALLED;
            else if (status & SL11_EPSTAT_TIMEOUT)
                xfer->ux_status = USBD_TIMEOUT;
            else
                xfer->ux_status = USBD_IOERROR;

            DLOG(D_ERR, "Max retries reached! status %#jx "
                "xfer->ux_status %jd", status, xfer->ux_status, 0,
                0);
            DDOLOGSTATUS(status);

            head = Q_CALLBACKS;
        } else {
            /* Transient error below the retry limit: try again. */
            head = Q_NEXT_CB;
        }
    } else if (spipe->ptype == PT_CTRL_SETUP) {
        /* SETUP stage done: advance to the data or status stage. */
        spipe->tregs[PID] = spipe->newpid;

        if (xfer->ux_length) {
            LK_SLASSERT(spipe->newlen[1] != 0, sc, spipe, xfer,
                return);
            spipe->tregs[LEN] = spipe->newlen[1];
            spipe->bustime = spipe->newbustime[1];
            spipe->buffer = xfer->ux_buf;
            spipe->ptype = PT_CTRL_DATA;
        } else {
status_setup:
            /* CTRL_DATA swaps direction in PID then jumps here */
            spipe->tregs[LEN] = 0;
            if (spipe->pflags & PF_LS)
                spipe->bustime = SLHCI_LS_CONST;
            else
                spipe->bustime = SLHCI_FS_CONST;
            spipe->ptype = PT_CTRL_STATUS;
            spipe->buffer = NULL;
        }

        /* Status or first data packet must be DATA1. */
        spipe->control |= SL11_EPCTRL_DATATOGGLE;
        if ((spipe->tregs[PID] & SL11_PID_BITS) == SL11_PID_IN)
            spipe->control &= ~SL11_EPCTRL_DIRECTION;
        else
            spipe->control |= SL11_EPCTRL_DIRECTION;

        head = Q_CB;
    } else if (spipe->ptype == PT_CTRL_STATUS) {
        head = Q_CALLBACKS;
    } else { /* bulk, intr, control data */
        xfer->ux_actlen += actlen;
        spipe->control ^= SL11_EPCTRL_DATATOGGLE;

        if (actlen == spipe->tregs[LEN] &&
            (xfer->ux_length > xfer->ux_actlen || spipe->wantshort)) {
            /* Full packet and more to go: continue the transfer. */
            spipe->buffer += actlen;
            LK_SLASSERT(xfer->ux_length >= xfer->ux_actlen, sc,
                spipe, xfer, return);
            if (xfer->ux_length - xfer->ux_actlen < actlen) {
                /* Last packet will be short: switch lengths. */
                spipe->wantshort = 0;
                spipe->tregs[LEN] = spipe->newlen[0];
                spipe->bustime = spipe->newbustime[0];
                LK_SLASSERT(xfer->ux_actlen +
                    spipe->tregs[LEN] == xfer->ux_length, sc,
                    spipe, xfer, return);
            }
            head = Q_CB;
        } else if (spipe->ptype == PT_CTRL_DATA) {
            /* Data stage complete: flip direction for status stage. */
            spipe->tregs[PID] ^= SLHCI_PID_SWAP_IN_OUT;
            goto status_setup;
        } else {
            if (spipe->ptype == PT_INTR) {
                spipe->lastframe +=
                    spipe->pipe.up_interval;
                /*
                 * If ack, we try to keep the
                 * interrupt rate by using lastframe
                 * instead of the current frame.
                 */
                spipe->frame = spipe->lastframe +
                    spipe->pipe.up_interval;
            }

            /*
             * Set the toggle for the next transfer.  It
             * has already been toggled above, so the
             * current setting will apply to the next
             * transfer.
             */
            if (spipe->control & SL11_EPCTRL_DATATOGGLE)
                spipe->pflags |= PF_TOGGLE;
            else
                spipe->pflags &= ~PF_TOGGLE;

            head = Q_CALLBACKS;
        }
    }

    if (head == Q_CALLBACKS) {
        gcq_remove(&spipe->to);

        if (xfer->ux_status == USBD_IN_PROGRESS) {
            LK_SLASSERT(xfer->ux_actlen <= xfer->ux_length, sc,
                spipe, xfer, return);
            xfer->ux_status = USBD_NORMAL_COMPLETION;
        }
    }

    enter_q(t, spipe, head);

queued:
    if (target_buf != NULL) {
        /* Start the next transfer before the slow byte-wise copyin. */
        slhci_dotransfer(sc);
        start_cc_time(&t_copy_from_dev, actlen);
        slhci_read_multi(sc, buf_start, target_buf, actlen);
        stop_cc_time(&t_copy_from_dev);
        DLOGBUF(D_BUF, target_buf, actlen);
        t->pend -= SLHCI_FS_CONST + SLHCI_FS_DATA_TIME(actlen);
    }

done:
    t->len[ab] = -1;
}
/*
 * Arm at most one prepared transfer (slot A preferred, then B) if its
 * precomputed bus time fits in the time remaining before the next SOF.
 * Prepared-but-aborted slots are reclaimed here.  Timing-critical: see
 * the comment on the 6 us budget below.
 */
static void
slhci_tstart(struct slhci_softc *sc)
{
    struct slhci_transfers *t;
    struct slhci_pipe *spipe;
    int remaining_bustime;

    t = &sc->sc_transfers;

    KASSERT(mutex_owned(&sc->sc_intr_lock));

    if (!(t->flags & (F_AREADY|F_BREADY)))
        return;

    if (t->flags & (F_AINPROG|F_BINPROG|F_DISABLED))
        return;

    /*
     * We have about 6 us to get from the bus time check to
     * starting the transfer or we might babble or the chip might fail to
     * signal transfer complete.  This leaves no time for any other
     * interrupts.
     */
    /* CSOF counts in units of 64 bit times; shift converts to bit times. */
    remaining_bustime = (int)(slhci_read(sc, SL811_CSOF)) << 6;
    remaining_bustime -= SLHCI_END_BUSTIME;

    /*
     * Start one transfer only, clearing any aborted transfers that are
     * not yet in progress and skipping missed isoc.  It is easier to copy
     * & paste most of the A/B sections than to make the logic work
     * otherwise and this allows better constant use.
     */
    if (t->flags & F_AREADY) {
        spipe = t->spipe[A];
        if (spipe == NULL) {
            /* Slot was aborted after being prepared: reclaim it. */
            t->flags &= ~F_AREADY;
            t->len[A] = -1;
        } else if (remaining_bustime >= spipe->bustime) {
            t->flags &= ~(F_AREADY|F_SOFCHECK1|F_SOFCHECK2);
            t->flags |= F_AINPROG;
            start_cc_time(&t_ab[A], spipe->tregs[LEN]);
            slhci_write(sc, SL11_E0CTRL, spipe->control);
            goto pend;
        }
    }

    if (t->flags & F_BREADY) {
        spipe = t->spipe[B];
        if (spipe == NULL) {
            t->flags &= ~F_BREADY;
            t->len[B] = -1;
        } else if (remaining_bustime >= spipe->bustime) {
            t->flags &= ~(F_BREADY|F_SOFCHECK1|F_SOFCHECK2);
            t->flags |= F_BINPROG;
            start_cc_time(&t_ab[B], spipe->tregs[LEN]);
            slhci_write(sc, SL11_E1CTRL, spipe->control);
pend:
            t->pend = spipe->bustime;
        }
    }
}
/*
 * Load waiting transfers into the A/B channel slots: pick eligible pipes
 * from the timed queue (frame due) or the callback-ordered queues, stage
 * OUT data into chip buffer memory, program the transfer registers that
 * changed, and hand off to slhci_tstart().  Called with the interrupt lock
 * held.
 */
static void
slhci_dotransfer(struct slhci_softc *sc)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	struct slhci_pipe *spipe;
	int ab, i;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	while ((t->len[A] == -1 || t->len[B] == -1) &&
	    (GOT_FIRST_TIMED_COND(spipe, t, spipe->frame <= t->frame) ||
	    GOT_FIRST_CB(spipe, t))) {
		LK_SLASSERT(spipe->xfer != NULL, sc, spipe, NULL, return);
		LK_SLASSERT(spipe->ptype != PT_ROOT_CTRL && spipe->ptype !=
		    PT_ROOT_INTR, sc, spipe, NULL, return);
		/* Check that this transfer can fit in the remaining memory. */
		if (t->len[A] + t->len[B] + spipe->tregs[LEN] + 1 >
		    SL11_MAX_PACKET_SIZE) {
			DLOG(D_XFER, "Transfer does not fit. alen %jd blen %jd "
			    "len %jd", t->len[A], t->len[B], spipe->tregs[LEN],
			    0);
			return;
		}
		gcq_remove(&spipe->xq);
		/* A uses the bottom of buffer memory, B the top. */
		if (t->len[A] == -1) {
			ab = A;
			spipe->tregs[ADR] = SL11_BUFFER_START;
		} else {
			ab = B;
			spipe->tregs[ADR] = SL11_BUFFER_END -
			    spipe->tregs[LEN];
		}
		t->len[ab] = spipe->tregs[LEN];
		/* Copy OUT payload to the chip before arming. */
		if (spipe->tregs[LEN] && (spipe->tregs[PID] & SL11_PID_BITS)
		    != SL11_PID_IN) {
			start_cc_time(&t_copy_to_dev,
			    spipe->tregs[LEN]);
			slhci_write_multi(sc, spipe->tregs[ADR],
			    spipe->buffer, spipe->tregs[LEN]);
			stop_cc_time(&t_copy_to_dev);
			t->pend -= SLHCI_FS_CONST +
			    SLHCI_FS_DATA_TIME(spipe->tregs[LEN]);
		}
		DLOG(D_MSG, "NEW TRANSFER AB=%jd flags %#jx alen %jd blen %jd",
		    ab, t->flags, t->len[0], t->len[1]);
		/* Skip the ADR register write when there is no payload. */
		if (spipe->tregs[LEN])
			i = 0;
		else
			i = 1;
		/* Write only the transfer registers that actually changed. */
		for (; i <= 3; i++)
			if (t->current_tregs[ab][i] != spipe->tregs[i]) {
				t->current_tregs[ab][i] = spipe->tregs[i];
				slhci_write(sc, slhci_tregs[ab][i],
				    spipe->tregs[i]);
			}
		DLOG(D_SXFER, "Transfer len %jd pid %#jx dev %jd type %jd",
		    spipe->tregs[LEN], spipe->tregs[PID], spipe->tregs[DEV],
		    spipe->ptype);
		t->spipe[ab] = spipe;
		t->flags |= ab ? F_BREADY : F_AREADY;
		slhci_tstart(sc);
	}
}
/*
 * slhci_callback is called after the lock is taken.
 * Completes the synthetic root-hub interrupt transfer (if F_ROOTINTR is
 * pending) and then drains the callback queue, invoking slhci_do_callback()
 * for each finished xfer.
 */
static void
slhci_callback(struct slhci_softc *sc)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	struct slhci_pipe *spipe;
	struct usbd_xfer *xfer;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	DLOG(D_SOFT, "CB flags %#jx", t->flags, 0,0,0);
	for (;;) {
		if (__predict_false(t->flags & F_ROOTINTR)) {
			t->flags &= ~F_ROOTINTR;
			if (t->rootintr != NULL) {
				u_char *p;
				KASSERT(t->rootintr->ux_status ==
				    USBD_IN_PROGRESS);
				/* One port: report change on port 1 (bit 1). */
				p = t->rootintr->ux_buf;
				p[0] = 2;
				t->rootintr->ux_actlen = 1;
				t->rootintr->ux_status = USBD_NORMAL_COMPLETION;
				xfer = t->rootintr;
				goto do_callback;
			}
		}
		if (!DEQUEUED_CALLBACK(spipe, t))
			return;
		xfer = spipe->xfer;
		LK_SLASSERT(xfer != NULL, sc, spipe, NULL, return);
		spipe->xfer = NULL;
		DLOG(D_XFER, "xfer callback length %jd actlen %jd spipe %#jx "
		    "type %jd", xfer->ux_length, (uintptr_t)xfer->ux_actlen,
		    (uintptr_t)spipe, spipe->ptype);
do_callback:
		slhci_do_callback(sc, xfer);
	}
}
/*
 * Route a newly submitted xfer to the proper queue: cancel it immediately
 * if the controller is disabled or the pipe is gone, start its timeout
 * timer, and place it on the timed queue (interrupt pipes) or the callback
 * transfer queue.  Called with the interrupt lock held.
 */
static void
slhci_enter_xfer(struct slhci_softc *sc, struct slhci_pipe *spipe)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	if (__predict_false(t->flags & F_DISABLED) ||
	    __predict_false(spipe->pflags & PF_GONE)) {
		DLOG(D_MSG, "slhci_enter_xfer: DISABLED or GONE", 0,0,0,0);
		spipe->xfer->ux_status = USBD_CANCELLED;
	}
	if (spipe->xfer->ux_status == USBD_IN_PROGRESS) {
		if (spipe->xfer->ux_timeout) {
			spipe->to_frame = t->frame + spipe->xfer->ux_timeout;
			slhci_xfer_timer(sc, spipe);
		}
		if (spipe->pipe.up_interval)
			slhci_queue_timed(sc, spipe);
		else
			enter_q(t, spipe, Q_CB);
	} else
		/* Already cancelled: complete it via the callback queue. */
		enter_callback(t, spipe);
}
/*
 * Drain the wait queue, dispatching every queued pipe into the transfer
 * machinery via slhci_enter_xfer().  Interrupt lock must be held.
 */
static void
slhci_enter_xfers(struct slhci_softc *sc)
{
	struct slhci_pipe *spipe;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	for (;;) {
		if (!DEQUEUED_WAITQ(spipe, sc))
			break;
		slhci_enter_xfer(sc, spipe);
	}
}
/*
 * Insert a pipe into the timed (frame-ordered) queue, keeping the queue
 * sorted by ascending due frame.  Called with the interrupt lock held.
 */
static void
slhci_queue_timed(struct slhci_softc *sc, struct slhci_pipe *spipe)
{
	struct slhci_transfers *t;
	struct gcq *q;
	struct slhci_pipe *spp;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	/* Find the first entry due later than us and insert before it. */
	FIND_TIMED(q, t, spp, spp->frame > spipe->frame);
	gcq_insert_before(q, &spipe->xq);
}
/*
 * Insert a pipe into the timeout queue, kept sorted by ascending timeout
 * frame.  Called with the interrupt lock held.
 */
static void
slhci_xfer_timer(struct slhci_softc *sc, struct slhci_pipe *spipe)
{
	struct slhci_transfers *t;
	struct gcq *q;
	struct slhci_pipe *spp;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	/* Note >=: equal deadlines keep FIFO order among themselves. */
	FIND_TO(q, t, spp, spp->to_frame >= spipe->to_frame);
	gcq_insert_before(q, &spipe->to);
}
/*
 * Schedule the callback soft interrupt, but only while the controller is
 * active.  Interrupt lock must be held.
 */
static void
slhci_callback_schedule(struct slhci_softc *sc)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	if (!(t->flags & F_ACTIVE))
		return;
	slhci_do_callback_schedule(sc);
}
/*
 * Unconditionally arrange for the callback soft interrupt to run; the
 * F_CALLBACK flag prevents scheduling it more than once.  Called with the
 * interrupt lock held.
 */
static void
slhci_do_callback_schedule(struct slhci_softc *sc)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	DLOG(D_MSG, "flags %#jx", t->flags, 0, 0, 0);
	if (!(t->flags & F_CALLBACK)) {
		t->flags |= F_CALLBACK;
		softint_schedule(sc->sc_cb_softintr);
	}
}
/*
 * Disabled (#if 0) polled-transfer helper: busy-polls the interrupt
 * handler until the given xfer completes, then runs its callback.
 * Kept for reference; not compiled.
 */
#if 0
/* must be called with lock taken. */
/* XXX static */ void
slhci_pollxfer(struct slhci_softc *sc, struct usbd_xfer *xfer)
{
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	slhci_dotransfer(sc);
	do {
		slhci_dointr(sc);
	} while (xfer->ux_status == USBD_IN_PROGRESS);
	slhci_do_callback(sc, xfer);
}
#endif
/*
 * Poll the controller once on behalf of the lock-taking dispatch path.
 * The spipe/xfer arguments are unused; always reports completion.
 */
static usbd_status
slhci_do_poll(struct slhci_softc *sc, struct slhci_pipe *spipe, struct
    usbd_xfer *xfer)
{
	slhci_waitintr(sc, 0);
	return USBD_NORMAL_COMPLETION;
}
/*
 * Reject a low-speed-device-via-hub request, printing a one-time warning
 * (rate limited by the F_LSVH_WARNED flag).  Always returns USBD_INVAL.
 */
static usbd_status
slhci_lsvh_warn(struct slhci_softc *sc, struct slhci_pipe *spipe,
    struct usbd_xfer *xfer)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t = &sc->sc_transfers;
	if ((t->flags & F_LSVH_WARNED) == 0) {
		t->flags |= F_LSVH_WARNED;
		printf("%s: Low speed device via hub disabled, "
		    "see slhci(4)\n", SC_NAME(sc));
		DDOLOG("Low speed device via hub disabled, "
		    "see slhci(4)", SC_NAME(sc), 0,0,0);
	}
	return USBD_INVAL;
}
/*
 * Reject an isochronous transfer request, printing a one-time warning
 * (rate limited by the F_ISOC_WARNED flag).  Always returns USBD_INVAL.
 */
static usbd_status
slhci_isoc_warn(struct slhci_softc *sc, struct slhci_pipe *spipe,
    struct usbd_xfer *xfer)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t = &sc->sc_transfers;
	if ((t->flags & F_ISOC_WARNED) == 0) {
		t->flags |= F_ISOC_WARNED;
		printf("%s: ISOC transfer not supported "
		    "(see slhci(4))\n", SC_NAME(sc));
		DDOLOG("ISOC transfer not supported "
		    "(see slhci(4))", 0, 0, 0, 0);
	}
	return USBD_INVAL;
}
/*
 * Open a pipe: refuse while the controller is disabled, reserve periodic
 * bus time for interrupt pipes (may fail with pending-requests), then add
 * the pipe to the all-pipes list.
 */
static usbd_status
slhci_open_pipe(struct slhci_softc *sc, struct slhci_pipe *spipe,
    struct usbd_xfer *xfer)
{
	struct slhci_transfers *t = &sc->sc_transfers;
	struct usbd_pipe *pipe = &spipe->pipe;
	if (t->flags & F_DISABLED)
		return USBD_CANCELLED;
	/* Periodic pipes must reserve a slice of frame bandwidth. */
	if (pipe->up_interval != 0 && !slhci_reserve_bustime(sc, spipe, 1))
		return USBD_PENDING_REQUESTS;
	enter_all_pipes(t, spipe);
	return USBD_NORMAL_COMPLETION;
}
/*
 * Close a pipe: release any periodic bus-time reservation (root-hub
 * interrupt pipes never reserved any) and unlink it from the all-pipes
 * list.
 */
static usbd_status
slhci_close_pipe(struct slhci_softc *sc, struct slhci_pipe *spipe, struct
    usbd_xfer *xfer)
{
	struct usbd_pipe *pipe;
	pipe = &spipe->pipe;
	if (pipe->up_interval && spipe->ptype != PT_ROOT_INTR)
		slhci_reserve_bustime(sc, spipe, 0);
	gcq_remove(&spipe->ap);
	return USBD_NORMAL_COMPLETION;
}
/*
 * Abort the given xfer on the given pipe: detach it from the root-hub
 * interrupt slot or from the timeout/transfer queues and the A/B channel
 * slots, then clear the pipe's xfer reference unless it timed out.
 * Called with the interrupt lock held.
 */
static usbd_status
slhci_do_abort(struct slhci_softc *sc, struct slhci_pipe *spipe, struct
    usbd_xfer *xfer)
{
	struct slhci_transfers *t;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	if (spipe->xfer == xfer) {
		if (spipe->ptype == PT_ROOT_INTR) {
			if (t->rootintr == spipe->xfer) /* XXX assert? */
				t->rootintr = NULL;
		} else {
			gcq_remove(&spipe->to);
			gcq_remove(&spipe->xq);
			/* Free the channel slot unless hardware is active. */
			if (t->spipe[A] == spipe) {
				t->spipe[A] = NULL;
				if (!(t->flags & F_AINPROG))
					t->len[A] = -1;
			} else if (t->spipe[B] == spipe) {
				t->spipe[B] = NULL;
				if (!(t->flags & F_BINPROG))
					t->len[B] = -1;
			}
		}
		if (xfer->ux_status != USBD_TIMEOUT) {
			spipe->xfer = NULL;
			spipe->pipe.up_repeat = 0; /* XXX timeout? */
		}
	}
	return USBD_NORMAL_COMPLETION;
}
/*
 * Called to deactivate or stop use of the controller instead of panicking.
 * Will cancel the xfer correctly even when not on a list.
 * Disables interrupts, marks the port disabled/disconnected, drains all
 * pending work, and schedules one final callback pass.  Interrupt lock
 * must be held.
 */
static usbd_status
slhci_halt(struct slhci_softc *sc, struct slhci_pipe *spipe,
    struct usbd_xfer *xfer)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	t = &sc->sc_transfers;
	DDOLOG("Halt! sc %p spipe %p xfer %p", sc, spipe, xfer, 0);
	if (spipe != NULL)
		slhci_log_spipe(spipe);
	if (xfer != NULL)
		slhci_log_xfer(xfer);
	/* Cancel an xfer that is not reachable from any queue or slot. */
	if (spipe != NULL && xfer != NULL && spipe->xfer == xfer &&
	    !gcq_onlist(&spipe->xq) && t->spipe[A] != spipe && t->spipe[B] !=
	    spipe) {
		xfer->ux_status = USBD_CANCELLED;
		enter_callback(t, spipe);
	}
	if (t->flags & F_ACTIVE) {
		slhci_intrchange(sc, 0);
		/*
		 * leave power on when halting in case flash devices or disks
		 * are attached, which may be writing and could be damaged
		 * by abrupt power loss.  The root hub clear power feature
		 * should still work after halting.
		 */
	}
	t->flags &= ~F_ACTIVE;
	t->flags |= F_UDISABLED;
	if (!(t->flags & F_NODEV))
		t->flags |= F_NODEV|F_CCONNECT|F_ROOTINTR;
	slhci_drain(sc);
	/* One last callback for the drain and device removal. */
	slhci_do_callback_schedule(sc);
	return USBD_NORMAL_COMPLETION;
}
/*
 * There are three interrupt states: no interrupts during reset and after
 * device deactivation, INSERT only for no device present but power on, and
 * SOF, INSERT, ADONE, and BDONE when device is present.
 * Write the new interrupt-enable mask only if it differs from the cached
 * value.  Interrupt lock must be held.
 */
static void
slhci_intrchange(struct slhci_softc *sc, uint8_t new_ier)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	if (sc->sc_ier != new_ier) {
		DLOG(D_INTR, "New IER %#jx", new_ier, 0, 0, 0);
		sc->sc_ier = new_ier;
		slhci_write(sc, SL11_IER, new_ier);
		/* Flush the bus-space write before proceeding. */
		BSB_SYNC(sc->iot, sc->ioh, sc->pst, sc->psz);
	}
}
/*
 * Drain: cancel all pending transfers and put them on the callback list and
 * set the UDISABLED flag.  UDISABLED is cleared only by reset.
 */
static void
slhci_drain(struct slhci_softc *sc)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	struct slhci_pipe *spipe;
	struct gcq *q;
	int i;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	t = &sc->sc_transfers;
	DLOG(D_MSG, "DRAIN flags %#jx", t->flags, 0,0,0);
	t->pend = INT_MAX;
	/* Clear both A/B channel slots, completing any loaded transfer. */
	for (i = 0; i <= 1; i++) {
		t->len[i] = -1;
		if (t->spipe[i] != NULL) {
			enter_callback(t, t->spipe[i]);
			t->spipe[i] = NULL;
		}
	}
	/* Merge the queues into the callback queue. */
	gcq_merge_tail(&t->q[Q_CALLBACKS], &t->q[Q_CB]);
	gcq_merge_tail(&t->q[Q_CALLBACKS], &t->q[Q_NEXT_CB]);
	gcq_merge_tail(&t->q[Q_CALLBACKS], &t->timed);
	/*
	 * Cancel all pipes.  Note that not all of these may be on the
	 * callback queue yet; some could be in slhci_start, for example.
	 */
	FOREACH_AP(q, t, spipe) {
		spipe->pflags |= PF_GONE;
		spipe->pipe.up_repeat = 0;
		spipe->pipe.up_aborting = 1;
		if (spipe->xfer != NULL)
			spipe->xfer->ux_status = USBD_CANCELLED;
	}
	gcq_remove_all(&t->to);
	t->flags |= F_UDISABLED;
	t->flags &= ~(F_AREADY|F_BREADY|F_AINPROG|F_BINPROG|F_LOWSPEED);
}
/*
 * RESET: SL11_CTRL_RESETENGINE=1 and SL11_CTRL_JKSTATE=0 for 50ms
 * reconfigure SOF after reset, must wait 2.5us before USB bus activity (SOF)
 * check attached device speed.
 * must wait 100ms before USB transaction according to app note, 10ms
 * by spec.  uhub does this delay
 *
 * Started from root hub set feature reset, which does step one.
 * ub_usepolling will call slhci_reset directly, otherwise the callout goes
 * through slhci_reset_entry.
 */
void
slhci_reset(struct slhci_softc *sc)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	struct slhci_pipe *spipe;
	struct gcq *q;
	uint8_t r, pol, ctrl;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	stop_cc_time(&t_delay);
	KASSERT(t->flags & F_ACTIVE);
	start_cc_time(&t_delay, 0);
	stop_cc_time(&t_delay);
	/* End the reset drive, then give the chip a moment to settle. */
	slhci_write(sc, SL11_CTRL, 0);
	start_cc_time(&t_delay, 3);
	DELAY(3);
	stop_cc_time(&t_delay);
	/* Clear all pending interrupt status and sample the port state. */
	slhci_write(sc, SL11_ISR, 0xff);
	r = slhci_read(sc, SL11_ISR);
	if (r & SL11_ISR_INSERT)
		slhci_write(sc, SL11_ISR, SL11_ISR_INSERT);
	if (r & SL11_ISR_NODEV) {
		DLOG(D_MSG, "NC", 0,0,0,0);
		/*
		 * Normally, the hard interrupt insert routine will issue
		 * CCONNECT, however we need to do it here if the detach
		 * happened during reset.
		 */
		if (!(t->flags & F_NODEV))
			t->flags |= F_CCONNECT|F_ROOTINTR|F_NODEV;
		slhci_intrchange(sc, SL11_IER_INSERT);
	} else {
		if (t->flags & F_NODEV)
			t->flags |= F_CCONNECT;
		t->flags &= ~(F_NODEV|F_LOWSPEED);
		/* D+ high at idle means a full-speed device. */
		if (r & SL11_ISR_DATA) {
			DLOG(D_MSG, "FS", 0,0,0,0);
			pol = ctrl = 0;
		} else {
			DLOG(D_MSG, "LS", 0,0,0,0);
			pol = SL811_CSOF_POLARITY;
			ctrl = SL11_CTRL_LOWSPEED;
			t->flags |= F_LOWSPEED;
		}
		/* Enable SOF auto-generation */
		t->frame = 0; /* write to SL811_CSOF will reset frame */
		slhci_write(sc, SL11_SOFTIME, 0xe0);
		slhci_write(sc, SL811_CSOF, pol|SL811_CSOF_MASTER|0x2e);
		slhci_write(sc, SL11_CTRL, ctrl|SL11_CTRL_ENABLESOF);
		/*
		 * According to the app note, ARM must be set
		 * for SOF generation to work.  We initialize all
		 * USBA registers here for current_tregs.
		 */
		slhci_write(sc, SL11_E0ADDR, SL11_BUFFER_START);
		slhci_write(sc, SL11_E0LEN, 0);
		slhci_write(sc, SL11_E0PID, SL11_PID_SOF);
		slhci_write(sc, SL11_E0DEV, 0);
		slhci_write(sc, SL11_E0CTRL, SL11_EPCTRL_ARM);
		/*
		 * Initialize B registers.  This can't be done earlier since
		 * they are not valid until the SL811_CSOF register is written
		 * above due to SL11H compatibility.
		 */
		slhci_write(sc, SL11_E1ADDR, SL11_BUFFER_END - 8);
		slhci_write(sc, SL11_E1LEN, 0);
		slhci_write(sc, SL11_E1PID, 0);
		slhci_write(sc, SL11_E1DEV, 0);
		/* Keep the software shadow of the transfer regs in sync. */
		t->current_tregs[0][ADR] = SL11_BUFFER_START;
		t->current_tregs[0][LEN] = 0;
		t->current_tregs[0][PID] = SL11_PID_SOF;
		t->current_tregs[0][DEV] = 0;
		t->current_tregs[1][ADR] = SL11_BUFFER_END - 8;
		t->current_tregs[1][LEN] = 0;
		t->current_tregs[1][PID] = 0;
		t->current_tregs[1][DEV] = 0;
		/* SOF start will produce USBA interrupt */
		t->len[A] = 0;
		t->flags |= F_AINPROG;
		slhci_intrchange(sc, SLHCI_NORMAL_INTERRUPTS);
	}
	t->flags &= ~(F_UDISABLED|F_RESET);
	t->flags |= F_CRESET|F_ROOTINTR;
	/* Re-enable all pipes that were cancelled by the drain. */
	FOREACH_AP(q, t, spipe) {
		spipe->pflags &= ~PF_GONE;
		spipe->pipe.up_aborting = 0;
	}
	DLOG(D_MSG, "RESET done flags %#jx", t->flags, 0,0,0);
}
#ifdef SLHCI_DEBUG
/*
 * Test the chip's on-board buffer RAM using an extended March C- algorithm
 * driven by a small test-description string interpreter.  Each element in
 * the string is run over the full address range [SL11_BUFFER_START,
 * SL11_BUFFER_END] for each of four data-background patterns.  Returns 0
 * on success, -1 on the first miscompare.
 */
static int
slhci_memtest(struct slhci_softc *sc)
{
	enum { ASC, DESC, EITHER = ASC };	/* direction */
	enum { READ, WRITE };			/* operation */
	const char *ptr, *elem;
	size_t i;
	const int low = SL11_BUFFER_START, high = SL11_BUFFER_END;
	int addr = 0, dir = ASC, op = READ;
	/* Extended March C- test algorithm (SOFs also) */
	const char test[] = "E(w0) A(r0w1r1) A(r1w0r0) D(r0w1) D(r1w0) E(r0)";
	char c;
	const uint8_t dbs[] = { 0x00, 0x0f, 0x33, 0x55 }; /* data backgrounds */
	uint8_t db;
	/* Perform memory test for all data backgrounds. */
	for (i = 0; i < __arraycount(dbs); i++) {
		ptr = test;
		elem = ptr;
		/* Walk test algorithm string. */
		while ((c = *ptr++) != '\0')
			switch (tolower((int)c)) {
			case 'a':
				/* Address sequence is in ascending order. */
				dir = ASC;
				break;
			case 'd':
				/* Address sequence is in descending order. */
				dir = DESC;
				break;
			case 'e':
				/* Address sequence is in either order. */
				dir = EITHER;
				break;
			case '(':
				/* Start of test element (sequence). */
				elem = ptr;
				addr = (dir == ASC) ? low : high;
				break;
			case 'r':
				/* read operation */
				op = READ;
				break;
			case 'w':
				/* write operation */
				op = WRITE;
				break;
			case '0':
			case '1':
				/*
				 * Execute previously set-up operation by
				 * reading/writing non-inverted ('0') or
				 * inverted ('1') data background.
				 */
				db = (c - '0') ? ~dbs[i] : dbs[i];
				if (op == READ) {
					if (slhci_read(sc, addr) != db)
						return -1;
				} else
					slhci_write(sc, addr, db);
				break;
			case ')':
				/*
				 * End of element: Repeat same element with next
				 * address or continue to next element.
				 */
				addr = (dir == ASC) ? addr + 1 : addr - 1;
				if (addr >= low && addr <= high)
					ptr = elem;
				break;
			default:
				/* Do nothing. */
				break;
			}
	}
	return 0;
}
#endif
/* returns 1 if succeeded, 0 if failed, reserve == 0 is unreserve */
/*
 * Account for the worst-case per-frame bus time of a periodic pipe.  With
 * SLHCI_NO_OVERTIME defined the request is refused once the budget is
 * exceeded; otherwise it is only warned about (rate limited).  Note the
 * braces below belong to the preprocessor-selected arms of the ratecheck
 * if.  Interrupt lock must be held.
 */
static int
slhci_reserve_bustime(struct slhci_softc *sc, struct slhci_pipe *spipe, int
    reserve)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	int bustime, max_packet;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	t = &sc->sc_transfers;
	max_packet = UGETW(spipe->pipe.up_endpoint->ue_edesc->wMaxPacketSize);
	if (spipe->pflags & PF_LS)
		bustime = SLHCI_LS_CONST + SLHCI_LS_DATA_TIME(max_packet);
	else
		bustime = SLHCI_FS_CONST + SLHCI_FS_DATA_TIME(max_packet);
	if (!reserve) {
		t->reserved_bustime -= bustime;
#ifdef DIAGNOSTIC
		if (t->reserved_bustime < 0) {
			printf("%s: reserved_bustime %d < 0!\n",
			    SC_NAME(sc), t->reserved_bustime);
			DDOLOG("reserved_bustime %d < 0!",
			    t->reserved_bustime, 0, 0, 0);
			t->reserved_bustime = 0;
		}
#endif
		return 1;
	}
	if (t->reserved_bustime + bustime > SLHCI_RESERVED_BUSTIME) {
		if (ratecheck(&sc->sc_reserved_warn_rate,
		    &reserved_warn_rate))
#ifdef SLHCI_NO_OVERTIME
		{
			printf("%s: Max reserved bus time exceeded! "
			    "Erroring request.\n", SC_NAME(sc));
			DDOLOG("%s: Max reserved bus time exceeded! "
			    "Erroring request.", 0, 0, 0, 0);
		}
		return 0;
#else
		{
			printf("%s: Reserved bus time exceeds %d!\n",
			    SC_NAME(sc), SLHCI_RESERVED_BUSTIME);
			DDOLOG("Reserved bus time exceeds %d!",
			    SLHCI_RESERVED_BUSTIME, 0, 0, 0);
		}
#endif
	}
	t->reserved_bustime += bustime;
	return 1;
}
/* Device insertion/removal interrupt */
/*
 * Toggle the connect state: on removal drop to insert-only interrupts; on
 * insert drain stale work and wait for the next insert/remove event.  The
 * root-hub interrupt pipe is notified via F_ROOTINTR.  Interrupt lock must
 * be held.
 */
static void
slhci_insert(struct slhci_softc *sc)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	if (t->flags & F_NODEV)
		slhci_intrchange(sc, 0);
	else {
		slhci_drain(sc);
		slhci_intrchange(sc, SL11_IER_INSERT);
	}
	/* Flip connect state and flag the change for the root hub. */
	t->flags ^= F_NODEV;
	t->flags |= F_ROOTINTR|F_CCONNECT;
	DLOG(D_MSG, "INSERT intr: flags after %#jx", t->flags, 0,0,0);
}
/*
 * Data structures and routines to emulate the root hub.
 */
/*
 * Handle a root-hub ClearPortFeature request.  PORT_POWER powers the port
 * down (and the Nereid's real supply, if present); the C_* features clear
 * the corresponding change bits; PORT_ENABLE drains; PORT_SUSPEND is a
 * silent no-op; anything else is an error.  Interrupt lock must be held.
 */
static usbd_status
slhci_clear_feature(struct slhci_softc *sc, unsigned int what)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	usbd_status error;
	t = &sc->sc_transfers;
	error = USBD_NORMAL_COMPLETION;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	if (what == UHF_PORT_POWER) {
		DLOG(D_MSG, "POWER_OFF", 0,0,0,0);
		t->flags &= ~F_POWER;
		if (!(t->flags & F_NODEV))
			t->flags |= F_NODEV|F_CCONNECT|F_ROOTINTR;
		/* for x68k Nereid USB controller */
		if (sc->sc_enable_power && (t->flags & F_REALPOWER)) {
			t->flags &= ~F_REALPOWER;
			sc->sc_enable_power(sc, POWER_OFF);
		}
		slhci_intrchange(sc, 0);
		slhci_drain(sc);
	} else if (what == UHF_C_PORT_CONNECTION) {
		t->flags &= ~F_CCONNECT;
	} else if (what == UHF_C_PORT_RESET) {
		t->flags &= ~F_CRESET;
	} else if (what == UHF_PORT_ENABLE) {
		slhci_drain(sc);
	} else if (what != UHF_PORT_SUSPEND) {
		DDOLOG("ClrPortFeatERR:value=%#.4x", what, 0,0,0);
		error = USBD_IOERROR;
	}
	return error;
}
/*
 * Handle a root-hub SetPortFeature request.  PORT_RESET starts the 50 ms
 * bus reset (completed by slhci_reset(), either inline when polling or via
 * the callout); PORT_POWER powers the port (and the Nereid's real supply,
 * if present) and samples the connect state; PORT_SUSPEND is unsupported;
 * anything else is an error.  Interrupt lock must be held.
 */
static usbd_status
slhci_set_feature(struct slhci_softc *sc, unsigned int what)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	uint8_t r;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	if (what == UHF_PORT_RESET) {
		if (!(t->flags & F_ACTIVE)) {
			DDOLOG("SET PORT_RESET when not ACTIVE!",
			    0,0,0,0);
			return USBD_INVAL;
		}
		if (!(t->flags & F_POWER)) {
			/*
			 * Log the flags word as hex like the rest of the
			 * file; it is an integer, not a pointer, so %p was
			 * the wrong conversion here.
			 */
			DDOLOG("SET PORT_RESET without PORT_POWER! flags %#jx",
			    t->flags, 0,0,0);
			return USBD_INVAL;
		}
		/* Reset already in progress: nothing more to do. */
		if (t->flags & F_RESET)
			return USBD_NORMAL_COMPLETION;
		DLOG(D_MSG, "RESET flags %#jx", t->flags, 0,0,0);
		slhci_intrchange(sc, 0);
		slhci_drain(sc);
		slhci_write(sc, SL11_CTRL, SL11_CTRL_RESETENGINE);
		/* usb spec says delay >= 10ms, app note 50ms */
		start_cc_time(&t_delay, 50000);
		if (sc->sc_bus.ub_usepolling) {
			DELAY(50000);
			slhci_reset(sc);
		} else {
			t->flags |= F_RESET;
			callout_schedule(&sc->sc_timer, uimax(mstohz(50), 2));
		}
	} else if (what == UHF_PORT_SUSPEND) {
		printf("%s: USB Suspend not implemented!\n", SC_NAME(sc));
		DDOLOG("USB Suspend not implemented!", 0, 0, 0, 0);
	} else if (what == UHF_PORT_POWER) {
		DLOG(D_MSG, "PORT_POWER", 0,0,0,0);
		/* for x68k Nereid USB controller */
		if (!(t->flags & F_ACTIVE))
			return USBD_INVAL;
		if (t->flags & F_POWER)
			return USBD_NORMAL_COMPLETION;
		if (!(t->flags & F_REALPOWER)) {
			if (sc->sc_enable_power)
				sc->sc_enable_power(sc, POWER_ON);
			t->flags |= F_REALPOWER;
		}
		t->flags |= F_POWER;
		/* Sample the connect state now that power is applied. */
		r = slhci_read(sc, SL11_ISR);
		if (r & SL11_ISR_INSERT)
			slhci_write(sc, SL11_ISR, SL11_ISR_INSERT);
		if (r & SL11_ISR_NODEV) {
			slhci_intrchange(sc, SL11_IER_INSERT);
			t->flags |= F_NODEV;
		} else {
			t->flags &= ~F_NODEV;
			t->flags |= F_CCONNECT|F_ROOTINTR;
		}
	} else {
		DDOLOG("SetPortFeatERR=%#.8x", what, 0,0,0);
		return USBD_IOERROR;
	}
	return USBD_NORMAL_COMPLETION;
}
/*
 * Fill in the root-hub port status/change words from the software flags.
 * Interrupt lock must be held.
 */
static void
slhci_get_status(struct slhci_softc *sc, usb_port_status_t *ps)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	unsigned int status, change;
	t = &sc->sc_transfers;
	KASSERT(mutex_owned(&sc->sc_intr_lock));
	/*
	 * We do not have a way to detect over current or babble and
	 * suspend is currently not implemented, so connect and reset
	 * are the only changes that need to be reported.
	 */
	change = 0;
	if (t->flags & F_CCONNECT)
		change |= UPS_C_CONNECT_STATUS;
	if (t->flags & F_CRESET)
		change |= UPS_C_PORT_RESET;
	status = 0;
	if (!(t->flags & F_NODEV))
		status |= UPS_CURRENT_CONNECT_STATUS;
	if (!(t->flags & F_UDISABLED))
		status |= UPS_PORT_ENABLED;
	if (t->flags & F_RESET)
		status |= UPS_RESET;
	if (t->flags & F_POWER)
		status |= UPS_PORT_POWER;
	if (t->flags & F_LOWSPEED)
		status |= UPS_LOW_SPEED;
	USETW(ps->wPortStatus, status);
	USETW(ps->wPortChange, change);
	DLOG(D_ROOT, "status=%#.4jx, change=%#.4jx", status, change, 0,0);
}
/*
 * Emulated root-hub control endpoint.  Dispatches the subset of standard
 * and hub-class requests this driver implements; everything else falls
 * through to the generic usbroothub code (return buflen) or errors out
 * (return -1).  Returns the actual transfer length on success.
 */
static int
slhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
    void *buf, int buflen)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_softc *sc = SLHCI_BUS2SC(bus);
	struct slhci_transfers *t = &sc->sc_transfers;
	usbd_status error = USBD_IOERROR; /* XXX should be STALL */
	uint16_t len, value, index;
	uint8_t type;
	int actlen = 0;
	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);
	type = req->bmRequestType;
	SLHCI_DEXEC(D_TRACE, slhci_log_req(req));
	/*
	 * USB requests for hubs have two basic types, standard and class.
	 * Each could potentially have recipients of device, interface,
	 * endpoint, or other.  For the hub class, CLASS_OTHER means the port
	 * and CLASS_DEVICE means the hub.  For standard requests, OTHER
	 * is not used.  Standard request are described in section 9.4 of the
	 * standard, hub class requests in 11.16.  Each request is either read
	 * or write.
	 *
	 * Clear Feature, Set Feature, and Status are defined for each of the
	 * used recipients.  Get Descriptor and Set Descriptor are defined for
	 * both standard and hub class types with different descriptors.
	 * Other requests have only one defined recipient and type.  These
	 * include: Get/Set Address, Get/Set Configuration, Get/Set Interface,
	 * and Synch Frame for standard requests and Get Bus State for hub
	 * class.
	 *
	 * When a device is first powered up it has address 0 until the
	 * address is set.
	 *
	 * Hubs are only allowed to support one interface and may not have
	 * isochronous endpoints.  The results of the related requests are
	 * undefined.
	 *
	 * The standard requires invalid or unsupported requests to return
	 * STALL in the data stage, however this does not work well with
	 * current error handling. XXX
	 *
	 * Some unsupported fields:
	 * Clear Hub Feature is for C_HUB_LOCAL_POWER and C_HUB_OVER_CURRENT
	 * Set Device Features is for ENDPOINT_HALT and DEVICE_REMOTE_WAKEUP
	 * Get Bus State is optional sample of D- and D+ at EOF2
	 */
	switch (req->bRequest) {
	/* Write Requests */
	case UR_CLEAR_FEATURE:
		if (type == UT_WRITE_CLASS_OTHER) {
			if (index == 1 /* Port */) {
				mutex_enter(&sc->sc_intr_lock);
				error = slhci_clear_feature(sc, value);
				mutex_exit(&sc->sc_intr_lock);
			} else
				DLOG(D_ROOT, "Clear Port Feature "
				    "index = %#.4jx", index, 0,0,0);
		}
		break;
	case UR_SET_FEATURE:
		if (type == UT_WRITE_CLASS_OTHER) {
			if (index == 1 /* Port */) {
				mutex_enter(&sc->sc_intr_lock);
				error = slhci_set_feature(sc, value);
				mutex_exit(&sc->sc_intr_lock);
			} else
				DLOG(D_ROOT, "Set Port Feature "
				    "index = %#.4jx", index, 0,0,0);
		} else if (type != UT_WRITE_CLASS_DEVICE)
			DLOG(D_ROOT, "Set Device Feature "
			    "ENDPOINT_HALT or DEVICE_REMOTE_WAKEUP "
			    "not supported", 0,0,0,0);
		break;
	/* Read Requests */
	case UR_GET_STATUS:
		if (type == UT_READ_CLASS_OTHER) {
			if (index == 1 /* Port */ && len == /* XXX >=? */
			    sizeof(usb_port_status_t)) {
				mutex_enter(&sc->sc_intr_lock);
				slhci_get_status(sc, (usb_port_status_t *)
				    buf);
				mutex_exit(&sc->sc_intr_lock);
				actlen = sizeof(usb_port_status_t);
				error = USBD_NORMAL_COMPLETION;
			} else
				DLOG(D_ROOT, "Get Port Status index = %#.4jx "
				    "len = %#.4jx", index, len, 0,0);
		} else if (type == UT_READ_CLASS_DEVICE) { /* XXX index? */
			if (len == sizeof(usb_hub_status_t)) {
				DLOG(D_ROOT, "Get Hub Status",
				    0,0,0,0);
				/* Hub status/change: nothing to report. */
				actlen = sizeof(usb_hub_status_t);
				memset(buf, 0, actlen);
				error = USBD_NORMAL_COMPLETION;
			} else
				DLOG(D_ROOT, "Get Hub Status bad len %#.4jx",
				    len, 0,0,0);
		}
		break;
	case UR_GET_DESCRIPTOR:
		if (type == UT_READ_DEVICE) {
			/* value is type (&0xff00) and index (0xff) */
			if (value == (UDESC_DEVICE<<8)) {
				actlen = buflen;
				error = USBD_NORMAL_COMPLETION;
			} else if (value == (UDESC_CONFIG<<8)) {
				struct usb_roothub_descriptors confd;
				actlen = uimin(buflen, sizeof(confd));
				memcpy(&confd, buf, actlen);
				/* 2 mA units */
				confd.urh_confd.bMaxPower = t->max_current;
				memcpy(buf, &confd, actlen);
				error = USBD_NORMAL_COMPLETION;
			} else if (value == ((UDESC_STRING<<8)|1)) {
				/* Vendor */
				actlen = buflen;
				error = USBD_NORMAL_COMPLETION;
			} else if (value == ((UDESC_STRING<<8)|2)) {
				/* Product */
				actlen = usb_makestrdesc((usb_string_descriptor_t *)
				    buf, len, "SL811HS/T root hub");
				error = USBD_NORMAL_COMPLETION;
			} else
				DDOLOG("Unknown Get Descriptor %#.4x",
				    value, 0,0,0);
		} else if (type == UT_READ_CLASS_DEVICE) {
			/* Descriptor number is 0 */
			if (value == (UDESC_HUB<<8)) {
				usb_hub_descriptor_t hubd;
				actlen = uimin(buflen, sizeof(hubd));
				memcpy(&hubd, buf, actlen);
				hubd.bHubContrCurrent =
				    500 - t->max_current;
				memcpy(buf, &hubd, actlen);
				error = USBD_NORMAL_COMPLETION;
			} else
				DDOLOG("Unknown Get Hub Descriptor %#.4x",
				    value, 0,0,0);
		}
		break;
	default:
		/* default from usbroothub */
		return buflen;
	}
	if (error == USBD_NORMAL_COMPLETION)
		return actlen;
	return -1;
}
/* End in lock functions. Start debug functions. */
#ifdef SLHCI_DEBUG
/*
 * Debug helper: dump the data stage of a completed IN transfer along with
 * its requested/actual/short lengths.
 */
void
slhci_log_buffer(struct usbd_xfer *xfer)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	u_char *buf;
	if(xfer->ux_length > 0 &&
	    UE_GET_DIR(xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress) ==
	    UE_DIR_IN) {
		buf = xfer->ux_buf;
		DDOLOGBUF(buf, xfer->ux_actlen);
		DDOLOG("len %d actlen %d short %d", xfer->ux_length,
		    xfer->ux_actlen, xfer->ux_length - xfer->ux_actlen, 0);
	}
}
/*
 * Debug helper: decode and log the fields of a USB device request.
 */
void
slhci_log_req(usb_device_request_t *r)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	int breq = r->bRequest;
	int btype = r->bmRequestType;
	int wval = UGETW(r->wValue);
	int widx = UGETW(r->wIndex);
	int wlen = UGETW(r->wLength);
	DDOLOG("request: type %#x", btype, 0, 0, 0);
	DDOLOG("request: r=%d,v=%d,i=%d,l=%d ", breq, wval, widx, wlen);
}
/*
 * Debug helper: dump all SL811HS registers of the global softc (ssc),
 * decode the bitfields, and hex-dump both A/B chip buffers when their
 * address/length registers describe a valid range.
 */
void
slhci_log_dumpreg(void)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	uint8_t r;
	unsigned int aaddr, alen, baddr, blen;
	static u_char buf[240];
	r = slhci_read(ssc, SL11_E0CTRL);
	DDOLOG("USB A Host Control = %#.2x", r, 0, 0, 0);
	DDOLOGEPCTRL(r);
	aaddr = slhci_read(ssc, SL11_E0ADDR);
	DDOLOG("USB A Base Address = %u", aaddr, 0,0,0);
	alen = slhci_read(ssc, SL11_E0LEN);
	DDOLOG("USB A Length = %u", alen, 0,0,0);
	r = slhci_read(ssc, SL11_E0STAT);
	DDOLOG("USB A Status = %#.2x", r, 0,0,0);
	DDOLOGEPSTAT(r);
	r = slhci_read(ssc, SL11_E0CONT);
	DDOLOG("USB A Remaining or Overflow Length = %u", r, 0,0,0);
	r = slhci_read(ssc, SL11_E1CTRL);
	DDOLOG("USB B Host Control = %#.2x", r, 0,0,0);
	DDOLOGEPCTRL(r);
	baddr = slhci_read(ssc, SL11_E1ADDR);
	DDOLOG("USB B Base Address = %u", baddr, 0,0,0);
	blen = slhci_read(ssc, SL11_E1LEN);
	DDOLOG("USB B Length = %u", blen, 0,0,0);
	r = slhci_read(ssc, SL11_E1STAT);
	DDOLOG("USB B Status = %#.2x", r, 0,0,0);
	DDOLOGEPSTAT(r);
	r = slhci_read(ssc, SL11_E1CONT);
	DDOLOG("USB B Remaining or Overflow Length = %u", r, 0,0,0);
	r = slhci_read(ssc, SL11_CTRL);
	DDOLOG("Control = %#.2x", r, 0,0,0);
	DDOLOGCTRL(r);
	r = slhci_read(ssc, SL11_IER);
	DDOLOG("Interrupt Enable = %#.2x", r, 0,0,0);
	DDOLOGIER(r);
	r = slhci_read(ssc, SL11_ISR);
	DDOLOG("Interrupt Status = %#.2x", r, 0,0,0);
	DDOLOGISR(r);
	r = slhci_read(ssc, SL11_REV);
	DDOLOG("Revision = %#.2x", r, 0,0,0);
	r = slhci_read(ssc, SL811_CSOF);
	DDOLOG("SOF Counter = %#.2x", r, 0,0,0);
	/* Dump the A buffer only if the registers describe a sane range. */
	if (alen && aaddr >= SL11_BUFFER_START && aaddr < SL11_BUFFER_END &&
	    alen <= SL11_MAX_PACKET_SIZE && aaddr + alen <= SL11_BUFFER_END) {
		slhci_read_multi(ssc, aaddr, buf, alen);
		DDOLOG("USBA Buffer: start %u len %u", aaddr, alen, 0,0);
		DDOLOGBUF(buf, alen);
	} else if (alen)
		DDOLOG("USBA Buffer Invalid", 0,0,0,0);
	if (blen && baddr >= SL11_BUFFER_START && baddr < SL11_BUFFER_END &&
	    blen <= SL11_MAX_PACKET_SIZE && baddr + blen <= SL11_BUFFER_END) {
		slhci_read_multi(ssc, baddr, buf, blen);
		DDOLOG("USBB Buffer: start %u len %u", baddr, blen, 0,0);
		DDOLOGBUF(buf, blen);
	} else if (blen)
		DDOLOG("USBB Buffer Invalid", 0,0,0,0);
}
/*
 * Debug helper: log the main fields of an xfer plus its setup request.
 */
void
slhci_log_xfer(struct usbd_xfer *xfer)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	DDOLOG("xfer: length=%u, actlen=%u, flags=%#x, timeout=%u,",
	    xfer->ux_length, xfer->ux_actlen, xfer->ux_flags, xfer->ux_timeout);
	DDOLOG("buffer=%p", xfer->ux_buf, 0,0,0);
	slhci_log_req(&xfer->ux_request);
}
/*
 * Debug helper: log a pipe's queue membership and key fields.
 */
void
slhci_log_spipe(struct slhci_pipe *spipe)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	DDOLOG("spipe %p onlists: AP=%d TO=%d XQ=%d", spipe,
	    gcq_onlist(&spipe->ap) ? 1 : 0,
	    gcq_onlist(&spipe->to) ? 1 : 0,
	    gcq_onlist(&spipe->xq) ? 1 : 0);
	DDOLOG("spipe: xfer %p buffer %p pflags %#x ptype %d",
	    spipe->xfer, spipe->buffer, spipe->pflags, spipe->ptype);
}
/*
 * Debug helper: print the current interrupt enable and status registers
 * of the global softc.  The IER read is sequenced before the ISR read.
 */
void
slhci_print_intr(void)
{
	unsigned int enable_reg = slhci_read(ssc, SL11_IER);
	unsigned int status_reg = slhci_read(ssc, SL11_ISR);
	printf("IER: %#x ISR: %#x \n", enable_reg, status_reg);
}
/*
 * Disabled (#if 0) debug helpers: dump the global transfer state and a
 * pipe's transfer registers.  Kept for reference; not compiled.
 */
#if 0
void
slhci_log_sc(void)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	struct slhci_transfers *t;
	int i;
	t = &ssc->sc_transfers;
	DDOLOG("Flags=%#x", t->flags, 0,0,0);
	DDOLOG("a = %p Alen=%d b = %p Blen=%d", t->spipe[0], t->len[0],
	    t->spipe[1], t->len[1]);
	for (i = 0; i <= Q_MAX; i++)
		DDOLOG("Q %d: %p", i, gcq_hq(&t->q[i]), 0,0);
	DDOLOG("TIMED: %p", GCQ_ITEM(gcq_hq(&t->to),
	    struct slhci_pipe, to), 0,0,0);
	DDOLOG("frame=%d rootintr=%p", t->frame, t->rootintr, 0,0);
	DDOLOG("ub_usepolling=%d", ssc->sc_bus.ub_usepolling, 0, 0, 0);
}
void
slhci_log_slreq(struct slhci_pipe *r)
{
	SLHCIHIST_FUNC(); SLHCIHIST_CALLED();
	DDOLOG("xfer: %p", r->xfer, 0,0,0);
	DDOLOG("buffer: %p", r->buffer, 0,0,0);
	DDOLOG("bustime: %u", r->bustime, 0,0,0);
	DDOLOG("control: %#x", r->control, 0,0,0);
	DDOLOGEPCTRL(r->control);
	DDOLOG("pid: %#x", r->tregs[PID], 0,0,0);
	DDOLOG("dev: %u", r->tregs[DEV], 0,0,0);
	DDOLOG("len: %u", r->tregs[LEN], 0,0,0);
	if (r->xfer)
		slhci_log_xfer(r->xfer);
}
#endif
#endif /* SLHCI_DEBUG */
/* End debug functions. */
|
0203ab69aa5081c83b6e8c9013c331fadab0568d
|
f268b50cfc676024734009a0678825d01fa78a57
|
/src/stages/stage2/wriggle.c
|
1a471bf7d610366c95e99eff2ea8acf2ba4d6229
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
taisei-project/taisei
|
90a1358567c77555eabfdb340bb6adeb913e2ced
|
f1c156cacdb579e66d4bc1776d4d1809e93014d2
|
refs/heads/master
| 2023-09-04T06:25:18.445412
| 2023-09-02T17:31:06
| 2023-09-02T17:31:06
| 977,986
| 785
| 87
|
NOASSERTION
| 2023-04-29T18:16:47
| 2010-10-11T07:31:32
|
C
|
UTF-8
|
C
| false
| false
| 515
|
c
|
wriggle.c
|
/*
* This software is licensed under the terms of the MIT License.
* See COPYING for further information.
* ---
* Copyright (c) 2011-2019, Lukas Weber <laochailan@web.de>.
* Copyright (c) 2012-2019, Andrei Alexeyev <akari@taisei-project.org>.
*/
#include "taisei.h"
#include "wriggle.h"
/*
 * Spawn the stage 2 Wriggle boss at `pos` and apply her glow and
 * shadow colors.  Returns the freshly created Boss.
 */
Boss *stage2_spawn_wriggle(cmplx pos) {
	Boss *boss = create_boss("Wriggle", "wriggle", pos);
	boss->glowcolor = *RGB(0.2, 0.4, 0.5);
	boss->shadowcolor = *RGBA_MUL_ALPHA(0.4, 0.2, 0.6, 0.5);
	return boss;
}
|
85179eb65b8ab8b35b822a6624da8bed676f6f52
|
010279e2ba272d09e9d2c4e903722e5faba2cf7a
|
/contrib/libs/libf2c/c_sqrt.c
|
1678c534d60a8e59b0cfc6966f06ca15845a8e67
|
[
"LicenseRef-scancode-other-permissive",
"Apache-2.0"
] |
permissive
|
catboost/catboost
|
854c1a1f439a96f1ae6b48e16644be20aa04dba2
|
f5042e35b945aded77b23470ead62d7eacefde92
|
refs/heads/master
| 2023-09-01T12:14:14.174108
| 2023-09-01T10:01:01
| 2023-09-01T10:22:12
| 97,556,265
| 8,012
| 1,425
|
Apache-2.0
| 2023-09-11T03:32:32
| 2017-07-18T05:29:04
|
Python
|
UTF-8
|
C
| false
| false
| 605
|
c
|
c_sqrt.c
|
#include "f2c.h"
#ifdef KR_headers
extern double sqrt(), f__cabs();
VOID c_sqrt(r, z) complex *r, *z;
#else
#undef abs
#include "math.h"
#ifdef __cplusplus
extern "C" {
#endif
extern double f__cabs(double, double);
void c_sqrt(complex *r, complex *z)
#endif
/*
 * Complex square root, principal branch (Re(result) >= 0):  *r = sqrt(*z).
 * z's parts are copied to locals up front, so r may alias z.
 */
{
double re = z->r, im = z->i;
double mod = f__cabs(re, im);
double s;

if (mod == 0.) {
	/* sqrt(0) == 0 */
	r->r = r->i = 0.;
	return;
}
if (re > 0) {
	/* Numerically stable: take the real part first, derive the imaginary. */
	s = sqrt(0.5 * (mod + re));
	r->r = s;
	r->i = 0.5 * (im / s);
	return;
}
/* re <= 0: take the imaginary part first; its sign follows im. */
s = sqrt(0.5 * (mod - re));
if (im < 0)
	s = -s;
r->i = s;
r->r = 0.5 * (im / s);
}
#ifdef __cplusplus
}
#endif
|
731a67329e5b489d1bfa094c1738697b4926cc92
|
fa1e5fac8c88c7ee3b4b6540ee3e1a2668c02557
|
/Python/ext_src/abc_ide_util.h
|
0afb6444411c8225c3503e2c516df11d7056f077
|
[
"MIT"
] |
permissive
|
zhaokg/Rbeast
|
cb6e6a2b6f846c3193c3299c72784466d331aa2b
|
2dfe69eb9d8b44512231a67aceee7f46f38d20bd
|
refs/heads/master
| 2023-09-03T10:20:22.697438
| 2023-09-01T14:38:42
| 2023-09-01T14:38:42
| 199,787,044
| 124
| 29
| null | 2022-04-07T01:11:45
| 2019-07-31T05:42:54
|
C
|
UTF-8
|
C
| false
| false
| 10,697
|
h
|
abc_ide_util.h
|
#pragma once
#include "abc_001_config.h"
#include "abc_datatype.h"
#include "abc_ts_func.h"
#ifdef __cplusplus
extern "C" {
#endif
extern I08 IDE_USER_INTERRUPT;
typedef enum IO_TYPE { MEM_IO, DISK_IO } IO_TYPE;
/*
 * Describes one field of an IDE (R/Matlab/Python) struct variable;
 * consumed by CreateStructVar(), RemoveField() and related helpers.
 */
typedef struct FIELD_ITEM {
char name[64];   // field name (NUL-terminated)
DATA_TYPE type;  // element data type of the field
int ndim;        // number of dimensions actually used in dims[]
int dims[5];     // extent per dimension (up to 5)
void** ptr;      // out: receives a pointer to the field's data buffer -- TODO confirm against CreateStructVar
int extra; //added for extension to multivariate ts for mrbeast
} FIELD_ITEM;
VOID_PTR GetFieldByIdx(VOID_PTR strucVar, I32 ind);
void GetFieldNameByIdx(VOID_PTR strucVar, I32 ind0, char* str, int buflen);
void* CreateNumVar(DATA_TYPE dtype, int* dims, int ndims, VOIDPTR* data_ptr);
void * CreateNumVector(DATA_TYPE dtype, int length, VOIDPTR* data_ptr);
void * CreateNumMatrix(DATA_TYPE dtype, int Nrow, int Ncol, VOIDPTR* data_ptr);
void* CreateF32NumVector(int length, VOIDPTR* data_ptr);
void* CreateF32NumMatrix(int Nrow, int Ncol, VOIDPTR* data_ptr);
void* CreateF64NumVector(int length, VOIDPTR* data_ptr);
void* CreateF64NumMatrix(int Nrow, int Ncol, VOIDPTR* data_ptr);
void* CreateI32NumVector(int length, VOIDPTR* data_ptr);
void* CreateI32NumMatrix(int Nrow, int Ncol, VOIDPTR* data_ptr);
void ReplaceStructField(VOIDPTR s, char* fname, VOIDPTR newvalue);
void* CreateStructVar(FIELD_ITEM* fieldList, int nfields);
void DestoryStructVar(VOID_PTR strutVar);
void RemoveSingltonDims(FIELD_ITEM* flist, I32 nlist);
void RemoveField(FIELD_ITEM* fieldList, int nfields, char* fieldName);
void AddStringAttribute(VOID_PTR listVar, const char* field, const char* value);
void AddIntegerAttribute(VOID_PTR listVar, const char* field, I32 value);
void RemoveAttribute(VOID_PTR listVar, const char* field);
extern I32 GetConsoleWidth(void);
extern void printProgress(F32 pct, I32 width, char* buf, I32 firstTimeRun);
extern void printProgress2(F32 pct, F64 time, I32 width, char* buf, I32 firstTimeRun);
I32 GetCharArray(void* ptr, char* dst, int n);
I32 GetCharVecElem(void* ptr, int idx, char* dst, int n);
void* GetField123(const void* structVar, char* fname, int nPartial);
void* GetField(const void* structVar, char* fname);
void* GetField123Check(const void* structVar, char* fname, int nPartial);
void* GetFieldCheck(const void* structVar, char* fname);
F64 GetScalar(const void* ptr);
F64 GetNumericElement(const void* Y, I32 idx);
void* GetData(const void* ptr);
int GetDataType(VOID_PTR Y);
int GetDim1(const void* ptr);
int GetDim2(const void* ptr);
int GetNumOfDim(const void* ptr);
void GetDimensions(const void* ptr, int dims[], int ndims);
// Return a pointer to the modified object. For Python, it is a new PyObject pointing to the old memory.
void *SetDimensions(const void* ptr, int dims[], int ndims);
int GetNumberOfElements(const void* ptr);
I32 GetNumberOfFields(const void* structVar);
int IsClass(void* ptr, char* classstr);
int IsCell(void* ptr);
int IsChar(void* ptr);
int IsEmpty(void* ptr);
int IsStruct(void* ptr);
int IsNumeric(void* ptr);
int IsDouble(void* ptr);
int IsSingle(void* ptr);
int IsInt32(void* ptr);
int IsInt16(void* ptr);
int IsInt64(void* ptr);
int IsLogical(void* ptr);
int HaveEqualDimesions(const void* p1, const void* p2);
int CopyNumericObjToF32Arr(F32PTR outmem, VOID_PTR infield, int N);
int CopyNumericObjToI32Arr(I32PTR outmem, VOID_PTR infield, int N);
int CopyNumericObjToF64Arr(F64PTR outmem, VOID_PTR infield, int N);
extern I32 CheckInterrupt(void);
extern void ConsumeInterruptSignal(void);
/* Compile-time predicates for which IDE interface (R / Matlab / Python)
 * this build targets; the *_INTERFACE macros come from the build
 * configuration (abc_001_config.h, included above). */
static INLINE int IsRinterface(void) { return R_INTERFACE; }
static INLINE int IsMinterface(void) { return M_INTERFACE; }
static INLINE int IsPinterface(void) { return P_INTERFACE; }
int GetNumElemTimeObject(VOID_PTR timeObj);
/*
 * A scalar time/interval parsed from an IDE object; filled in by
 * Parse_TimeIntervalObject() / Parse_SingelDateObject() below.
 */
typedef struct {
F64 fyear;  // time as a fractional (decimal) year -- TODO confirm
F64 value;  // raw numeric value of the interval -- units depend on `unit`
I08 unit;   // unit code for `value`; encoding not visible in this header
} TimeScalarInfo;
F64 Parse_TimeIntervalObject(VOIDPTR* obj, TimeScalarInfo* tint);
F64 Parse_SingelDateObject(VOIDPTR* obj, TimeScalarInfo* tint);
extern char* dateNumOriginStr;
int JDN_to_DateNum(int jdn);
void TimeVec_init(TimeVecInfo* tv);
void TimeVec_kill(TimeVecInfo* tv);
void TimeVec_kill_fyearArray(TimeVecInfo* tv);
int TimeVec_from_TimeObject(VOID_PTR timeObj, TimeVecInfo* tv);
void TimeVec_from_StartDeltaTime(TimeVecInfo* tv, F32 start, F32 dt, int N, int isDate);
void TimeVec_SortCheckRegularOrder(TimeVecInfo* tv);
void TimeVec_UpdateUserStartDt(TimeVecInfo* tv);
/**********************************************/
// Convert the input data into a contiguous Fortran-ordered array.
// It generates a new reference that must be explicitly
// decref-ed before the program exits.
// For R and Matlab, nothing is done.
void* CvtToPyArray_NewRef(VOIDPTR Y);
/**********************************************/
#include "abc_mem.h"
void CharObj2CharArr(VOID_PTR o, DynMemBuf* str, DynAlignedBuf* charstart, DynAlignedBuf* nchars);
void obj_to_str(VOID_PTR o, DynMemBufPtr s, int leftMargin);
int IDEPrintObject(VOID_PTR o);
#if M_INTERFACE==1
//#define r_error(x) mexErrMsgTxt(x) //mexErrMsgTxt dones' take a va_args input.
#define r_printf(...) mexPrintf(__VA_ARGS__)
//#define q_warning(...) mexPrintf(__VA_ARGS__)
//#define r_error(...) mexPrintf(__VA_ARGS__)
#define r_error(...) do { \
char sss[1000]; \
snprintf(sss,1000-1, __VA_ARGS__);\
word_wrap_indented(sss,110,5); \
mexPrintf("%s",sss); \
}while(0)
#define r_warning(...) r_error(__VA_ARGS__)
#define r_malloc(x) mxMalloc(x)
#define r_free(x) mxFree(x)
#define IDE_NULL mxCreateNumericMatrix(0, 0, mxSINGLE_CLASS, mxREAL)
#elif R_INTERFACE==1
//stackoverflow.com/questions/37206118/va-args-not-swallowing-comma-when-zero-args-under-c99
//#define mexPrintf(output, ...) Rprintf(output, ##__VA_ARGS__) //"##" is used to swallow the preceding comma if it is empty!
#define r_printf(...) Rprintf(__VA_ARGS__)
#define r_error(...) error(__VA_ARGS__)
//#define q_warning(...) warning(__VA_ARGS__)
#define r_warning(...) Rf_warning(__VA_ARGS__)
#define r_malloc(x) Calloc(x, char) //from header file R_ext\RS.h
#define r_free(x) Free(x)
#define IDE_NULL R_NilValue
#elif P_INTERFACE==1
//stackoverflow.com/questions/37206118/va-args-not-swallowing-comma-when-zero-args-under-c99
//#define mexPrintf(output, ...) Rprintf(output, ##__VA_ARGS__) //"##" is used to swallow the preceding comma if it is empty!
// Route output through Python's sys.stdout so it appears in notebooks/REPLs.
// (A non-identical duplicate "#define r_printf(...) printf(...)" used to
// precede this one; C forbids redefining a macro with a different body.)
#define r_printf(...) PySys_WriteStdout(__VA_ARGS__)
#define r_error(...) printf(__VA_ARGS__)
//#define q_warning(...) warning(__VA_ARGS__)
#define r_warning(...) printf(__VA_ARGS__)
// PyMem_RawMalloc takes a single byte count (unlike R's Calloc(n, type));
// the previous two-argument form PyMem_RawMalloc(x, char) could not compile.
#define r_malloc(x) PyMem_RawMalloc(x)
#define r_free(x) PyMem_RawFree(x)
#define IDE_NULL Py_None
#endif
// defined in globalvars.c
extern char GLOBAL_QUIET_MODE;
#define q_warning(...) { if (!GLOBAL_QUIET_MODE) {r_warning(__VA_ARGS__);}}
#define q_printf(...) { if (!GLOBAL_QUIET_MODE) {r_printf(__VA_ARGS__);} }
#if R_INTERFACE==1
extern SEXP getListElement(SEXP list, const char* str);
extern SEXP getListElement_CaseIn(SEXP list, const char* str);
// https: //github.com/ryanoasis/public-bash-scripts/blob/master/unix-color-codes.sh
// https: //stackoverflow.com/questions/5947742/how-to-change-the-output-color-of-echo-in-linux/5947779
/*
Color_Off = "\[\033[0m\]" # Text Reset
# Regular Colors
Black = "\[\033[0;30m\]" # Black
Red = "\[\033[0;31m\]" # Red
Green = "\[\033[0;32m\]" # Green
Yellow = "\[\033[0;33m\]" # Yellow
Blue = "\[\033[0;34m\]" # Blue
Purple = "\[\033[0;35m\]" # Purple
Cyan = "\[\033[0;36m\]" # Cyan
White = "\[\033[0;37m\]" # White
# Bold
BBlack = "\[\033[1;30m\]" # Black
BRed = "\[\033[1;31m\]" # Red
BGreen = "\[\033[1;32m\]" # Green
BYellow = "\[\033[1;33m\]" # Yellow
BBlue = "\[\033[1;34m\]" # Blue
BPurple = "\[\033[1;35m\]" # Purple
BCyan = "\[\033[1;36m\]" # Cyan
BWhite = "\[\033[1;37m\]" # White
# Underline
UBlack = "\[\033[4;30m\]" # Black
URed = "\[\033[4;31m\]" # Red
UGreen = "\[\033[4;32m\]" # Green
UYellow = "\[\033[4;33m\]" # Yellow
UBlue = "\[\033[4;34m\]" # Blue
UPurple = "\[\033[4;35m\]" # Purple
UCyan = "\[\033[4;36m\]" # Cyan
UWhite = "\[\033[4;37m\]" # White
# Background
On_Black = "\[\033[40m\]" # Black
On_Red = "\[\033[41m\]" # Red
On_Green = "\[\033[42m\]" # Green
On_Yellow = "\[\033[43m\]" # Yellow
On_Blue = "\[\033[44m\]" # Blue
On_Purple = "\[\033[45m\]" # Purple
On_Cyan = "\[\033[46m\]" # Cyan
On_White = "\[\033[47m\]" # White
# High Intensty
IBlack = "\[\033[0;90m\]" # Black
IRed = "\[\033[0;91m\]" # Red
IGreen = "\[\033[0;92m\]" # Green
IYellow = "\[\033[0;93m\]" # Yellow
IBlue = "\[\033[0;94m\]" # Blue
IPurple = "\[\033[0;95m\]" # Purple
ICyan = "\[\033[0;96m\]" # Cyan
IWhite = "\[\033[0;97m\]" # White
# Bold High Intensty
BIBlack = "\[\033[1;90m\]" # Black
BIRed = "\[\033[1;91m\]" # Red
BIGreen = "\[\033[1;92m\]" # Green
BIYellow = "\[\033[1;93m\]" # Yellow
BIBlue = "\[\033[1;94m\]" # Blue
BIPurple = "\[\033[1;95m\]" # Purple
BICyan = "\[\033[1;96m\]" # Cyan
BIWhite = "\[\033[1;97m\]" # White
# High Intensty backgrounds
On_IBlack = "\[\033[0;100m\]" # Black
On_IRed = "\[\033[0;101m\]" # Red
On_IGreen = "\[\033[0;102m\]" # Green
On_IYellow = "\[\033[0;103m\]" # Yellow
On_IBlue = "\[\033[0;104m\]" # Blue
On_IPurple = "\[\033[10;95m\]" # Purple
On_ICyan = "\[\033[0;106m\]" # Cyan
On_IWhite = "\[\033[0;107m\]" # White
BLACK = "\[\e[00;30m\]"
DARY_GRAY = "\[\e[01;30m\]"
RED = "\[\e[00;31m\]"
BRIGHT_RED = "\[\e[01;31m\]"
GREEN = "\[\e[00;32m\]"
BRIGHT_GREEN = "\[\e[01;32m\]"
BROWN = "\[\e[00;33m\]"
YELLOW = "\[\e[01;33m\]"
BLUE = "\[\e[00;34m\]"
BRIGHT_BLUE = "\[\e[01;34m\]"
PURPLE = "\[\e[00;35m\]"
LIGHT_PURPLE = "\[\e[01;35m\]"
CYAN = "\[\e[00;36m\]"
BRIGHT_CYAN = "\[\e[01;36m\]"
LIGHT_GRAY = "\[\e[00;37m\]"
WHITE = "\[\e[01;37m\]"
ENDCOLOR = "\e[m"
*/
#elif M_INTERFACE==1
#define PROTECT(XXXX) XXXX
#define UNPROTECT(XXXX) XXXX
#elif P_INTERFACE == 1
#define PROTECT(XXXX) XXXX
#define UNPROTECT(XXXX) XXXX
extern PyTypeObject BarObject_Type; //tentative definition
extern PyObject* currentModule;
extern PyObject* classOutout;
extern PyObject* setClassObjects(PyObject* self, PyObject* args);
#endif
#ifdef __cplusplus
}
#endif
|
cda30ce8138b9fd11c5d589b22f18b542ad5e66e
|
99bdb3251fecee538e0630f15f6574054dfc1468
|
/bsp/efm32/Libraries/emlib/src/em_lcd.c
|
a136762f7854bbb8179441a6b4f15af39eb35b27
|
[
"Zlib",
"LicenseRef-scancode-proprietary-license",
"MIT",
"BSD-3-Clause",
"X11",
"BSD-4-Clause-UC",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
RT-Thread/rt-thread
|
03a7c52c2aeb1b06a544143b0e803d72f47d1ece
|
3602f891211904a27dcbd51e5ba72fefce7326b2
|
refs/heads/master
| 2023-09-01T04:10:20.295801
| 2023-08-31T16:20:55
| 2023-08-31T16:20:55
| 7,408,108
| 9,599
| 5,805
|
Apache-2.0
| 2023-09-14T13:37:26
| 2013-01-02T14:49:21
|
C
|
UTF-8
|
C
| false
| false
| 20,420
|
c
|
em_lcd.c
|
/***************************************************************************//**
* @file
* @brief Liquid Crystal Display (LCD) Peripheral API
* @author Energy Micro AS
* @version 3.0.0
*******************************************************************************
* @section License
* <b>(C) Copyright 2012 Energy Micro AS, http://www.energymicro.com</b>
*******************************************************************************
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must not
* claim that you wrote the original software.
* 2. Altered source versions must be plainly marked as such, and must not be
* misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source distribution.
*
* DISCLAIMER OF WARRANTY/LIMITATION OF REMEDIES: Energy Micro AS has no
* obligation to support this Software. Energy Micro AS is providing the
* Software "AS IS", with no express or implied warranties of any kind,
* including, but not limited to, any implied warranties of merchantability
* or fitness for any particular purpose or warranties against infringement
* of any proprietary rights of a third party.
*
* Energy Micro AS will not be liable for any consequential, incidental, or
* special damages, or any other relief, or for any claim by any third party,
* arising from your use of this Software.
*
******************************************************************************/
#include "em_lcd.h"
#if defined(LCD_COUNT) && (LCD_COUNT > 0)
#include "em_assert.h"
#include "em_bitband.h"
/***************************************************************************//**
* @addtogroup EM_Library
* @{
******************************************************************************/
/***************************************************************************//**
* @addtogroup LCD
* @brief Liquid Crystal Display (LCD) Peripheral API
* @{
******************************************************************************/
/***************************************************************************//**
* @brief
* Initialize Liquid Crystal Display (LCD) controller
*
* @details
* This function call will only configure the LCD controller. You must enable
* it afterwards, potentially configuring Frame Control and interrupts first
* according to requirements.
*
* @param[in] lcdInit
* Pointer to initialization structure which configures LCD controller.
*
******************************************************************************/
void LCD_Init(const LCD_Init_TypeDef *lcdInit)
{
  /* Snapshot DISPCTRL first, exactly as before, then stop the controller. */
  uint32_t ctrl = LCD->DISPCTRL;

  EFM_ASSERT(lcdInit != (void *) 0);

  /* The controller must be disabled while it is reconfigured. */
  LCD_Enable(false);

  /* Clear only the fields owned by this routine; other bits (e.g. the
   * voltage-boost level) are preserved. */
  ctrl &= ~(
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
            _LCD_DISPCTRL_MUXE_MASK |
#endif
            _LCD_DISPCTRL_MUX_MASK |
            _LCD_DISPCTRL_BIAS_MASK |
            _LCD_DISPCTRL_WAVE_MASK |
            _LCD_DISPCTRL_VLCDSEL_MASK |
            _LCD_DISPCTRL_CONCONF_MASK);

  /* Merge the requested settings; `mux` also carries the MUXE bits. */
  ctrl |= lcdInit->mux | lcdInit->bias | lcdInit->wave
          | lcdInit->vlcd | lcdInit->contrast;

  LCD->DISPCTRL = ctrl;

  /* Re-enable on request. */
  if (lcdInit->enable)
  {
    LCD_Enable(true);
  }
}
/***************************************************************************//**
* @brief
* Select source for VLCD
*
* @param[in] vlcd
* Select source for VLD voltage
******************************************************************************/
void LCD_VLCDSelect(LCD_VLCDSel_TypeDef vlcd)
{
  /* Clear the VLCD-source field, then set it according to the request. */
  uint32_t ctrl = LCD->DISPCTRL & ~(_LCD_DISPCTRL_VLCDSEL_MASK);

  if (vlcd == lcdVLCDSelVExtBoost)
  {
    ctrl |= LCD_DISPCTRL_VLCDSEL_VEXTBOOST;
  }
  else if (vlcd == lcdVLCDSelVDD)
  {
    ctrl |= LCD_DISPCTRL_VLCDSEL_VDD;
  }
  /* Any other value writes the field back cleared, as before. */

  LCD->DISPCTRL = ctrl;
}
/***************************************************************************//**
* @brief
* Configure Update Control
*
* @param[in] ud
* Configures LCD update method
******************************************************************************/
void LCD_UpdateCtrl(LCD_UpdateCtrl_TypeDef ud)
{
  uint32_t ctrl = LCD->CTRL;

  /* Replace only the update-method field; keep the rest of CTRL intact. */
  ctrl &= ~_LCD_CTRL_UDCTRL_MASK;
  ctrl |= ud;
  LCD->CTRL = ctrl;
}
/***************************************************************************//**
* @brief
* Initialize LCD Frame Counter
*
* @param[in] fcInit
* Pointer to Frame Counter initialization structure
******************************************************************************/
void LCD_FrameCountInit(const LCD_FrameCountInit_TypeDef *fcInit)
{
uint32_t bactrl = LCD->BACTRL;
EFM_ASSERT(fcInit != (void *) 0);
/* Verify FC Top Counter to be within limits */
EFM_ASSERT(fcInit->top < 64);   /* top must fit the 6-bit FCTOP field */
/* Reconfigure frame count configuration */
bactrl &= ~(_LCD_BACTRL_FCTOP_MASK |
_LCD_BACTRL_FCPRESC_MASK);
bactrl |= (fcInit->top << _LCD_BACTRL_FCTOP_SHIFT);
bactrl |= fcInit->prescale;   /* prescale value is pre-shifted by the enum */
/* Set Blink and Animation Control Register */
LCD->BACTRL = bactrl;
/* Finally enable/disable the counter as requested. */
LCD_FrameCountEnable(fcInit->enable);
}
/***************************************************************************//**
* @brief
* Configures LCD controller Animation feature
*
* @param[in] animInit
* Pointer to LCD Animation initialization structure
******************************************************************************/
void LCD_AnimInit(const LCD_AnimInit_TypeDef *animInit)
{
uint32_t bactrl = LCD->BACTRL;
EFM_ASSERT(animInit != (void *) 0);
/* Set Animation Register Values */
LCD->AREGA = animInit->AReg;
LCD->AREGB = animInit->BReg;
/* Configure Animation Shift and Logic */
bactrl &= ~(_LCD_BACTRL_AREGASC_MASK |
_LCD_BACTRL_AREGBSC_MASK |
_LCD_BACTRL_ALOGSEL_MASK);
bactrl |= (animInit->AShift << _LCD_BACTRL_AREGASC_SHIFT);
bactrl |= (animInit->BShift << _LCD_BACTRL_AREGBSC_SHIFT);
bactrl |= animInit->animLogic;
#if defined(_EFM32_GIANT_FAMILY)
/* NOTE(review): only startSeg values 0 and 8 are handled; anything else
 * silently leaves the ALOC field unset.  Also, ALOC is OR-ed without a
 * prior mask clear -- confirm whether a stale ALOC bit can persist here. */
if(animInit->startSeg == 0)
{
bactrl |= LCD_BACTRL_ALOC_SEG0TO7;
}
else if(animInit->startSeg == 8)
{
bactrl |= LCD_BACTRL_ALOC_SEG8TO15;
}
#endif
/* Reconfigure */
LCD->BACTRL = bactrl;
/* Enable */
LCD_AnimEnable(animInit->enable);
}
/***************************************************************************//**
* @brief
* Enables update of this range of LCD segment lines
*
* @param[in] segmentRange
* Range of 4 LCD segments lines to enable or disable, for all enabled COM
* lines
*
* @param[in] enable
* Bool true to enable segment updates, false to disable updates
******************************************************************************/
void LCD_SegmentRangeEnable(LCD_SegmentRange_TypeDef segmentRange, bool enable)
{
  /* Each LCD_SegmentRange_TypeDef value covers a group of 4 segment lines
   * in SEGEN; set or clear that group's bits. */
  if (!enable)
  {
    LCD->SEGEN &= ~((uint32_t)segmentRange);
  }
  else
  {
    LCD->SEGEN |= segmentRange;
  }
}
/***************************************************************************//**
* @brief
* Turn on or clear a segment
*
* @note
* On Gecko Family, max configuration is (COM-lines x Segment-Lines) 4x40
* On Tiny Family, max configuration is 8x20 or 4x24
* On Giant Family, max configuration is 8x36 or 4x40
*
* @param[in] com
* COM line to change
*
* @param[in] bit
* Bit index of which field to change
*
* @param[in] enable
* When true will set segment, when false will clear segment
******************************************************************************/
void LCD_SegmentSet(int com, int bit, bool enable)
{
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
/* Tiny and Giant Family supports up to 8 COM lines */
EFM_ASSERT(com < 8);
#else
/* Gecko Family supports up to 4 COM lines */
EFM_ASSERT(com < 4);
#endif
#if defined(_EFM32_GECKO_FAMILY) || defined(_EFM32_GIANT_FAMILY)
EFM_ASSERT(bit < 40);
#else
/* Tiny Gecko Family supports only "low" segment registers */
EFM_ASSERT(bit < 32);
#endif
/* Use bitband access for atomic bit set/clear of segment */
/* Each COM line has a low segment register (bits 0-31) and, on families
 * with 40 segments, a high register (bits 32-39).  All cases below are
 * identical apart from the SEGDnL/SEGDnH register pair and the family
 * guards: COM 4-7 only exist on Tiny/Giant, high halves on Gecko/Giant
 * (low) resp. Giant (high). */
switch (com)
{
case 0:
if (bit < 32)
{
BITBAND_Peripheral(&(LCD->SEGD0L), bit, (unsigned int)enable);
}
#if defined(_EFM32_GECKO_FAMILY) || defined(_EFM32_GIANT_FAMILY)
else
{
bit -= 32;
BITBAND_Peripheral(&(LCD->SEGD0H), bit, (unsigned int)enable);
}
#endif
break;
case 1:
if (bit < 32)
{
BITBAND_Peripheral(&(LCD->SEGD1L), bit, (unsigned int)enable);
}
#if defined(_EFM32_GECKO_FAMILY) || defined(_EFM32_GIANT_FAMILY)
else
{
bit -= 32;
BITBAND_Peripheral(&(LCD->SEGD1H), bit, (unsigned int)enable);
}
#endif
break;
case 2:
if (bit < 32)
{
BITBAND_Peripheral(&(LCD->SEGD2L), bit, (unsigned int)enable);
}
#if defined(_EFM32_GECKO_FAMILY) || defined(_EFM32_GIANT_FAMILY)
else
{
bit -= 32;
BITBAND_Peripheral(&(LCD->SEGD2H), bit, (unsigned int)enable);
}
#endif
break;
case 3:
if (bit < 32)
{
BITBAND_Peripheral(&(LCD->SEGD3L), bit, (unsigned int)enable);
}
#if defined(_EFM32_GECKO_FAMILY) || defined(_EFM32_GIANT_FAMILY)
else
{
bit -= 32;
BITBAND_Peripheral(&(LCD->SEGD3H), bit, (unsigned int)enable);
}
#endif
break;
case 4:
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
if (bit < 32)
{
BITBAND_Peripheral(&(LCD->SEGD4L), bit, (unsigned int)enable);
}
#endif
#if defined(_EFM32_GIANT_FAMILY)
else
{
bit -= 32;
BITBAND_Peripheral(&(LCD->SEGD4H), bit, (unsigned int)enable);
}
#endif
break;
case 5:
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
if (bit < 32)
{
BITBAND_Peripheral(&(LCD->SEGD5L), bit, (unsigned int)enable);
}
#endif
#if defined(_EFM32_GIANT_FAMILY)
else
{
bit -= 32;
BITBAND_Peripheral(&(LCD->SEGD5H), bit, (unsigned int)enable);
}
#endif
break;
case 6:
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
if (bit < 32)
{
BITBAND_Peripheral(&(LCD->SEGD6L), bit, (unsigned int)enable);
}
#endif
#if defined(_EFM32_GIANT_FAMILY)
else
{
bit -= 32;
BITBAND_Peripheral(&(LCD->SEGD6H), bit, (unsigned int)enable);
}
#endif
break;
case 7:
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
if (bit < 32)
{
BITBAND_Peripheral(&(LCD->SEGD7L), bit, (unsigned int)enable);
}
#endif
#if defined(_EFM32_GIANT_FAMILY)
else
{
bit -= 32;
BITBAND_Peripheral(&(LCD->SEGD7H), bit, (unsigned int)enable);
}
#endif
break;
default:
/* Unreachable for valid input; the asserts above bound `com`. */
EFM_ASSERT(0);
break;
}
}
/***************************************************************************//**
* @brief
* Updates the 0-31 lowest segments on a given COM-line in one operation,
* according to bit mask
*
* @param[in] com
* Which COM line to update
*
* @param[in] mask
* Bit mask for segments 0-31
*
* @param[in] bits
* Bit pattern for segments 0-31
******************************************************************************/
void LCD_SegmentSetLow(int com, uint32_t mask, uint32_t bits)
{
uint32_t segData;
/* Maximum number of com lines */
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
EFM_ASSERT(com < 8);
#else
/* Gecko Family supports up to 4 COM lines */
EFM_ASSERT(com < 4);
#endif
/* Read-modify-write: bits outside `mask` are preserved; bits inside
 * `mask` take their value from `bits`.  One case per SEGDnL register. */
switch (com)
{
case 0:
segData = LCD->SEGD0L;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD0L = segData;
break;
case 1:
segData = LCD->SEGD1L;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD1L = segData;
break;
case 2:
segData = LCD->SEGD2L;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD2L = segData;
break;
case 3:
segData = LCD->SEGD3L;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD3L = segData;
break;
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
case 4:
segData = LCD->SEGD4L;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD4L = segData;
break;
#endif
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
case 5:
segData = LCD->SEGD5L;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD5L = segData;
break;
#endif
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
case 6:
segData = LCD->SEGD6L;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD6L = segData;
break;
#endif
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
case 7:
segData = LCD->SEGD7L;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD7L = segData;
break;
#endif
default:
/* Unreachable for valid input; the asserts above bound `com`. */
EFM_ASSERT(0);
break;
}
}
#if defined(_EFM32_GECKO_FAMILY) || defined(_EFM32_GIANT_FAMILY)
/***************************************************************************//**
* @brief
* Update the high (32-39) segments on a given COM-line in one operation
*
* @param[in] com
* Which COM line to update
*
* @param[in] mask
* Bit mask for segments 32-39
*
* @param[in] bits
* Bit pattern for segments 32-39
******************************************************************************/
void LCD_SegmentSetHigh(int com, uint32_t mask, uint32_t bits)
{
uint32_t segData;
#if defined(_EFM32_GIANT_FAMILY)
EFM_ASSERT(com < 8);
#endif
#if defined(_EFM32_GECKO_FAMILY)
EFM_ASSERT(com < 4);
#endif
/* Maximum number of com lines */
/* Read-modify-write of SEGDnH: bits outside `mask` are preserved; bits
 * inside `mask` take their value from `bits`.  One case per register. */
switch (com)
{
case 0:
segData = LCD->SEGD0H;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD0H = segData;
break;
case 1:
segData = LCD->SEGD1H;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD1H = segData;
break;
case 2:
segData = LCD->SEGD2H;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD2H = segData;
break;
case 3:
segData = LCD->SEGD3H;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD3H = segData;
break;
#if defined(_EFM32_GIANT_FAMILY)
case 4:
segData = LCD->SEGD4H;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD4H = segData;
break;
#endif
#if defined(_EFM32_GIANT_FAMILY)
case 5:
segData = LCD->SEGD5H;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD5H = segData;
break;
#endif
#if defined(_EFM32_GIANT_FAMILY)
case 6:
segData = LCD->SEGD6H;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD6H = segData;
break;
#endif
#if defined(_EFM32_GIANT_FAMILY)
case 7:
segData = LCD->SEGD7H;
segData &= ~(mask);
segData |= (mask & bits);
LCD->SEGD7H = segData;
break;
#endif
default:
/* Consistency fix: LCD_SegmentSetLow() and LCD_SegmentSet() assert on
 * an out-of-range COM line; this function previously ignored it
 * silently.  Assert here too (no effect in release builds). */
EFM_ASSERT(0);
break;
}
}
#endif
/***************************************************************************//**
* @brief
* Configure contrast level on LCD panel
*
* @param[in] level
* Contrast level in the range 0-31
******************************************************************************/
void LCD_ContrastSet(int level)
{
EFM_ASSERT(level < 32);
LCD->DISPCTRL = (LCD->DISPCTRL & ~_LCD_DISPCTRL_CONLEV_MASK)
| (level << _LCD_DISPCTRL_CONLEV_SHIFT);
}
/***************************************************************************//**
* @brief
* Configure voltage booster
*
* The resulting voltage level is described in each part number's data sheet
*
* @param[in] vboost
* Voltage boost level
******************************************************************************/
void LCD_VBoostSet(LCD_VBoostLevel_TypeDef vboost)
{
  /* Swap in the new boost level, preserving all other DISPCTRL fields. */
  uint32_t ctrl = LCD->DISPCTRL;

  ctrl &= ~_LCD_DISPCTRL_VBLEV_MASK;
  LCD->DISPCTRL = ctrl | vboost;
}
#if defined(_EFM32_TINY_FAMILY) || defined(_EFM32_GIANT_FAMILY)
/***************************************************************************//**
* @brief
* Configure bias level for a specific segment line for Direct Segment Control
*
* @note
* When DSC is active, each configuration takes up 4 bits in the Segment
* Registers (SEGD0L/SEGD1H) which defines bias level.
* For optimal use of this feature, the entire SEGD-registers should be set
* at once in a optimized routine, so this function is mainly here to
* demonstrate how to correctly configure the bias levels, and should be used
* with care.
*
* @param[in] segmentLine
* Segment line number
*
* @param[in] biasLevel
* Bias configuration level, 0-4. This value must be within the constraint
* defined by the LCD_DISPCTRL bias setting, see Reference Manual/Datasheet
******************************************************************************/
void LCD_BiasSegmentSet(int segmentLine, int biasLevel)
{
int biasRegister;
int bitShift;
volatile uint32_t *segmentRegister;
#if defined(_EFM32_TINY_FAMILY)
EFM_ASSERT(segmentLine < 20);
#endif
#if defined(_EFM32_GIANT_FAMILY)
EFM_ASSERT(segmentLine < 40);
#endif
#if defined(_EFM32_TINY_FAMILY)
/* Bias config for 8 segment lines per SEGDnL register */
biasRegister = segmentLine / 8;
bitShift = (segmentLine % 8) * 4;
switch (biasRegister)
{
case 0:
segmentRegister = &LCD->SEGD0L;
break;
case 1:
segmentRegister = &LCD->SEGD1L;
break;
case 2:
segmentRegister = &LCD->SEGD2L;
break;
case 3:
segmentRegister = &LCD->SEGD3L;
break;
default:
segmentRegister = (uint32_t *)0x00000000;
EFM_ASSERT(0);
break;
}
#endif
#if defined(_EFM32_GIANT_FAMILY)
/* Bias config for 10 segment lines per SEGDn L+H registers */
biasRegister = segmentLine / 10;
bitShift = (segmentLine % 10) * 4;
switch (biasRegister)
{
case 0:
if (bitShift < 32)
{
segmentRegister = &LCD->SEGD0L;
}
else
{
segmentRegister = &LCD->SEGD0H;
bitShift -= 32;
}
break;
case 1:
if (bitShift < 32)
{
segmentRegister = &LCD->SEGD1L;
}
else
{
segmentRegister = &LCD->SEGD1H;
bitShift -= 32;
}
break;
case 2:
if (bitShift < 32)
{
segmentRegister = &LCD->SEGD2L;
}
else
{
/* Bug fix: this branch previously selected SEGD1H (copy/paste
 * error), so segment lines 28-29 corrupted COM pair 1's high
 * register instead of configuring their own bias nibble. */
segmentRegister = &LCD->SEGD2H;
bitShift -= 32;
}
break;
case 3:
if (bitShift < 32)
{
segmentRegister = &LCD->SEGD3L;
}
else
{
segmentRegister = &LCD->SEGD3H;
bitShift -= 32;
}
break;
default:
segmentRegister = (uint32_t *)0x00000000;
EFM_ASSERT(0);
break;
}
#endif
/* Configure new bias setting: replace the 4-bit nibble at bitShift. */
*segmentRegister = (*segmentRegister & ~(0xF << bitShift)) | (biasLevel << bitShift);
}
/***************************************************************************//**
* @brief
* Configure bias level for a specific segment line
*
* @note
* When DSC is active, each configuration takes up 4 bits in the Segment
* Registers (SEGD4L/SEGD4H) which defines bias level.
* For optimal use of this feature, the entire SEGD-registers should be set
* at once in a optimized routine, so this function is mainly here to
* demonstrate how to correctly configure the bias levels, and should be used
* with care.
*
* @param[in] comLine
* COM line number, 0-7
*
* @param[in] biasLevel
* Bias configuration level, 0-4. This value must be within the constraint
* defined by the LCD_DISPCTRL bias setting, see Reference Manual/Datasheet
******************************************************************************/
void LCD_BiasComSet(int comLine, int biasLevel)
{
  int shift;

  EFM_ASSERT(comLine < 8);

  /* Each COM line occupies one 4-bit nibble in SEGD4L. */
  shift = comLine * 4;
  LCD->SEGD4L = (LCD->SEGD4L & ~(0xF << shift)) | (biasLevel << shift);
}
#endif
/** @} (end addtogroup LCD) */
/** @} (end addtogroup EM_Library) */
#endif /* defined(LCD_COUNT) && (LCD_COUNT > 0) */
|
bcd977f12bd9742fe6cee30390b11736c069ae9e
|
3ac1510c8c7b6aeeabbe9846bd82e763884b9a14
|
/tests/cunit/test_darray_3d.c
|
476dffbb4804dbfa73ea07813386d8b27147e960
|
[
"Apache-2.0"
] |
permissive
|
NCAR/ParallelIO
|
42269f1257e94c96cf0d8445a98ff5236fb083b7
|
6d52a8c7dfdea118955f951705934f70e3e373fe
|
refs/heads/main
| 2023-08-17T00:21:13.909648
| 2023-08-16T21:58:47
| 2023-08-16T21:58:47
| 32,825,351
| 119
| 56
|
Apache-2.0
| 2023-09-13T13:07:04
| 2015-03-24T21:10:44
|
C
|
UTF-8
|
C
| false
| false
| 14,406
|
c
|
test_darray_3d.c
|
/*
* Tests for PIO distributed arrays.
*
* @author Ed Hartnett
* @date 2/21/17
*/
#include <config.h>
#include <pio.h>
#include <pio_internal.h>
#include <pio_tests.h>
/* The number of tasks this test should run on. */
#define TARGET_NTASKS 4
/* The minimum number of tasks this test should run on. */
#define MIN_NTASKS 4
/* The name of this test. */
#define TEST_NAME "test_darray_3d"
/* Number of processors that will do IO. */
#define NUM_IO_PROCS 1
/* Number of computational components to create. */
#define COMPONENT_COUNT 1
/* The number of dimensions in the example data. In this test, we
* are using three-dimensional data. */
#define NDIM 4
/* But sometimes we need arrays of the non-record dimensions. */
#define NDIM3 3
/* The length of our sample data along each dimension. */
#define X_DIM_LEN 4
#define Y_DIM_LEN 4
#define Z_DIM_LEN 4
/* This is the length of the map for each task. */
#define EXPECTED_MAPLEN 16
/* The number of timesteps of data to write. */
#define NUM_TIMESTEPS 2
/* The name of the variable in the netCDF output files. */
#define VAR_NAME "foo"
/* Test with and without specifying a fill value to
* PIOc_write_darray(). */
#define NUM_TEST_CASES_FILLVALUE 2
/* The dimension names. */
char dim_name[NDIM][PIO_MAX_NAME + 1] = {"timestep", "x", "y", "z"};
/* Length of the dimensions in the sample data. */
int dim_len[NDIM] = {NC_UNLIMITED, X_DIM_LEN, Y_DIM_LEN, Z_DIM_LEN};
#define DIM_NAME "dim"
#define NDIM1 1
/* Run test for each of the rearrangers. */
#define NUM_REARRANGERS_TO_TEST 2
/* Create the decomposition to divide the 4-dimensional sample data
* between the 4 tasks. For the purposes of decomposition we are only
* concerned with 3 dimensions - we ignore the unlimited dimension.
*
* @param ntasks the number of available tasks
* @param my_rank rank of this task.
* @param iosysid the IO system ID.
* @param dim_len an array of length 3 with the dimension sizes.
* @param ioid a pointer that gets the ID of this decomposition.
* @returns 0 for success, error code otherwise.
**/
int create_decomposition_3d(int ntasks, int my_rank, int iosysid, int *ioid)
{
    PIO_Offset elements_per_pe;  /* Array elements per processing unit. */
    PIO_Offset *compdof;         /* The decomposition mapping. */
    int dim_len_3d[NDIM3] = {X_DIM_LEN, Y_DIM_LEN, Z_DIM_LEN};
    int ret;

    /* How many data elements per task? In this example we will end up
     * with 4. */
    elements_per_pe = X_DIM_LEN * Y_DIM_LEN * Z_DIM_LEN / ntasks;

    /* Allocate space for the decomposition array. */
    if (!(compdof = malloc(elements_per_pe * sizeof(PIO_Offset))))
        return PIO_ENOMEM;

    /* Describe the decomposition: each task owns a contiguous,
     * rank-ordered slab of the flattened 3D array (0-based map). */
    for (int i = 0; i < elements_per_pe; i++)
        compdof[i] = my_rank * elements_per_pe + i;

    /* Create the PIO decomposition for this test. Free the mapping
     * before reporting an error so it is not leaked on failure. */
    if ((ret = PIOc_init_decomp(iosysid, PIO_INT, NDIM3, dim_len_3d, elements_per_pe,
                                compdof, ioid, 0, NULL, NULL)))
    {
        free(compdof);
        ERR(ret);
    }

    /* Free the mapping. */
    free(compdof);

    return 0;
}
/**
* Test the darray functionality. Create a netCDF file with 4
* dimensions and 1 PIO_INT variable, and use darray to write some
* data.
*
* @param iosysid the IO system ID.
* @param ioid the ID of the decomposition.
* @param num_flavors the number of IOTYPES available in this build.
* @param flavor array of available iotypes.
* @param my_rank rank of this task.
* @param provide_fill 1 if fillvalue should be provided to PIOc_write_darray().
* @returns 0 for success, error code otherwise.
*/
int test_darray(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank,
                int provide_fill)
{
    char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */
    int dimids[NDIM];                /* The dimension IDs. */
    int ncid;                        /* The ncid of the netCDF file. */
    int ncid2;                       /* The ncid of the re-opened netCDF file. */
    int varid;                       /* The ID of the netCDF variable. */
    int ret;                         /* Return code. */
    PIO_Offset arraylen = 16;
    int int_fillvalue = NC_FILL_INT;
    void *fillvalue = NULL;
    int test_data[arraylen];         /* Data for record 0. */
    int test_data2[arraylen];        /* Data for record 1. */
    int test_data_in[arraylen];      /* Buffer for read-back checks. */

    /* Initialize some data, unique per rank so cross-rank mixups are caught. */
    for (int f = 0; f < arraylen; f++)
    {
        test_data[f] = my_rank * 10 + f;
        test_data2[f] = 2 * (my_rank * 10 + f);
    }

    /* Are we providing a fill value? */
    if (provide_fill)
        fillvalue = &int_fillvalue;

    /* Use PIO to create the example file in each of the four
     * available ways. */
    for (int fmt = 0; fmt < num_flavors; fmt++)
    {
        /* Create the filename. Use snprintf so a long TEST_NAME can
         * never overflow the PIO_MAX_NAME + 1 buffer. */
        snprintf(filename, sizeof(filename), "data_%s_iotype_%d.nc", TEST_NAME,
                 flavor[fmt]);

        /* Create the netCDF output file. */
        if ((ret = PIOc_createfile(iosysid, &ncid, &flavor[fmt], filename, PIO_CLOBBER)))
            ERR(ret);

        /* Turn on fill mode. */
        if ((ret = PIOc_set_fill(ncid, NC_FILL, NULL)))
            ERR(ret);

        /* Define netCDF dimensions and variable. */
        for (int d = 0; d < NDIM; d++)
            if ((ret = PIOc_def_dim(ncid, dim_name[d], (PIO_Offset)dim_len[d], &dimids[d])))
                ERR(ret);

        /* Define a variable. */
        if ((ret = PIOc_def_var(ncid, VAR_NAME, PIO_INT, NDIM, dimids, &varid)))
            ERR(ret);

        /* End define mode. */
        if ((ret = PIOc_enddef(ncid)))
            ERR(ret);

        /* Set the value of the record dimension. */
        if ((ret = PIOc_setframe(ncid, varid, 0)))
            ERR(ret);

        /* Write the data. */
        if ((ret = PIOc_write_darray(ncid, varid, ioid, arraylen, test_data, fillvalue)))
            ERR(ret);

        /* Set the value of the record dimension to the second record. */
        if ((ret = PIOc_setframe(ncid, varid, 1)))
            ERR(ret);

        /* Write the data for the second record. */
        if ((ret = PIOc_write_darray(ncid, varid, ioid, arraylen, test_data2, fillvalue)))
            ERR(ret);

        /* Close the netCDF file. */
        if ((ret = PIOc_closefile(ncid)))
            ERR(ret);

        /* Reopen the file read-only to verify what was written. */
        if ((ret = PIOc_openfile(iosysid, &ncid2, &flavor[fmt], filename, PIO_NOWRITE)))
            ERR(ret);

        /* Set the value of the record dimension. */
        if ((ret = PIOc_setframe(ncid2, varid, 0)))
            ERR(ret);

        /* Read the data. */
        if ((ret = PIOc_read_darray(ncid2, varid, ioid, arraylen, test_data_in)))
            ERR(ret);

        /* Check the results against record 0. */
        for (int f = 0; f < arraylen; f++)
            if (test_data_in[f] != test_data[f])
                return ERR_WRONG;

        /* Set the value of the record dimension to the second record. */
        if ((ret = PIOc_setframe(ncid2, varid, 1)))
            ERR(ret);

        /* Read the data. */
        if ((ret = PIOc_read_darray(ncid2, varid, ioid, arraylen, test_data_in)))
            ERR(ret);

        /* Check the results against record 1. */
        for (int f = 0; f < arraylen; f++)
            if (test_data_in[f] != test_data2[f])
                return ERR_WRONG;

        /* Close the netCDF file. */
        if ((ret = PIOc_closefile(ncid2)))
            ERR(ret);
    }
    return PIO_NOERR;
}
/**
* Test the decomp read/write functionality.
*
* @param iosysid the IO system ID.
* @param ioid the ID of the decomposition.
* @param num_flavors the number of IOTYPES available in this build.
* @param flavor array of available iotypes.
* @param my_rank rank of this task.
* @param rearranger the rearranger to use (PIO_REARR_BOX or
* PIO_REARR_SUBSET).
* @param test_comm the MPI communicator for this test.
* @returns 0 for success, error code otherwise.
*/
int test_decomp_read_write(int iosysid, int ioid, int num_flavors, int *flavor, int my_rank,
                           int rearranger, MPI_Comm test_comm)
{
    char filename[PIO_MAX_NAME + 1]; /* Name for the output files. */
    int ioid2;             /* ID for decomposition we will create from file. */
    char title_in[PIO_MAX_NAME + 1];   /* Optional title. */
    char history_in[PIO_MAX_NAME + 1]; /* Optional history. */
    int fortran_order_in; /* Indicates fortran vs. c order. */

    /* Use PIO to create the decomp file in each of the four
     * available ways. */
    for (int fmt = 0; fmt < num_flavors; fmt++)
    {
        int ret; /* Return code. */

        /* Create the filename. Use snprintf so a long TEST_NAME can
         * never overflow the PIO_MAX_NAME + 1 buffer. */
        snprintf(filename, sizeof(filename), "decomp_%s_iotype_%d.nc", TEST_NAME,
                 flavor[fmt]);

        /* Write the decomposition to file. */
        if ((ret = PIOc_write_nc_decomp(iosysid, filename, 0, ioid, NULL, NULL, 0)))
            return ret;

        /* Read the decomposition back, creating a new decomposition ID. */
        if ((ret = PIOc_read_nc_decomp(iosysid, filename, &ioid2, test_comm, PIO_INT,
                                       title_in, history_in, &fortran_order_in)))
            return ret;

        /* Check the results by inspecting internal PIO structures. */
        {
            iosystem_desc_t *ios;
            io_desc_t *iodesc;

            /* Get the IO system info. */
            if (!(ios = pio_get_iosystem_from_id(iosysid)))
                return pio_err(NULL, NULL, PIO_EBADID, __FILE__, __LINE__);

            /* Get the IO desc, which describes the decomposition. */
            if (!(iodesc = pio_get_iodesc_from_id(ioid2)))
                return pio_err(ios, NULL, PIO_EBADID, __FILE__, __LINE__);

            /* Basic decomposition properties must match what was written. */
            if (iodesc->ioid != ioid2 || iodesc->maplen != EXPECTED_MAPLEN || iodesc->ndims != NDIM3 ||
                iodesc->ndof != EXPECTED_MAPLEN)
                return ERR_WRONG;
            if (iodesc->rearranger != rearranger || iodesc->maxregions != 1 ||
                iodesc->needsfill || iodesc->mpitype != MPI_INT)
                return ERR_WRONG;

            /* The file stores a 1-based map, hence the + 1. */
            for (int e = 0; e < iodesc->maplen; e++)
                if (iodesc->map[e] != my_rank * iodesc->maplen + e + 1)
                    return ERR_WRONG;
            if (iodesc->dimlen[0] != X_DIM_LEN || iodesc->dimlen[1] != Y_DIM_LEN ||
                iodesc->dimlen[2] != Z_DIM_LEN)
                return ERR_WRONG;
            if (rearranger == PIO_REARR_SUBSET)
            {
                if (iodesc->nrecvs != 1 || iodesc->num_aiotasks != TARGET_NTASKS)
                    return ERR_WRONG;
            }
            else
            {
                /* I haven't figured out yet what these should be for
                 * the box rearranger. */
                /* printf("iodesc->nrecv = %d iodesc->num_aiotasks = %d\n", iodesc->nrecvs, */
                /*        iodesc->num_aiotasks); */
            }
        }

        /* Free the PIO decomposition. */
        if ((ret = PIOc_freedecomp(iosysid, ioid2)))
            ERR(ret);
    }
    return PIO_NOERR;
}
/**
* Run all the tests.
*
* @param iosysid the IO system ID.
* @param num_flavors number of available iotypes in the build.
* @param flavor pointer to array of the available iotypes.
* @param my_rank rank of this task.
* @param rearranger the rearranger to use (PIO_REARR_BOX or
* PIO_REARR_SUBSET).
* @param test_comm the communicator the test is running on.
* @returns 0 for success, error code otherwise.
*/
int test_all_darray(int iosysid, int num_flavors, int *flavor, int my_rank,
                    int rearranger, MPI_Comm test_comm)
{
    int ioid;      /* ID of the 3D decomposition. */
    int comm_size; /* Size of the test communicator. */
    int ret;       /* Return code. */

    if ((ret = MPI_Comm_size(test_comm, &comm_size)))
        MPIERR(ret);

    /* Create the 3D decomposition shared by all sub-tests. */
    if ((ret = create_decomposition_3d(TARGET_NTASKS, my_rank, iosysid, &ioid)))
        return ret;

    /* Exercise writing and reading the decomposition itself. */
    if ((ret = test_decomp_read_write(iosysid, ioid, num_flavors, flavor, my_rank,
                                      rearranger, test_comm)))
        return ret;

    /* Run the darray test twice: without and with an explicit fill
     * value passed to PIOc_write_darray(). */
    for (int provide_fill = 0; provide_fill < NUM_TEST_CASES_FILLVALUE; provide_fill++)
        if ((ret = test_darray(iosysid, ioid, num_flavors, flavor, my_rank, provide_fill)))
            return ret;

    /* Release the decomposition. */
    if ((ret = PIOc_freedecomp(iosysid, ioid)))
        ERR(ret);

    return PIO_NOERR;
}
/* Run tests for darray functions. */
int main(int argc, char **argv)
{
    MPI_Comm test_comm; /* A communicator for this test. */
    int my_rank;        /* Zero-based rank of this task. */
    int ntasks;         /* Number of tasks in test_comm. */
    int ret;            /* Return code. */

    /* Initialize the test framework (sets up MPI, checks task count). */
    if ((ret = pio_test_init2(argc, argv, &my_rank, &ntasks, MIN_NTASKS,
                              MIN_NTASKS, -1, &test_comm)))
        ERR(ERR_INIT);

    /* Have PIO return error codes rather than handling errors itself. */
    if ((ret = PIOc_set_iosystem_error_handling(PIO_DEFAULT, PIO_RETURN_ERROR, NULL)))
        return ret;

    /* Only the first TARGET_NTASKS tasks take part in the tests. */
    if (my_rank < TARGET_NTASKS)
    {
        int rearranger[NUM_REARRANGERS_TO_TEST] = {PIO_REARR_BOX, PIO_REARR_SUBSET};
        int flavor[NUM_FLAVORS]; /* iotypes for the supported netCDF IO flavors. */
        int num_flavors;         /* Number of PIO netCDF flavors in this build. */
        int iosysid;             /* The ID for the parallel I/O system. */

        /* Figure out which iotypes this build supports. */
        if ((ret = get_iotypes(&num_flavors, flavor)))
            ERR(ret);

        /* Run the whole test suite once per rearranger. */
        for (int r = 0; r < NUM_REARRANGERS_TO_TEST; r++)
        {
            /* Initialize the PIO IO system: all TARGET_NTASKS tasks do
             * I/O, with stride 1, starting at rank 0. */
            if ((ret = PIOc_Init_Intracomm(test_comm, TARGET_NTASKS, 1,
                                           0, rearranger[r], &iosysid)))
                return ret;

            /* Run tests. */
            if ((ret = test_all_darray(iosysid, num_flavors, flavor, my_rank,
                                       rearranger[r], test_comm)))
                return ret;

            /* Finalize PIO system. */
            if ((ret = PIOc_free_iosystem(iosysid)))
                return ret;
        } /* next rearranger */
    } /* endif my_rank < TARGET_NTASKS */

    /* Finalize the MPI library. */
    if ((ret = pio_test_finalize(&test_comm)))
        return ret;

    printf("%d %s SUCCESS!!\n", my_rank, TEST_NAME);
    return 0;
}
|
8224855ec6be07636424edd983f14c9f74fd32c9
|
0b1f18c37bfbf6901749916a5935c49b1afe17e3
|
/core/lib/experimental/gtk/win/include/glib-2.0/gio/gunixcredentialsmessage.h
|
ad70f74c3ee4770d007573405f4fbba660452707
|
[
"OpenSSL",
"Zlib",
"BSD-3-Clause"
] |
permissive
|
objeck/objeck-lang
|
2e97ff6c070492e53d4a5f1bec88e07dda4b4296
|
627482005da67618fd46133d18ade2d66e515284
|
refs/heads/master
| 2023-08-24T05:38:15.489677
| 2023-08-22T20:06:43
| 2023-08-22T20:06:43
| 20,608,381
| 240
| 33
|
NOASSERTION
| 2023-09-06T20:13:02
| 2014-06-08T03:25:12
|
C
|
UTF-8
|
C
| false
| false
| 3,246
|
h
|
gunixcredentialsmessage.h
|
/* GIO - GLib Input, Output and Streaming Library
*
* Copyright (C) 2010 Red Hat, Inc.
* Copyright (C) 2009 Codethink Limited
*
* SPDX-License-Identifier: LGPL-2.1-or-later
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General
* Public License along with this library; if not, see <http://www.gnu.org/licenses/>.
*
* Authors: David Zeuthen <davidz@redhat.com>
*/
#ifndef __G_UNIX_CREDENTIALS_MESSAGE_H__
#define __G_UNIX_CREDENTIALS_MESSAGE_H__
#include <gio/gio.h>
G_BEGIN_DECLS
#define G_TYPE_UNIX_CREDENTIALS_MESSAGE (g_unix_credentials_message_get_type ())
#define G_UNIX_CREDENTIALS_MESSAGE(o) (G_TYPE_CHECK_INSTANCE_CAST ((o), G_TYPE_UNIX_CREDENTIALS_MESSAGE, GUnixCredentialsMessage))
#define G_UNIX_CREDENTIALS_MESSAGE_CLASS(c) (G_TYPE_CHECK_CLASS_CAST ((c), G_TYPE_UNIX_CREDENTIALS_MESSAGE, GUnixCredentialsMessageClass))
#define G_IS_UNIX_CREDENTIALS_MESSAGE(o) (G_TYPE_CHECK_INSTANCE_TYPE ((o), G_TYPE_UNIX_CREDENTIALS_MESSAGE))
#define G_IS_UNIX_CREDENTIALS_MESSAGE_CLASS(c) (G_TYPE_CHECK_CLASS_TYPE ((c), G_TYPE_UNIX_CREDENTIALS_MESSAGE))
#define G_UNIX_CREDENTIALS_MESSAGE_GET_CLASS(o) (G_TYPE_INSTANCE_GET_CLASS ((o), G_TYPE_UNIX_CREDENTIALS_MESSAGE, GUnixCredentialsMessageClass))
typedef struct _GUnixCredentialsMessagePrivate GUnixCredentialsMessagePrivate;
typedef struct _GUnixCredentialsMessageClass GUnixCredentialsMessageClass;
G_DEFINE_AUTOPTR_CLEANUP_FUNC(GUnixCredentialsMessage, g_object_unref)
/**
* GUnixCredentialsMessageClass:
*
* Class structure for #GUnixCredentialsMessage.
*
* Since: 2.26
*/
struct _GUnixCredentialsMessageClass
{
  /* Derives from GSocketControlMessageClass; no new virtual methods. */
  GSocketControlMessageClass parent_class;

  /*< private >*/
  /* Padding for future expansion */
  void (*_g_reserved1) (void);
  void (*_g_reserved2) (void);
};
/**
* GUnixCredentialsMessage:
*
* The #GUnixCredentialsMessage structure contains only private data
* and should only be accessed using the provided API.
*
* Since: 2.26
*/
struct _GUnixCredentialsMessage
{
  GSocketControlMessage parent_instance;

  /* Opaque instance data; access only via the public API above. */
  GUnixCredentialsMessagePrivate *priv;
};
GLIB_AVAILABLE_IN_ALL
GType g_unix_credentials_message_get_type (void) G_GNUC_CONST;
GLIB_AVAILABLE_IN_ALL
GSocketControlMessage *g_unix_credentials_message_new (void);
GLIB_AVAILABLE_IN_ALL
GSocketControlMessage *g_unix_credentials_message_new_with_credentials (GCredentials *credentials);
GLIB_AVAILABLE_IN_ALL
GCredentials *g_unix_credentials_message_get_credentials (GUnixCredentialsMessage *message);
GLIB_AVAILABLE_IN_ALL
gboolean g_unix_credentials_message_is_supported (void);
G_END_DECLS
#endif /* __G_UNIX_CREDENTIALS_MESSAGE_H__ */
|
b806cd0d8c8ddfbec01225d546bd1dff56a9492a
|
52c8ed39b32ccc7c0673278c1adea3638797c9ff
|
/src/external/pixman-0.40.0/arm64/pixman-arm-neon.c
|
be761c96529e999a582b6c1c2b8559a0aaeff8a7
|
[
"MIT"
] |
permissive
|
xboot/xboot
|
0cab7b440b612aa0a4c366025598a53a7ec3adf1
|
6d6b93947b7fcb8c3924fedb0715c23877eedd5e
|
refs/heads/master
| 2023-08-20T05:56:25.149388
| 2023-07-12T07:38:29
| 2023-07-12T07:38:29
| 471,539
| 765
| 296
|
MIT
| 2023-05-25T09:39:01
| 2010-01-14T08:25:12
|
C
|
UTF-8
|
C
| false
| false
| 26,582
|
c
|
pixman-arm-neon.c
|
/*
* Copyright © 2009 ARM Ltd, Movial Creative Technologies Oy
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of ARM Ltd not be used in
* advertising or publicity pertaining to distribution of the software without
* specific, written prior permission. ARM Ltd makes no
* representations about the suitability of this software for any purpose. It
* is provided "as is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
* SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
* AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
* OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*
* Author: Ian Rickards (ian.rickards@arm.com)
* Author: Jonathan Morton (jonathan.morton@movial.com)
* Author: Markku Vire (markku.vire@movial.com)
*
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <string.h>
#include "pixman-private.h"
#include "pixman-arm-common.h"
/* Bind the NEON assembly cores to C fast-path entry points.  The
 * PIXMAN_ARM_BIND_* macros (see pixman-arm-common.h) expand to wrapper
 * functions named neon_composite_<op> / neon_<op>_*, which the fast-path
 * table below references.  For each pixel format the type/count pair
 * (e.g. uint8_t, 3) gives the element type and elements per pixel, so
 * uint8_t, 3 is a 24bpp 0888 format.  The SKIP_ZERO_SRC / SKIP_ZERO_MASK /
 * 0 flags are forwarded to the binding macro — presumably short-circuiting
 * when the solid source/mask is zero; confirm in pixman-arm-common.h. */

/* Plain source -> destination operations (no mask). */
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_8888_8888,
                                   uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_x888_8888,
                                   uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0565_0565,
                                   uint16_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0888_0888,
                                   uint8_t, 3, uint8_t, 3)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_8888_0565,
                                   uint32_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0565_8888,
                                   uint16_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0888_8888_rev,
                                   uint8_t, 3, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_0888_0565_rev,
                                   uint8_t, 3, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_pixbuf_8888,
                                   uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, src_rpixbuf_8888,
                                   uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, add_8_8,
                                   uint8_t, 1, uint8_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, add_8888_8888,
                                   uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, over_8888_0565,
                                   uint32_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, over_8888_8888,
                                   uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, out_reverse_8_0565,
                                   uint8_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_DST (neon, out_reverse_8_8888,
                                   uint8_t, 1, uint32_t, 1)

/* Solid (n) source -> destination operations. */
PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_n_0565,
                                 uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_n_8888,
                                 uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_DST (SKIP_ZERO_SRC, neon, over_reverse_n_8888,
                                 uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_DST (0, neon, in_n_8,
                                 uint8_t, 1)

/* Solid source with a per-pixel mask. */
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_0565,
                                      uint8_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_8888,
                                      uint8_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8888_8888_ca,
                                      uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8888_0565_ca,
                                      uint32_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, over_n_8_8,
                                      uint8_t, 1, uint8_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, add_n_8_8,
                                      uint8_t, 1, uint8_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, neon, add_n_8_8888,
                                      uint8_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (0, neon, src_n_8_8888,
                                      uint8_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_N_MASK_DST (0, neon, src_n_8_8,
                                      uint8_t, 1, uint8_t, 1)

/* Per-pixel source with a solid (n) mask. */
PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, over_8888_n_8888,
                                     uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, over_8888_n_0565,
                                     uint32_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, over_0565_n_0565,
                                     uint16_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_N_DST (SKIP_ZERO_MASK, neon, add_8888_n_8888,
                                     uint32_t, 1, uint32_t, 1)

/* Per-pixel source and per-pixel mask. */
PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_8_8_8,
                                        uint8_t, 1, uint8_t, 1, uint8_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_0565_8_0565,
                                        uint16_t, 1, uint8_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_8888_8_8888,
                                        uint32_t, 1, uint8_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, add_8888_8888_8888,
                                        uint32_t, 1, uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_8888_8_8888,
                                        uint32_t, 1, uint8_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_8888_8888_8888,
                                        uint32_t, 1, uint32_t, 1, uint32_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_8888_8_0565,
                                        uint32_t, 1, uint8_t, 1, uint16_t, 1)
PIXMAN_ARM_BIND_FAST_PATH_SRC_MASK_DST (neon, over_0565_8_0565,
                                        uint16_t, 1, uint8_t, 1, uint16_t, 1)

/* Nearest-neighbour scaled fast paths. */
PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 8888_8888, OVER,
                                        uint32_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 8888_0565, OVER,
                                        uint32_t, uint16_t)
PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 8888_0565, SRC,
                                        uint32_t, uint16_t)
PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_DST (neon, 0565_8888, SRC,
                                        uint16_t, uint32_t)

PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_A8_DST (SKIP_ZERO_SRC, neon, 8888_8_0565,
                                           OVER, uint32_t, uint16_t)
PIXMAN_ARM_BIND_SCALED_NEAREST_SRC_A8_DST (SKIP_ZERO_SRC, neon, 0565_8_0565,
                                           OVER, uint16_t, uint16_t)

/* Bilinear scaled fast paths. */
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (0, neon, 8888_8888, SRC,
                                         uint32_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (0, neon, 8888_0565, SRC,
                                         uint32_t, uint16_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (0, neon, 0565_x888, SRC,
                                         uint16_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (0, neon, 0565_0565, SRC,
                                         uint16_t, uint16_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (SKIP_ZERO_SRC, neon, 8888_8888, OVER,
                                         uint32_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_DST (SKIP_ZERO_SRC, neon, 8888_8888, ADD,
                                         uint32_t, uint32_t)

/* Bilinear scaled fast paths with an a8 mask. */
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 8888_8_8888, SRC,
                                            uint32_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 8888_8_0565, SRC,
                                            uint32_t, uint16_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 0565_8_x888, SRC,
                                            uint16_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (0, neon, 0565_8_0565, SRC,
                                            uint16_t, uint16_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, neon, 8888_8_8888, OVER,
                                            uint32_t, uint32_t)
PIXMAN_ARM_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, neon, 8888_8_8888, ADD,
                                            uint32_t, uint32_t)
void
pixman_composite_src_n_8_asm_neon (int32_t w,
int32_t h,
uint8_t *dst,
int32_t dst_stride,
uint8_t src);
void
pixman_composite_src_n_0565_asm_neon (int32_t w,
int32_t h,
uint16_t *dst,
int32_t dst_stride,
uint16_t src);
void
pixman_composite_src_n_8888_asm_neon (int32_t w,
int32_t h,
uint32_t *dst,
int32_t dst_stride,
uint32_t src);
static pixman_bool_t
arm_neon_fill (pixman_implementation_t *imp,
               uint32_t *               bits,
               int                      stride,
               int                      bpp,
               int                      x,
               int                      y,
               int                      width,
               int                      height,
               uint32_t                 _xor)
{
    /* pixman strides are always counted in 32-bit units. */
    uint32_t byte_stride = stride * sizeof (uint32_t);
    char *row = ((char *) bits) + y * byte_stride;

    /* Dispatch to the NEON solid-fill core matching the pixel depth;
     * unsupported depths fall back to the generic implementation. */
    if (bpp == 8)
    {
	pixman_composite_src_n_8_asm_neon (
	    width, height, (uint8_t *) (row + x), byte_stride, _xor & 0xff);
	return TRUE;
    }

    if (bpp == 16)
    {
	pixman_composite_src_n_0565_asm_neon (
	    width, height, (uint16_t *) (row + x * 2), byte_stride / 2,
	    _xor & 0xffff);
	return TRUE;
    }

    if (bpp == 32)
    {
	pixman_composite_src_n_8888_asm_neon (
	    width, height, (uint32_t *) (row + x * 4), byte_stride / 4, _xor);
	return TRUE;
    }

    return FALSE;
}
static pixman_bool_t
arm_neon_blt (pixman_implementation_t *imp,
              uint32_t *               src_bits,
              uint32_t *               dst_bits,
              int                      src_stride,
              int                      dst_stride,
              int                      src_bpp,
              int                      dst_bpp,
              int                      src_x,
              int                      src_y,
              int                      dest_x,
              int                      dest_y,
              int                      width,
              int                      height)
{
    /* Only same-depth blits are accelerated; strides are in 32-bit units
     * (hence the * 4 when converting to bytes). */
    if (src_bpp != dst_bpp)
	return FALSE;

    if (src_bpp == 16)
    {
	pixman_composite_src_0565_0565_asm_neon (
	    width, height,
	    (uint16_t *)(((char *) dst_bits) +
	                 dest_y * dst_stride * 4 + dest_x * 2), dst_stride * 2,
	    (uint16_t *)(((char *) src_bits) +
	                 src_y * src_stride * 4 + src_x * 2), src_stride * 2);
	return TRUE;
    }

    if (src_bpp == 32)
    {
	pixman_composite_src_8888_8888_asm_neon (
	    width, height,
	    (uint32_t *)(((char *) dst_bits) +
	                 dest_y * dst_stride * 4 + dest_x * 4), dst_stride,
	    (uint32_t *)(((char *) src_bits) +
	                 src_y * src_stride * 4 + src_x * 4), src_stride);
	return TRUE;
    }

    return FALSE;
}
static const pixman_fast_path_t arm_neon_fast_paths[] =
{
PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, neon_composite_src_0565_0565),
PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, neon_composite_src_0565_0565),
PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, neon_composite_src_8888_0565),
PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, neon_composite_src_8888_0565),
PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, neon_composite_src_8888_0565),
PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, neon_composite_src_8888_0565),
PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, a8r8g8b8, neon_composite_src_0565_8888),
PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, x8r8g8b8, neon_composite_src_0565_8888),
PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, a8b8g8r8, neon_composite_src_0565_8888),
PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, x8b8g8r8, neon_composite_src_0565_8888),
PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, neon_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, neon_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, neon_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, neon_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, neon_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, neon_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, neon_composite_src_x888_8888),
PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, neon_composite_src_x888_8888),
PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, neon_composite_src_0888_0888),
PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, x8r8g8b8, neon_composite_src_0888_8888_rev),
PIXMAN_STD_FAST_PATH (SRC, b8g8r8, null, r5g6b5, neon_composite_src_0888_0565_rev),
PIXMAN_STD_FAST_PATH (SRC, pixbuf, pixbuf, a8r8g8b8, neon_composite_src_pixbuf_8888),
PIXMAN_STD_FAST_PATH (SRC, pixbuf, pixbuf, a8b8g8r8, neon_composite_src_rpixbuf_8888),
PIXMAN_STD_FAST_PATH (SRC, rpixbuf, rpixbuf, a8r8g8b8, neon_composite_src_rpixbuf_8888),
PIXMAN_STD_FAST_PATH (SRC, rpixbuf, rpixbuf, a8b8g8r8, neon_composite_src_pixbuf_8888),
PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8r8g8b8, neon_composite_src_n_8_8888),
PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, neon_composite_src_n_8_8888),
PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, neon_composite_src_n_8_8888),
PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, neon_composite_src_n_8_8888),
PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8, neon_composite_src_n_8_8),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8, neon_composite_over_n_8_8),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, neon_composite_over_n_8_0565),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, neon_composite_over_n_8_0565),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, neon_composite_over_n_8_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, neon_composite_over_n_8_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, neon_composite_over_n_8_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, neon_composite_over_n_8_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, neon_composite_over_n_0565),
PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, neon_composite_over_n_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, neon_composite_over_n_8888),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, neon_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, neon_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, neon_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, neon_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, neon_composite_over_n_8888_0565_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, neon_composite_over_n_8888_0565_ca),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, a8r8g8b8, neon_composite_over_8888_n_8888),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, x8r8g8b8, neon_composite_over_8888_n_8888),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, solid, r5g6b5, neon_composite_over_8888_n_0565),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, solid, b5g6r5, neon_composite_over_8888_n_0565),
PIXMAN_STD_FAST_PATH (OVER, r5g6b5, solid, r5g6b5, neon_composite_over_0565_n_0565),
PIXMAN_STD_FAST_PATH (OVER, b5g6r5, solid, b5g6r5, neon_composite_over_0565_n_0565),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, a8r8g8b8, neon_composite_over_8888_8_8888),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, x8r8g8b8, neon_composite_over_8888_8_8888),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, a8b8g8r8, neon_composite_over_8888_8_8888),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, x8b8g8r8, neon_composite_over_8888_8_8888),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8, r5g6b5, neon_composite_over_8888_8_0565),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8, b5g6r5, neon_composite_over_8888_8_0565),
PIXMAN_STD_FAST_PATH (OVER, r5g6b5, a8, r5g6b5, neon_composite_over_0565_8_0565),
PIXMAN_STD_FAST_PATH (OVER, b5g6r5, a8, b5g6r5, neon_composite_over_0565_8_0565),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, a8r8g8b8, neon_composite_over_8888_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, r5g6b5, neon_composite_over_8888_0565),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, neon_composite_over_8888_0565),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, neon_composite_over_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, neon_composite_over_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, neon_composite_over_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, neon_composite_over_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null, a8r8g8b8, neon_composite_src_x888_8888),
PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null, a8b8g8r8, neon_composite_src_x888_8888),
PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8, neon_composite_add_n_8_8),
PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8r8g8b8, neon_composite_add_n_8_8888),
PIXMAN_STD_FAST_PATH (ADD, solid, a8, a8b8g8r8, neon_composite_add_n_8_8888),
PIXMAN_STD_FAST_PATH (ADD, a8, a8, a8, neon_composite_add_8_8_8),
PIXMAN_STD_FAST_PATH (ADD, r5g6b5, a8, r5g6b5, neon_composite_add_0565_8_0565),
PIXMAN_STD_FAST_PATH (ADD, b5g6r5, a8, b5g6r5, neon_composite_add_0565_8_0565),
PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8, a8r8g8b8, neon_composite_add_8888_8_8888),
PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, a8, a8b8g8r8, neon_composite_add_8888_8_8888),
PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, a8r8g8b8, neon_composite_add_8888_8888_8888),
PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, solid, a8r8g8b8, neon_composite_add_8888_n_8888),
PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, solid, a8b8g8r8, neon_composite_add_8888_n_8888),
PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, neon_composite_add_8_8),
PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, neon_composite_add_8888_8888),
PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, neon_composite_add_8888_8888),
PIXMAN_STD_FAST_PATH (IN, solid, null, a8, neon_composite_in_n_8),
PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8r8g8b8, neon_composite_over_reverse_n_8888),
PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, neon_composite_over_reverse_n_8888),
PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, r5g6b5, neon_composite_out_reverse_8_0565),
PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, b5g6r5, neon_composite_out_reverse_8_0565),
PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, a8r8g8b8, neon_composite_out_reverse_8_8888),
PIXMAN_STD_FAST_PATH (OUT_REVERSE, a8, null, a8b8g8r8, neon_composite_out_reverse_8_8888),
SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, neon_8888_8888),
SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, a8b8g8r8, neon_8888_8888),
SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, neon_8888_8888),
SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, neon_8888_8888),
SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, r5g6b5, neon_8888_0565),
SIMPLE_NEAREST_FAST_PATH (OVER, a8b8g8r8, b5g6r5, neon_8888_0565),
SIMPLE_NEAREST_FAST_PATH (SRC, a8r8g8b8, r5g6b5, neon_8888_0565),
SIMPLE_NEAREST_FAST_PATH (SRC, x8r8g8b8, r5g6b5, neon_8888_0565),
SIMPLE_NEAREST_FAST_PATH (SRC, a8b8g8r8, b5g6r5, neon_8888_0565),
SIMPLE_NEAREST_FAST_PATH (SRC, x8b8g8r8, b5g6r5, neon_8888_0565),
SIMPLE_NEAREST_FAST_PATH (SRC, b5g6r5, x8b8g8r8, neon_0565_8888),
SIMPLE_NEAREST_FAST_PATH (SRC, r5g6b5, x8r8g8b8, neon_0565_8888),
/* Note: NONE repeat is not supported yet */
SIMPLE_NEAREST_FAST_PATH_COVER (SRC, r5g6b5, a8r8g8b8, neon_0565_8888),
SIMPLE_NEAREST_FAST_PATH_COVER (SRC, b5g6r5, a8b8g8r8, neon_0565_8888),
SIMPLE_NEAREST_FAST_PATH_PAD (SRC, r5g6b5, a8r8g8b8, neon_0565_8888),
SIMPLE_NEAREST_FAST_PATH_PAD (SRC, b5g6r5, a8b8g8r8, neon_0565_8888),
PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, a8r8g8b8, r5g6b5, neon_8888_8_0565),
PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, a8b8g8r8, b5g6r5, neon_8888_8_0565),
PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, r5g6b5, r5g6b5, neon_0565_8_0565),
PIXMAN_ARM_SIMPLE_NEAREST_A8_MASK_FAST_PATH (OVER, b5g6r5, b5g6r5, neon_0565_8_0565),
SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, r5g6b5, neon_8888_0565),
SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, r5g6b5, neon_8888_0565),
SIMPLE_BILINEAR_FAST_PATH (SRC, r5g6b5, x8r8g8b8, neon_0565_x888),
SIMPLE_BILINEAR_FAST_PATH (SRC, r5g6b5, r5g6b5, neon_0565_0565),
SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, neon_8888_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, r5g6b5, neon_8888_8_0565),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, x8r8g8b8, r5g6b5, neon_8888_8_0565),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, r5g6b5, x8r8g8b8, neon_0565_8_x888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, r5g6b5, r5g6b5, neon_0565_8_0565),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, neon_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, neon_8888_8_8888),
{ PIXMAN_OP_NONE },
};
/*
 * BIND_COMBINE_U(name) declares the two NEON assembly scanline combiners
 * for operator <name> (one masked, one unmasked) and generates the C
 * dispatcher neon_combine_<name>_u(), which calls the masked entry point
 * only when a mask scanline is supplied.  The dispatcher's 'imp' and 'op'
 * parameters are unused; they exist to match pixman's combiner signature.
 */
#define BIND_COMBINE_U(name) \
void \
pixman_composite_scanline_##name##_mask_asm_neon (int32_t w, \
                                                  const uint32_t *dst, \
                                                  const uint32_t *src, \
                                                  const uint32_t *mask); \
\
void \
pixman_composite_scanline_##name##_asm_neon (int32_t w, \
                                             const uint32_t *dst, \
                                             const uint32_t *src); \
\
static void \
neon_combine_##name##_u (pixman_implementation_t *imp, \
                         pixman_op_t op, \
                         uint32_t * dest, \
                         const uint32_t * src, \
                         const uint32_t * mask, \
                         int width) \
{ \
    if (mask) \
	pixman_composite_scanline_##name##_mask_asm_neon (width, dest, \
	                                                  src, mask); \
    else \
	pixman_composite_scanline_##name##_asm_neon (width, dest, src); \
}

/* Instantiate the unified (_u) combiners for the three operators the NEON
   backend overrides. */
BIND_COMBINE_U (over)
BIND_COMBINE_U (add)
BIND_COMBINE_U (out_reverse)
/*
 * Create the ARM NEON implementation level.  Operations not covered by
 * arm_neon_fast_paths, and combiners other than the three overridden
 * below, fall through to 'fallback'.
 */
pixman_implementation_t *
_pixman_implementation_create_arm_neon (pixman_implementation_t *fallback)
{
    pixman_implementation_t *imp =
	_pixman_implementation_create (fallback, arm_neon_fast_paths);

    /* 32-bpp scanline combiners backed by NEON assembly
       (generated by BIND_COMBINE_U). */
    imp->combine_32[PIXMAN_OP_OVER] = neon_combine_over_u;
    imp->combine_32[PIXMAN_OP_ADD] = neon_combine_add_u;
    imp->combine_32[PIXMAN_OP_OUT_REVERSE] = neon_combine_out_reverse_u;

    /* Accelerated blit and solid-fill entry points. */
    imp->blt = arm_neon_blt;
    imp->fill = arm_neon_fill;

    return imp;
}
|
26ad5039ad170a74fad7af89cc44510a3ccc8490
|
41eb0837713f297134529591b66f3d4d82bcf98e
|
/src/Raine/source/alleg/debug/dz80.h
|
eae4877dee6650a0648ce3d0dfebaab057a6092f
|
[] |
no_license
|
AlexxandreFS/Batocera.PLUS
|
27b196b3cbb781b6fc99e62cad855396d1d5f8f2
|
997ee763ae7135fdf0c34a081e789918bd2eb169
|
refs/heads/master
| 2023-08-17T21:52:39.083687
| 2023-08-17T15:03:44
| 2023-08-17T15:03:44
| 215,869,486
| 135
| 57
| null | 2023-08-14T14:46:14
| 2019-10-17T19:23:42
|
C
|
UTF-8
|
C
| false
| false
| 212
|
h
|
dz80.h
|
#ifdef __cplusplus
extern "C" {
#endif

/* NOTE(review): NBLINES doubles as this header's include guard, so if some
   other header defines NBLINES first, the typedef and prototype below
   silently disappear -- confirm no other translation unit defines it. */
#ifndef NBLINES
#define NBLINES 20
/* One disassembly listing: NBLINES rows of up to 59 characters plus NUL. */
typedef char dz80_buff[NBLINES][60];
/* Disassemble Z80 code at offset dAddr within 'base' into 'buff'.
   Return value semantics are not visible here -- presumably the number of
   bytes or lines consumed; confirm in the implementation. */
int process_adr(UINT8 *base, UINT16 dAddr, dz80_buff buff);
#endif

#ifdef __cplusplus
}
#endif
|
ce705c70cd3075534948890dbfc55383e8ff0625
|
e73547787354afd9b717ea57fe8dd0695d161821
|
/src/world/area_tik/tik_03/tik_03_0_header.c
|
825d13850dbdc2aebd9b8963a9448a5cccd4d6d6
|
[] |
no_license
|
pmret/papermario
|
8b514b19653cef8d6145e47499b3636b8c474a37
|
9774b26d93f1045dd2a67e502b6efc9599fb6c31
|
refs/heads/main
| 2023-08-31T07:09:48.951514
| 2023-08-21T18:07:08
| 2023-08-21T18:07:08
| 287,151,133
| 904
| 139
| null | 2023-09-14T02:44:23
| 2020-08-13T01:22:57
|
C
|
UTF-8
|
C
| false
| false
| 406
|
c
|
tik_03_0_header.c
|
#include "tik_03.h"
/* Map entrance points.  Each row is presumably (x, y, z, yaw-degrees) --
   TODO confirm against the EntryList element type.
   NOTE(review): the initializers use the old GNU extension of a designated
   initializer without '=' ("[idx] { ... }"); this matches the repo's
   existing style but is not standard C. */
EntryList N(Entrances) = {
    [tik_03_ENTRY_0] { 320.0, -10.0, 0.0, 270.0 },
    [tik_03_ENTRY_1] { -215.0, 60.0, 0.0, 90.0 },
    [tik_03_ENTRY_2] { 260.0, -10.0, 0.0, 270.0 },
};

/* Map configuration consumed by the engine: main script, entrance table
   and its length, and the map-tattle message. */
MapSettings N(settings) = {
    .main = &N(EVS_Main),
    .entryList = &N(Entrances),
    .entryCount = ENTRY_COUNT(N(Entrances)),
    .tattle = { MSG_MapTattle_tik_03 },
};
|
2cce3a446b144e0218e83eae3f19df5b2a9f89f9
|
b04663400b05fd638e41f3d7f61953c514d835ed
|
/inc/sal_types.h
|
9204046119420f372aef910dc3e66f36373dde7f
|
[
"BSD-3-Clause"
] |
permissive
|
LiteOS/LiteOS_Lab
|
0ee3edc08eb9d4c9ea5b52099da307adf1c06bfd
|
187e628b3f151515d920801b402bf38a19eee98c
|
refs/heads/iot-device-sdk-tiny
| 2023-09-04T16:22:21.579294
| 2023-04-25T03:09:46
| 2023-04-25T03:09:46
| 154,946,914
| 141
| 125
|
BSD-3-Clause
| 2022-08-25T09:12:56
| 2018-10-27T09:25:01
|
C
|
UTF-8
|
C
| false
| false
| 1,883
|
h
|
sal_types.h
|
/*
 * sal_types.h
 *
 * Socket abstraction layer types.  Applications should include this file
 * for the socket-related structures and type aliases below; the layouts
 * are kept compatible with the Linux definitions.  If your toolchain
 * already provides any of these, make sure the definitions agree.
 */

#ifndef __SAL_TYPES_H
#define __SAL_TYPES_H

#include <stdint.h>
#include <stddef.h>

/* NOTE(review): the #ifndef guards below only detect a prior *macro*
   definition of each name, not a typedef from system headers -- confirm
   this cannot clash with a libc that typedefs them. */
#ifndef socklen_t
#define socklen_t int
#endif

#ifndef ssize_t
#define ssize_t int
#endif

#ifndef sa_family_t
#define sa_family_t uint16_t
#endif

#ifndef in_port_t
#define in_port_t uint16_t
#endif

#ifndef in_addr_t
#define in_addr_t uint32_t
#endif

/* Generic socket address, layout-compatible with the POSIX definition. */
struct sockaddr
{
    sa_family_t sa_family; /* address family, AF_xxx */
    char sa_data[14]; /* 14 bytes of protocol address */
};

/* SO_LINGER option value. */
struct linger
{
    int l_onoff; /* Linger active */
    int l_linger; /* How long to linger for */
};

struct in_addr
{
    in_addr_t s_addr; /* IPv4 address */
};

/* IPv4 socket address; sin_port and sin_addr are in network byte order
   by POSIX convention -- confirm the SAL implementation follows this. */
struct sockaddr_in
{
    sa_family_t sin_family; /* AF_INET */
    in_port_t sin_port; /* Port number. */
    struct in_addr sin_addr; /* Internet address. */
    unsigned char sin_zero[8]; /* Pad to size of `struct sockaddr'. */
};

/* Host entry as returned by gethostbyname()-style lookups. */
struct hostent {
    char *h_name; /* Official name of the host. */
    char **h_aliases; /* A pointer to an array of pointers to alternative host names,
                         terminated by a null pointer. */
    int h_addrtype; /* Address type. */
    int h_length; /* The length, in bytes, of the address. */
    char **h_addr_list; /* A pointer to an array of pointers to network addresses (in
                           network byte order) for the host, terminated by a null pointer. */
#define h_addr h_addr_list[0] /* for backward compatibility */
};

#endif /* __SAL_TYPES_H */
|
7a934621f703c966b403cd0a13758e69896deecb
|
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
|
/SOFTWARE/A64-TERES/linux-a64/drivers/message/i2o/debug.c
|
ce62d8bfe1c832568b33418624f1acc5e16359ac
|
[
"GPL-2.0-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"Linux-syscall-note",
"GPL-2.0-only",
"GPL-1.0-or-later"
] |
permissive
|
OLIMEX/DIY-LAPTOP
|
ae82f4ee79c641d9aee444db9a75f3f6709afa92
|
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
|
refs/heads/rel3
| 2023-08-04T01:54:19.483792
| 2023-04-03T07:18:12
| 2023-04-03T07:18:12
| 80,094,055
| 507
| 92
|
Apache-2.0
| 2023-04-03T07:05:59
| 2017-01-26T07:25:50
|
C
|
UTF-8
|
C
| false
| false
| 11,212
|
c
|
debug.c
|
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/i2o.h>
static void i2o_report_util_cmd(u8 cmd);
static void i2o_report_exec_cmd(u8 cmd);
static void i2o_report_fail_status(u8 req_status, u32 * msg);
static void i2o_report_common_status(u8 req_status);
static void i2o_report_common_dsc(u16 detailed_status);
/*
 * Used for error reporting/debugging purposes.
 * Report Cmd name, Request status, Detailed Status.
 */
void i2o_report_status(const char *severity, const char *str,
		       struct i2o_message *m)
{
	u32 *msg = (u32 *) m;
	/* Reply layout: command in the top byte of word 1, request status
	   in the top byte of word 4, detailed status in its low 16 bits. */
	u8 cmd = (msg[1] >> 24) & 0xFF;
	u8 req_status = (msg[4] >> 24) & 0xFF;
	u16 detailed_status = msg[4] & 0xFFFF;

	if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
		return;		// No status in this reply

	printk("%s%s: ", severity, str);

	if (cmd < 0x1F)		// Utility cmd
		i2o_report_util_cmd(cmd);

	else if (cmd >= 0xA0 && cmd <= 0xEF)	// Executive cmd
		i2o_report_exec_cmd(cmd);
	else
		printk("Cmd = %0#2x, ", cmd);	// Other cmds

	/* MSG_FAIL means a transport failure reply, which has its own
	   format: report it and stop. */
	if (msg[0] & MSG_FAIL) {
		i2o_report_fail_status(req_status, msg);
		return;
	}

	i2o_report_common_status(req_status);

	/* Only utility/executive commands use the shared detailed-status
	   name table; others get the raw value. */
	if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
		i2o_report_common_dsc(detailed_status);
	else
		printk(" / DetailedStatus = %0#4x.\n",
		       detailed_status);
}
/* Dump an I2O message to the kernel log, one 32-bit word per line.
   Compiled to a no-op unless DEBUG is defined. */
void i2o_dump_message(struct i2o_message *m)
{
#ifdef DEBUG
	u32 *words = (u32 *) m;
	/* Message size (in 32-bit words) lives in the upper half of word 0. */
	int size = (words[0] >> 16) & 0xffff;
	int idx;

	printk(KERN_INFO "Dumping I2O message size %d @ %p\n", size, words);
	for (idx = 0; idx < size; idx++)
		printk(KERN_INFO " msg[%d] = %0#10x\n", idx, words[idx]);
#endif
}
/*
 * Used for error reporting/debugging purposes.
 * Following fail status are common to all classes.
 * The preserved message must be handled in the reply handler.
 */
static void i2o_report_fail_status(u8 req_status, u32 * msg)
{
	/* Transport fail codes 0x80..0x8F index this table via the low
	   nibble; 0xFF (UNKNOWN_FAILURE) is special-cased below. */
	static char *FAIL_STATUS[] = {
		"0x80",		/* not used */
		"SERVICE_SUSPENDED",	/* 0x81 */
		"SERVICE_TERMINATED",	/* 0x82 */
		"CONGESTION",
		"FAILURE",
		"STATE_ERROR",
		"TIME_OUT",
		"ROUTING_FAILURE",
		"INVALID_VERSION",
		"INVALID_OFFSET",
		"INVALID_MSG_FLAGS",
		"FRAME_TOO_SMALL",
		"FRAME_TOO_LARGE",
		"INVALID_TARGET_ID",
		"INVALID_INITIATOR_ID",
		"INVALID_INITIATOR_CONTEX",	/* 0x8F */
		"UNKNOWN_FAILURE"	/* 0xFF */
	};

	if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
		printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
		       req_status);
	else
		printk("TRANSPORT_%s.\n",
		       FAIL_STATUS[req_status & 0x0F]);

	/* Dump some details */
	printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n",
	       (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
	printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n",
	       (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
	printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
	       msg[5] >> 16, msg[5] & 0xFFF);

	printk(KERN_ERR " Severity: 0x%02X\n", (msg[4] >> 16) & 0xFF);
	/* Severity flag bits in word 4; more than one may be set. */
	if (msg[4] & (1 << 16))
		printk(KERN_DEBUG "(FormatError), "
		       "this msg can never be delivered/processed.\n");
	if (msg[4] & (1 << 17))
		printk(KERN_DEBUG "(PathError), "
		       "this msg can no longer be delivered/processed.\n");
	if (msg[4] & (1 << 18))
		printk(KERN_DEBUG "(PathState), "
		       "the system state does not allow delivery.\n");
	if (msg[4] & (1 << 19))
		printk(KERN_DEBUG
		       "(Congestion), resources temporarily not available;"
		       "do not retry immediately.\n");
}
/*
 * Used for error reporting/debugging purposes.
 * Following reply status are common to all classes.
 */
static void i2o_report_common_status(u8 req_status)
{
	/* Indexed directly by the request-status byte. */
	static const char *const reply_status[] = {
		"SUCCESS",
		"ABORT_DIRTY",
		"ABORT_NO_DATA_TRANSFER",
		"ABORT_PARTIAL_TRANSFER",
		"ERROR_DIRTY",
		"ERROR_NO_DATA_TRANSFER",
		"ERROR_PARTIAL_TRANSFER",
		"PROCESS_ABORT_DIRTY",
		"PROCESS_ABORT_NO_DATA_TRANSFER",
		"PROCESS_ABORT_PARTIAL_TRANSFER",
		"TRANSACTION_ERROR",
		"PROGRESS_REPORT"
	};

	if (req_status < ARRAY_SIZE(reply_status))
		printk("%s", reply_status[req_status]);
	else
		printk("RequestStatus = %0#2x", req_status);
}
/*
 * Used for error reporting/debugging purposes.
 * Following detailed status are valid for executive class,
 * utility class, DDM class and for transaction error replies.
 */
static void i2o_report_common_dsc(u16 detailed_status)
{
	/* Indexed directly by the detailed-status code;
	   covers 0x00 .. I2O_DSC_DEVICE_NOT_AVAILABLE. */
	static const char *const dsc_names[] = {
		"SUCCESS",
		"0x01",		// not used
		"BAD_KEY",
		"TCL_ERROR",
		"REPLY_BUFFER_FULL",
		"NO_SUCH_PAGE",
		"INSUFFICIENT_RESOURCE_SOFT",
		"INSUFFICIENT_RESOURCE_HARD",
		"0x08",		// not used
		"CHAIN_BUFFER_TOO_LARGE",
		"UNSUPPORTED_FUNCTION",
		"DEVICE_LOCKED",
		"DEVICE_RESET",
		"INAPPROPRIATE_FUNCTION",
		"INVALID_INITIATOR_ADDRESS",
		"INVALID_MESSAGE_FLAGS",
		"INVALID_OFFSET",
		"INVALID_PARAMETER",
		"INVALID_REQUEST",
		"INVALID_TARGET_ADDRESS",
		"MESSAGE_TOO_LARGE",
		"MESSAGE_TOO_SMALL",
		"MISSING_PARAMETER",
		"TIMEOUT",
		"UNKNOWN_ERROR",
		"UNKNOWN_FUNCTION",
		"UNSUPPORTED_VERSION",
		"DEVICE_BUSY",
		"DEVICE_NOT_AVAILABLE"
	};

	if (detailed_status <= I2O_DSC_DEVICE_NOT_AVAILABLE)
		printk(" / %s.\n", dsc_names[detailed_status]);
	else
		printk(" / DetailedStatus = %0#4x.\n",
		       detailed_status);
}
/*
 * Used for error reporting/debugging purposes.
 * Print the symbolic name of a utility-class command.
 */
static void i2o_report_util_cmd(u8 cmd)
{
	/* Command -> name pairs; the strings carry the trailing ", "
	   that the caller's log line expects. */
	static const struct {
		u8 code;
		const char *name;
	} util_names[] = {
		{ I2O_CMD_UTIL_NOP, "UTIL_NOP, " },
		{ I2O_CMD_UTIL_ABORT, "UTIL_ABORT, " },
		{ I2O_CMD_UTIL_CLAIM, "UTIL_CLAIM, " },
		{ I2O_CMD_UTIL_RELEASE, "UTIL_CLAIM_RELEASE, " },
		{ I2O_CMD_UTIL_CONFIG_DIALOG, "UTIL_CONFIG_DIALOG, " },
		{ I2O_CMD_UTIL_DEVICE_RESERVE, "UTIL_DEVICE_RESERVE, " },
		{ I2O_CMD_UTIL_DEVICE_RELEASE, "UTIL_DEVICE_RELEASE, " },
		{ I2O_CMD_UTIL_EVT_ACK, "UTIL_EVENT_ACKNOWLEDGE, " },
		{ I2O_CMD_UTIL_EVT_REGISTER, "UTIL_EVENT_REGISTER, " },
		{ I2O_CMD_UTIL_LOCK, "UTIL_LOCK, " },
		{ I2O_CMD_UTIL_LOCK_RELEASE, "UTIL_LOCK_RELEASE, " },
		{ I2O_CMD_UTIL_PARAMS_GET, "UTIL_PARAMS_GET, " },
		{ I2O_CMD_UTIL_PARAMS_SET, "UTIL_PARAMS_SET, " },
		{ I2O_CMD_UTIL_REPLY_FAULT_NOTIFY, "UTIL_REPLY_FAULT_NOTIFY, " },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(util_names); i++) {
		if (util_names[i].code == cmd) {
			printk("%s", util_names[i].name);
			return;
		}
	}
	printk("Cmd = %0#2x, ", cmd);	/* unrecognized command */
}
/*
 * Used for error reporting/debugging purposes.
 * Print the symbolic name of an executive-class command.
 */
static void i2o_report_exec_cmd(u8 cmd)
{
	/* Command -> name pairs; the strings carry the trailing ", "
	   that the caller's log line expects. */
	static const struct {
		u8 code;
		const char *name;
	} exec_names[] = {
		{ I2O_CMD_ADAPTER_ASSIGN, "EXEC_ADAPTER_ASSIGN, " },
		{ I2O_CMD_ADAPTER_READ, "EXEC_ADAPTER_READ, " },
		{ I2O_CMD_ADAPTER_RELEASE, "EXEC_ADAPTER_RELEASE, " },
		{ I2O_CMD_BIOS_INFO_SET, "EXEC_BIOS_INFO_SET, " },
		{ I2O_CMD_BOOT_DEVICE_SET, "EXEC_BOOT_DEVICE_SET, " },
		{ I2O_CMD_CONFIG_VALIDATE, "EXEC_CONFIG_VALIDATE, " },
		{ I2O_CMD_CONN_SETUP, "EXEC_CONN_SETUP, " },
		{ I2O_CMD_DDM_DESTROY, "EXEC_DDM_DESTROY, " },
		{ I2O_CMD_DDM_ENABLE, "EXEC_DDM_ENABLE, " },
		{ I2O_CMD_DDM_QUIESCE, "EXEC_DDM_QUIESCE, " },
		{ I2O_CMD_DDM_RESET, "EXEC_DDM_RESET, " },
		{ I2O_CMD_DDM_SUSPEND, "EXEC_DDM_SUSPEND, " },
		{ I2O_CMD_DEVICE_ASSIGN, "EXEC_DEVICE_ASSIGN, " },
		{ I2O_CMD_DEVICE_RELEASE, "EXEC_DEVICE_RELEASE, " },
		{ I2O_CMD_HRT_GET, "EXEC_HRT_GET, " },
		{ I2O_CMD_ADAPTER_CLEAR, "EXEC_IOP_CLEAR, " },
		{ I2O_CMD_ADAPTER_CONNECT, "EXEC_IOP_CONNECT, " },
		{ I2O_CMD_ADAPTER_RESET, "EXEC_IOP_RESET, " },
		{ I2O_CMD_LCT_NOTIFY, "EXEC_LCT_NOTIFY, " },
		{ I2O_CMD_OUTBOUND_INIT, "EXEC_OUTBOUND_INIT, " },
		{ I2O_CMD_PATH_ENABLE, "EXEC_PATH_ENABLE, " },
		{ I2O_CMD_PATH_QUIESCE, "EXEC_PATH_QUIESCE, " },
		{ I2O_CMD_PATH_RESET, "EXEC_PATH_RESET, " },
		{ I2O_CMD_STATIC_MF_CREATE, "EXEC_STATIC_MF_CREATE, " },
		{ I2O_CMD_STATIC_MF_RELEASE, "EXEC_STATIC_MF_RELEASE, " },
		{ I2O_CMD_STATUS_GET, "EXEC_STATUS_GET, " },
		{ I2O_CMD_SW_DOWNLOAD, "EXEC_SW_DOWNLOAD, " },
		{ I2O_CMD_SW_UPLOAD, "EXEC_SW_UPLOAD, " },
		{ I2O_CMD_SW_REMOVE, "EXEC_SW_REMOVE, " },
		{ I2O_CMD_SYS_ENABLE, "EXEC_SYS_ENABLE, " },
		{ I2O_CMD_SYS_MODIFY, "EXEC_SYS_MODIFY, " },
		{ I2O_CMD_SYS_QUIESCE, "EXEC_SYS_QUIESCE, " },
		{ I2O_CMD_SYS_TAB_SET, "EXEC_SYS_TAB_SET, " },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(exec_names); i++) {
		if (exec_names[i].code == cmd) {
			printk("%s", exec_names[i].name);
			return;
		}
	}
	printk("Cmd = %#02x, ", cmd);	/* unrecognized command */
}
/*
 * Log the IOP state from a controller's cached status block.
 *
 * @c: controller whose status block (c->status_block.virt) is read.
 *
 * Known state codes are printed symbolically; anything else is printed
 * as a raw hex value.
 *
 * Fix: removed the stray ';' that followed the function's closing brace
 * (an empty file-scope declaration, rejected under -Wpedantic).
 */
void i2o_debug_state(struct i2o_controller *c)
{
	printk(KERN_INFO "%s: State = ", c->name);
	switch (((i2o_status_block *) c->status_block.virt)->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown !!)\n",
		       ((i2o_status_block *) c->status_block.virt)->iop_state);
	}
}
/*
 * Dump the controller's Hardware Resource Table (HRT) to the kernel log.
 * Only HRT format version 0 (byte 3 of the header == 0) is understood.
 */
void i2o_dump_hrt(struct i2o_controller *c)
{
	u32 *rows = (u32 *) c->hrt.virt;	/* walks the table word-wise */
	u8 *p = (u8 *) c->hrt.virt;	/* byte view of header, then of each entry */
	u8 *d;			/* bus-specific data within an entry */
	int count;
	int length;
	int i;
	int state;

	if (p[3] != 0) {
		printk(KERN_ERR
		       "%s: HRT table for controller is too new a version.\n",
		       c->name);
		return;
	}

	/* Header: 16-bit little-endian entry count, then per-entry length
	   in 32-bit words. */
	count = p[0] | (p[1] << 8);
	length = p[2];

	printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n",
	       c->name, count, length << 2);

	rows += 2;		/* skip the 8-byte header */

	for (i = 0; i < count; i++) {
		printk(KERN_INFO "Adapter %08X: ", rows[0]);
		p = (u8 *) (rows + 1);
		d = (u8 *) (rows + 2);

		/* Low 12 bits: adapter TID; remaining bits: state flags. */
		state = p[1] << 8 | p[0];

		printk("TID %04X:[", state & 0xFFF);
		state >>= 12;
		if (state & (1 << 0))
			printk("H");	/* Hidden */
		if (state & (1 << 2)) {
			printk("P");	/* Present */
			if (state & (1 << 1))
				printk("C");	/* Controlled */
		}
		if (state > 9)
			printk("*");	/* Hard */

		printk("]:");

		/* NOTE(review): p[3] is a u8, so '& 0xFFFF' is a no-op; a
		   16-bit bus-type field may have been intended -- confirm
		   against the I2O HRT layout. */
		switch (p[3] & 0xFFFF) {
		case 0:
			/* Adapter private bus - easy */
			printk("Local bus %d: I/O at 0x%04X Mem 0x%08X", p[2],
			       d[1] << 8 | d[0], *(u32 *) (d + 4));
			break;

		case 1:
			/* ISA bus */
			printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X", p[2],
			       d[2], d[1] << 8 | d[0], *(u32 *) (d + 4));
			break;

		case 2:	/* EISA bus */
			printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
			       p[2], d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
			break;

		case 3:	/* MCA bus */
			printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X", p[2],
			       d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
			break;

		case 4:	/* PCI bus */
			printk("PCI %d: Bus %d Device %d Function %d", p[2],
			       d[2], d[1], d[0]);
			break;

		case 0x80:	/* Other */
		default:
			printk("Unsupported bus type.");
			break;
		}
		printk("\n");
		rows += length;	/* advance by the per-entry length (u32 words) */
	}
}

EXPORT_SYMBOL(i2o_dump_message);
|
b3491f16227f057a0681e60fb356c2c17587752f
|
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
|
/SOFTWARE/A64-TERES/linux-a64/include/linux/fs_uart_pd.h
|
36b61ff392779750a84d2176e27fbbafb0078a27
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"Linux-syscall-note",
"GPL-2.0-only",
"GPL-1.0-or-later"
] |
permissive
|
OLIMEX/DIY-LAPTOP
|
ae82f4ee79c641d9aee444db9a75f3f6709afa92
|
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
|
refs/heads/rel3
| 2023-08-04T01:54:19.483792
| 2023-04-03T07:18:12
| 2023-04-03T07:18:12
| 80,094,055
| 507
| 92
|
Apache-2.0
| 2023-04-03T07:05:59
| 2017-01-26T07:25:50
|
C
|
UTF-8
|
C
| false
| false
| 1,523
|
h
|
fs_uart_pd.h
|
/*
* Platform information definitions for the CPM Uart driver.
*
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef FS_UART_PD_H
#define FS_UART_PD_H
#include <asm/types.h>
/*
 * Identifiers for the CPM UART channels: the two SMCs first, then the
 * four SCCs.  fs_uart_nr is the total channel count.
 */
enum fs_uart_id {
	fsid_smc1_uart,		/* 0 */
	fsid_smc2_uart,		/* 1 */
	fsid_scc1_uart,		/* 2 */
	fsid_scc2_uart,		/* 3 */
	fsid_scc3_uart,		/* 4 */
	fsid_scc4_uart,		/* 5 */
	fs_uart_nr,		/* number of channel ids */
};

/* Map a 1-based SCC number to its fs_uart_id. */
static inline int fs_uart_id_scc2fsid(int id)
{
	return id - 1 + fsid_scc1_uart;
}

/* Map an fs_uart_id back to a 1-based SCC number. */
static inline int fs_uart_id_fsid2scc(int id)
{
	return 1 + (id - fsid_scc1_uart);
}

/* Map a 1-based SMC number to its fs_uart_id. */
static inline int fs_uart_id_smc2fsid(int id)
{
	return id - 1 + fsid_smc1_uart;
}

/* Map an fs_uart_id back to a 1-based SMC number. */
static inline int fs_uart_id_fsid2smc(int id)
{
	return 1 + (id - fsid_smc1_uart);
}
/* Board-supplied description of one CPM UART channel. */
struct fs_uart_platform_info {
        void(*init_ioports)(struct fs_uart_platform_info *);	/* board hook to set up pins */
        /* device specific information */
        int fs_no;              /* controller index */
        char fs_type[4];        /* controller type ("SMC"/"SCC", NUL-terminated) */
        u32 uart_clk;		/* input clock rate -- presumably Hz; confirm with users */
        u8 tx_num_fifo;
        u8 tx_buf_size;
        u8 rx_num_fifo;
        u8 rx_buf_size;
        u8 brg;			/* baud-rate generator number */
        u8 clk_rx;
        u8 clk_tx;
};

/*
 * Derive the fs_uart_id for a platform-info record from its controller
 * type string and 1-based controller number; falls back to the raw fs_no
 * when the type matches neither "SMC" nor "SCC".
 * NOTE(review): strstr() is used but this header does not include
 * <linux/string.h> itself -- confirm all includers pull it in.
 */
static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi)
{
        if(strstr(fpi->fs_type, "SMC"))
                return fs_uart_id_smc2fsid(fpi->fs_no);
        if(strstr(fpi->fs_type, "SCC"))
                return fs_uart_id_scc2fsid(fpi->fs_no);
        return fpi->fs_no;
}
#endif
|
8f7cdc8122a15da5cd86b95c3a21727cabffbce1
|
6d5a257bf72afdd85660ad3e938e2aa3025c680b
|
/ThirdParty/bplus-tree/include/private/utils.h
|
aa54bc61ceea68c4619cdc238e49f09fd16cd272
|
[
"Apache-2.0"
] |
permissive
|
OriginQ/QPanda-2
|
682b0e3bcdec7c66a651e8001d639f6146595fdf
|
a182212503a97981844140b165cb3cee8e293edd
|
refs/heads/master
| 2023-08-19T03:01:27.385297
| 2023-08-04T08:43:03
| 2023-08-04T08:43:03
| 136,144,680
| 1,207
| 96
|
Apache-2.0
| 2023-08-04T08:42:52
| 2018-06-05T08:23:20
|
C++
|
UTF-8
|
C
| false
| false
| 233
|
h
|
utils.h
|
#ifndef _PRIVATE_UTILS_H_
#define _PRIVATE_UTILS_H_

#include <stdint.h> /* uint64_t */

/* Hash a 64-bit key (algorithm lives in the implementation file). */
uint64_t bp__compute_hashl(uint64_t key);
/* 64-bit host<->network byte-order conversions -- presumably hton/ntoh
   for uint64_t; confirm in the implementation. */
uint64_t myhtonll(uint64_t value);
uint64_t myntohll(uint64_t value);

#endif /* _PRIVATE_UTILS_H_ */
|
81bdd705282f5a8131fdc11502d0569644535291
|
b970e053302588f44ee1c6b7187c4769934c857f
|
/ajax/libs/openlayers/5.1.0-beta.2/TileRange.js
|
efa9129c426f7bfe98f7bffe2997287b467a1b7c
|
[
"MIT"
] |
permissive
|
cdnjs/cdnjs
|
2fe0f21477c08618fe609da844f5d133224c3eda
|
6843ffa5339e4595b3a6893ae3e9ede1117cc5f9
|
refs/heads/master
| 2023-07-23T14:52:44.587645
| 2023-07-23T07:12:24
| 2023-07-23T07:12:24
| 1,409,811
| 8,894
| 5,633
|
MIT
| 2023-06-27T12:32:50
| 2011-02-25T05:53:47
| null |
UTF-8
|
C
| false
| false
| 3,373
|
js
|
TileRange.js
|
/**
 * @module ol/TileRange
 */

/**
 * A representation of a contiguous block of tiles.  A tile range is
 * specified by its min/max tile coordinates and is inclusive of those
 * coordinates on both axes.
 *
 * @constructor
 * @param {number} minX Minimum X.
 * @param {number} maxX Maximum X.
 * @param {number} minY Minimum Y.
 * @param {number} maxY Maximum Y.
 * @struct
 */
const TileRange = function(minX, maxX, minY, maxY) {
  /** @type {number} */
  this.minX = minX;
  /** @type {number} */
  this.maxX = maxX;
  /** @type {number} */
  this.minY = minY;
  /** @type {number} */
  this.maxY = maxY;
};

/**
 * Update an existing tile range in place when one is given, otherwise
 * create a fresh one.
 * @param {number} minX Minimum X.
 * @param {number} maxX Maximum X.
 * @param {number} minY Minimum Y.
 * @param {number} maxY Maximum Y.
 * @param {module:ol/TileRange=} tileRange TileRange to reuse.
 * @return {module:ol/TileRange} Tile range.
 */
export function createOrUpdate(minX, maxX, minY, maxY, tileRange) {
  if (tileRange === undefined) {
    return new TileRange(minX, maxX, minY, maxY);
  }
  tileRange.minX = minX;
  tileRange.maxX = maxX;
  tileRange.minY = minY;
  tileRange.maxY = maxY;
  return tileRange;
}

/**
 * @param {module:ol/tilecoord~TileCoord} tileCoord Tile coordinate ([z, x, y]).
 * @return {boolean} Contains tile coordinate.
 */
TileRange.prototype.contains = function(tileCoord) {
  const x = tileCoord[1];
  const y = tileCoord[2];
  return this.containsXY(x, y);
};

/**
 * @param {module:ol/TileRange} tileRange Tile range.
 * @return {boolean} Contains.
 */
TileRange.prototype.containsTileRange = function(tileRange) {
  const insideX = this.minX <= tileRange.minX && tileRange.maxX <= this.maxX;
  const insideY = this.minY <= tileRange.minY && tileRange.maxY <= this.maxY;
  return insideX && insideY;
};

/**
 * @param {number} x Tile coordinate x.
 * @param {number} y Tile coordinate y.
 * @return {boolean} Contains coordinate.
 */
TileRange.prototype.containsXY = function(x, y) {
  const withinX = x >= this.minX && x <= this.maxX;
  const withinY = y >= this.minY && y <= this.maxY;
  return withinX && withinY;
};

/**
 * @param {module:ol/TileRange} tileRange Tile range.
 * @return {boolean} Equals.
 */
TileRange.prototype.equals = function(tileRange) {
  return (
    tileRange.minX == this.minX &&
    tileRange.minY == this.minY &&
    tileRange.maxX == this.maxX &&
    tileRange.maxY == this.maxY
  );
};

/**
 * Grow this range (in place) to cover the given range as well.
 * @param {module:ol/TileRange} tileRange Tile range.
 */
TileRange.prototype.extend = function(tileRange) {
  this.minX = tileRange.minX < this.minX ? tileRange.minX : this.minX;
  this.maxX = tileRange.maxX > this.maxX ? tileRange.maxX : this.maxX;
  this.minY = tileRange.minY < this.minY ? tileRange.minY : this.minY;
  this.maxY = tileRange.maxY > this.maxY ? tileRange.maxY : this.maxY;
};

/**
 * @return {number} Height.
 */
TileRange.prototype.getHeight = function() {
  const height = this.maxY - this.minY + 1;
  return height;
};

/**
 * @return {module:ol/size~Size} Size.
 */
TileRange.prototype.getSize = function() {
  return [this.getWidth(), this.getHeight()];
};

/**
 * @return {number} Width.
 */
TileRange.prototype.getWidth = function() {
  const width = this.maxX - this.minX + 1;
  return width;
};

/**
 * @param {module:ol/TileRange} tileRange Tile range.
 * @return {boolean} Intersects.
 */
TileRange.prototype.intersects = function(tileRange) {
  const overlapX = this.minX <= tileRange.maxX && this.maxX >= tileRange.minX;
  const overlapY = this.minY <= tileRange.maxY && this.maxY >= tileRange.minY;
  return overlapX && overlapY;
};

export default TileRange;
|
ee9c90542179fc24d13cca98368f5a8e2ef66016
|
098bdd84c5c13c1c750bd66848d3cc394a2f4da3
|
/source/MaterialXFormat/Export.h
|
3780d6b0d6f945161653b435b75c0172f83a2dfb
|
[
"Apache-2.0"
] |
permissive
|
autodesk-forks/MaterialX
|
0526c4cdd36cc4d0f0e1773b6833da2f28f53b84
|
f6fdb68d9929fa26458161718c6b26dd69f246de
|
refs/heads/adsk_contrib/dev
| 2023-08-17T03:00:13.163418
| 2023-07-24T17:35:14
| 2023-07-24T17:35:14
| 99,679,762
| 110
| 25
|
Apache-2.0
| 2023-09-13T05:14:38
| 2017-08-08T10:16:05
|
Mathematica
|
UTF-8
|
C
| false
| false
| 606
|
h
|
Export.h
|
//
// Copyright Contributors to the MaterialX Project
// SPDX-License-Identifier: Apache-2.0
//

#ifndef MATERIALX_FORMAT_EXPORT_H
#define MATERIALX_FORMAT_EXPORT_H

#include <MaterialXCore/Library.h>

/// @file
/// Macros for declaring imported and exported symbols.

// MATERIALX_FORMAT_EXPORTS selects export declarations (defined when the
// MaterialXFormat library itself is being built); consumers of the header
// get the import declarations instead.
#if defined(MATERIALX_FORMAT_EXPORTS)
#define MX_FORMAT_API MATERIALX_SYMBOL_EXPORT
#define MX_FORMAT_EXTERN_TEMPLATE(...) MATERIALX_EXPORT_EXTERN_TEMPLATE(__VA_ARGS__)
#else
#define MX_FORMAT_API MATERIALX_SYMBOL_IMPORT
#define MX_FORMAT_EXTERN_TEMPLATE(...) MATERIALX_IMPORT_EXTERN_TEMPLATE(__VA_ARGS__)
#endif

#endif
|
d5d2e5004d18b341561a3f57646dbadb685871c8
|
41eb0837713f297134529591b66f3d4d82bcf98e
|
/src/Raine/source/sdl/dialogs/sound_commands.h
|
683b193b4d0fd2745e6502e2a879a9649b3178ce
|
[] |
no_license
|
AlexxandreFS/Batocera.PLUS
|
27b196b3cbb781b6fc99e62cad855396d1d5f8f2
|
997ee763ae7135fdf0c34a081e789918bd2eb169
|
refs/heads/master
| 2023-08-17T21:52:39.083687
| 2023-08-17T15:03:44
| 2023-08-17T15:03:44
| 215,869,486
| 135
| 57
| null | 2023-08-14T14:46:14
| 2019-10-17T19:23:42
|
C
|
UTF-8
|
C
| false
| false
| 29
|
h
|
sound_commands.h
|
/* Run the sound-commands dialog for selection 'sel'; return value and
   exact semantics are defined in the implementation -- TODO confirm. */
int do_sound_cmd(int sel);
|
777e0e5a96a5e58b1c9c7dd6ce11c7ab004d44cf
|
61998e6f0496e1e7e85d7d4fc520ea06980fdf14
|
/DuiVision/common/XUnZip.h
|
78e71184dc8087c6cf4a4ef1e9629cca58db6a49
|
[
"MIT"
] |
permissive
|
blueantst/DuiVision
|
1de866a5e385d2e5226b58558a6bfbe97c726c84
|
c4081d2212e9c1bb218d55eaa0cee4d3fe719c2b
|
refs/heads/master
| 2023-08-10T03:40:08.128824
| 2023-06-01T11:45:13
| 2023-06-01T11:45:13
| 16,739,387
| 425
| 248
|
MIT
| 2018-09-05T15:50:52
| 2014-02-11T17:45:10
|
C++
|
UTF-8
|
C
| false
| false
| 4,003
|
h
|
XUnZip.h
|
//XUnZip define
// Minimal unzip API: open an archive from a file handle, a file name, or a
// memory block; enumerate items; extract them.  ANSI (…A) and wide (…W)
// entry variants are selected by the ZENTRY/Get/Find macros below.
#pragma once

DECLARE_HANDLE(HZIP);	// An HZIP identifies a zip file that has been opened
typedef DWORD ZRESULT;

// Values for the 'flags' argument of OpenZipU / UnzipItem: how to
// interpret the source/destination pointer.
#define ZIP_HANDLE 1
#define ZIP_FILENAME 2
#define ZIP_MEMORY 3

// Metadata for one archive member (ANSI name).
typedef struct
{
	int index; // index of this file within the zip
	char name[MAX_PATH]; // filename within the zip
	DWORD attr; // attributes, as in GetFileAttributes.
	FILETIME atime,ctime,mtime;// access, create, modify filetimes
	long comp_size; // sizes of item, compressed and uncompressed. These
	long unc_size; // may be -1 if not yet known (e.g. being streamed in)
} ZIPENTRY;

// Metadata for one archive member (wide-character name).
typedef struct
{
	int index; // index of this file within the zip
	TCHAR name[MAX_PATH]; // filename within the zip
	DWORD attr; // attributes, as in GetFileAttributes.
	FILETIME atime,ctime,mtime;// access, create, modify filetimes
	long comp_size; // sizes of item, compressed and uncompressed. These
	long unc_size; // may be -1 if not yet known (e.g. being streamed in)
} ZIPENTRYW;

// These are the result codes:
#define ZR_OK 0x00000000 // nb. the pseudo-code zr-recent is never returned,
#define ZR_RECENT 0x00000001 // but can be passed to FormatZipMessage.
// The following come from general system stuff (e.g. files not openable)
#define ZR_GENMASK 0x0000FF00
#define ZR_NODUPH 0x00000100 // couldn't duplicate the handle
#define ZR_NOFILE 0x00000200 // couldn't create/open the file
#define ZR_NOALLOC 0x00000300 // failed to allocate some resource
#define ZR_WRITE 0x00000400 // a general error writing to the file
#define ZR_NOTFOUND 0x00000500 // couldn't find that file in the zip
#define ZR_MORE 0x00000600 // there's still more data to be unzipped
#define ZR_CORRUPT 0x00000700 // the zipfile is corrupt or not a zipfile
#define ZR_READ 0x00000800 // a general error reading the file
// The following come from mistakes on the part of the caller
#define ZR_CALLERMASK 0x00FF0000
#define ZR_ARGS 0x00010000 // general mistake with the arguments
#define ZR_NOTMMAP 0x00020000 // tried to ZipGetMemory, but that only works on mmap zipfiles, which yours wasn't
#define ZR_MEMSIZE 0x00030000 // the memory size is too small
#define ZR_FAILED 0x00040000 // the thing was already failed when you called this function
#define ZR_ENDED 0x00050000 // the zip creation has already been closed
#define ZR_MISSIZE 0x00060000 // the indicated input file size turned out mistaken
#define ZR_PARTIALUNZ 0x00070000 // the file had already been partially unzipped
#define ZR_ZMODE 0x00080000 // tried to mix creating/opening a zip
// The following come from bugs within the zip library itself
#define ZR_BUGMASK 0xFF000000
#define ZR_NOTINITED 0x01000000 // initialisation didn't work
#define ZR_SEEK 0x02000000 // trying to seek in an unseekable file
#define ZR_NOCHANGE 0x04000000 // changed its mind on storage, but not allowed
#define ZR_FLATE 0x05000000 // an internal error in the de/inflation code

// Open an archive for reading; 'flags' (ZIP_HANDLE/ZIP_FILENAME/ZIP_MEMORY)
// says how to interpret 'z' and 'len'.
HZIP OpenZipU(void *z,unsigned int len,DWORD flags);
ZRESULT CloseZipU(HZIP hz);

// Fetch metadata for the item at 'index'.
ZRESULT GetZipItemA(HZIP hz, int index, ZIPENTRY *ze);
ZRESULT GetZipItemW(HZIP hz, int index, ZIPENTRYW *ze);
// Locate an item by name ('ic' presumably selects case-insensitive match
// -- confirm in the implementation); fills both *index and *ze.
ZRESULT FindZipItemA(HZIP hz, const TCHAR *name, bool ic, int *index, ZIPENTRY *ze);
ZRESULT FindZipItemW(HZIP hz, const TCHAR *name, bool ic, int *index, ZIPENTRYW *ze);
// Extract the item at 'index' to 'dst' (interpretation again set by 'flags').
ZRESULT UnzipItem(HZIP hz, int index, void *dst, unsigned int len, DWORD flags);

// Map the generic names onto the A/W variants to match the build's
// character set.
#ifdef _UNICODE
#define ZENTRY ZIPENTRYW
#define OpenZip OpenZipU
#define CloseZip(hz) CloseZipU(hz)
#define GetZipItem GetZipItemW
#define FindZipItem FindZipItemW
#else
#define ZENTRY ZIPENTRY
#define OpenZip OpenZipU
#define CloseZip(hz) CloseZipU(hz)
#define GetZipItem GetZipItemA
#define FindZipItem FindZipItemA
#endif
|
17f5a0a1c60166a0663b02de74ecc45fb1c8681d
|
e654a47dfd2e031093b6d216f610b861c8784384
|
/webrtc_dsp/modules/audio_processing/utility/delay_estimator_internal.h
|
e99fe21a8546b995b1e9f9cbb32582b95cccdf49
|
[
"Unlicense"
] |
permissive
|
grishka/libtgvoip
|
972bed7365cd00ee432b6a8843ea277daaedb97d
|
6c82c9de85cca69ea099a738fcbd2ab2de4f839d
|
refs/heads/public
| 2023-08-11T03:53:42.191273
| 2019-06-30T19:03:13
| 2019-06-30T19:03:13
| 80,738,369
| 420
| 140
|
Unlicense
| 2021-06-17T08:24:43
| 2017-02-02T15:27:50
|
C++
|
UTF-8
|
C
| false
| false
| 1,461
|
h
|
delay_estimator_internal.h
|
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// Header file including the delay estimator handle used for testing.
#ifndef MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_
#define MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_
#include "modules/audio_processing/utility/delay_estimator.h"
typedef union {
float float_;
int32_t int32_;
} SpectrumType;
typedef struct {
// Pointers to mean values of spectrum.
SpectrumType* mean_far_spectrum;
// |mean_far_spectrum| initialization indicator.
int far_spectrum_initialized;
int spectrum_size;
// Far-end part of binary spectrum based delay estimation.
BinaryDelayEstimatorFarend* binary_farend;
} DelayEstimatorFarend;
typedef struct {
// Pointers to mean values of spectrum.
SpectrumType* mean_near_spectrum;
// |mean_near_spectrum| initialization indicator.
int near_spectrum_initialized;
int spectrum_size;
// Binary spectrum based delay estimator
BinaryDelayEstimator* binary_handle;
} DelayEstimator;
#endif // MODULES_AUDIO_PROCESSING_UTILITY_DELAY_ESTIMATOR_INTERNAL_H_
|
5cd378fc00dab7bb501a0e5225e9ccf3400cf921
|
0aa0c78a6fcea85cf0ccc19c9b5eed25b1f71e18
|
/storm_analysis/sa_library/multi_fit.h
|
29afacf608a3bc1999d41c93299c708da0e71218
|
[] |
no_license
|
ZhuangLab/storm-analysis
|
5f61c3cb63d140fed43e64b1db0865dc0cfb2f62
|
26e4f8038180c3cf29909ed126daa9046f7cd8fc
|
refs/heads/master
| 2023-05-02T22:35:39.392231
| 2023-04-22T17:55:35
| 2023-04-22T17:55:35
| 9,655,861
| 102
| 81
| null | 2020-05-28T17:47:43
| 2013-04-24T19:25:56
|
Python
|
UTF-8
|
C
| false
| false
| 8,739
|
h
|
multi_fit.h
|
/*
* Common constants for multiple peak fitting.
*
* Hazen 10/17
*
*/
#ifndef MULTI_FIT_H
#define MULTI_FIT_H
/* debugging */
#define TESTING 0
#define VERBOSE 0
/* number of peak and results parameters. */
#define NFITTING 7
#define NPEAKPAR 9
/* indexes for peak fitting parameters. */
#define HEIGHT 0 /* Height */
#define XCENTER 1 /* X center */
#define XWIDTH 2 /* Width in x, only relevant for gaussians */
#define YCENTER 3 /* Y center */
#define YWIDTH 4 /* Width in y, only relevant for gaussians */
#define BACKGROUND 5 /* Background level under the peak */
#define ZCENTER 6 /* Z center */
/* additional indexes for results. */
#define STATUS 7 /* Status flag, see below */
#define IERROR 8 /* Error in the fit (integrated over the AOI) */
/* peak status flags */
#define RUNNING 0
#define CONVERGED 1
#define ERROR 2
#define HYSTERESIS 0.6 /* In order to move the AOI or change it's size,
the new value must differ from the old value
by at least this much (<= 0.5 is no hysteresis). */
/* fitting constants. */
#define LAMBDASTART 1.0 /* Initial lambda value. */
#define LAMBDADOWN 0.75 /* Multiplier for decreasing lambda. */
#define LAMBDAMAX 1.0e+20 /* Maximum lambda value, if we hit this the peak is lost as un-fittable. */
#define LAMBDAMIN 1.0e-3 /* Minimum lambda value. */
#define LAMBDAUP 4.0 /* Multiplier for increasing lambda, if necessary. */
/* peak storage. */
#define INCNPEAKS 500 /* Storage grows in units of 500 peaks. */
/* convergence metric */
#define DELTA_CONVERGENCE 0 /* The default is check for fitting error convergence. Set this
to 1 for convergence based on peak fit deltas. */
/*
* There is one of these for each peak to be fit.
*/
typedef struct peakData
{
int added; /* Counter for adding / subtracting the peak from the image. */
int index; /* Peak id. */
int iterations; /* Number of fitting iterations. */
int status; /* Status of the fit (running, converged, etc.). */
int xi; /* Location of the fitting area in x (starting pixel). */
int yi; /* Location of the fitting area in y (starting pixel). */
double error; /* Current error. */
double lambda; /* Levenberg-Marquadt lambda term. */
double params[NFITTING]; /* [height x-center x-width y-center y-width background z-center] */
double *psf; /* The peaks PSF. */
void *peak_model; /* Pointer to peak model specific data (i.e. spline data, etc.) */
} peakData;
/*
* This structure contains everything necessary to fit an array of peaks on an image.
*/
typedef struct fitData
{
/*
* These are for diagnostics.
*
* FIXME: These may overflow if there are lot of frames with a lot of localizations.
*/
int n_dposv; /* Number reset due to an error trying to solve Ax = b. */
int n_iterations; /* Number of iterations of fitting. */
int n_lost; /* Number of fits that were lost altogether. */
int n_margin; /* Number reset because they were too close to the edge of the image. */
int n_neg_fi; /* Number reset due to a negative fi. */
int n_neg_height; /* Number reset due to negative height. */
int n_non_converged; /* Number of fits that did not converge. */
int n_non_decr; /* Number of restarts due to non-decreasing error.*/
int fit_size_x; /* Size of the fitting area in x in pixels. */
int fit_size_y; /* Size of the fitting area in y in pixels. */
int image_size_x; /* Size of the image in x (fast axis). */
int image_size_y; /* Size of the image in y (slow axis). */
int jac_size; /* The number of terms in the Jacobian. */
int max_nfit; /* The (current) maximum number of peaks that we have storage for. */
int nfit; /* Number of peaks to fit. */
int roi_n_index; /* Number of valid points in the ROI index arrays. */
double minimum_height; /* This is used to clamp the minimum allowed peak starting height. */
double xoff; /* Offset between the peak center parameter in x and the actual center. */
double yoff; /* Offset between the peak center parameter in y and the actual center. */
double zoff; /* Offset between the peak center parameter in z and the actual center. */
double tolerance; /* Fit tolerance. */
int *bg_counts; /* Number of peaks covering a particular pixel. */
int *roi_x_index; /* Array that translates from ROI to image x coordinates. */
int *roi_y_index; /* Array that translates from ROI to image y coordinates. */
int *stale; /* Error value at pixel i is stale and needs to be updated. */
double *as_xi; /* Anscombe transform of the image data. */
double *bg_data; /* Fit (background) data. */
double *bg_estimate; /* Current background estimate (calculated externally). */
double *err_i; /* Current error at pixel i. */
double *f_data; /* Fit (foreground) data. */
double *rqe; /* Pixel relative quantum efficiency. */
double *scmos_term; /* sCMOS calibration term for each pixel (var/gain^2). */
double *t_fi; /* Transform of the fitting function. */
double *x_data; /* Image data. */
peakData *working_peak; /* Working copy of the peak that we are trying to improve the fit of. */
peakData *fit; /* The peaks to be fit to the image. */
void *fit_model; /* Other data/structures necessary to do the fitting, such as a cubic spline structure. */
/* Specific fitter versions must provide these functions. */
void (*fn_alloc_peaks)(struct peakData *, int); /* Function for allocating storage for peaks. */
void (*fn_calc_JH)(struct fitData *, double *, double *); /* Function for calculating the Jacobian and the Hessian. */
void (*fn_calc_peak_shape)(struct fitData *); /* Function for calculating the current peak shape. */
int (*fn_check)(struct fitData *); /* Function for checking the validity of the working peak parameters. */
void (*fn_copy_peak)(struct fitData *, struct peakData *, struct peakData *); /* Function for copying peaks. */
int (*fn_error_fn)(struct fitData *); /* Function for calculating the fitting error. */
void (*fn_free_peaks)(struct peakData *, int); /* Function for freeing storage for peaks. */
void (*fn_update)(struct fitData *, double *); /* Function for updating the working peak parameters. */
} fitData;
/*
* Functions.
*
* These all start with mFit to make it easier to figure
* out in libraries that use this code where they came from.
*/
void mFitAddPeak(fitData *);
double mFitAnscombe(double);
void mFitAnscombeTransformImage(fitData *);
int mFitCalcErr(fitData *);
int mFitCalcErrALS(fitData *);
int mFitCalcErrLS(fitData *);
int mFitCalcErrDWLS(fitData *);
int mFitCalcErrFWLS(fitData *);
int mFitCheck(fitData *);
void mFitCleanup(fitData *);
void mFitCopyPeak(fitData *, peakData *, peakData *);
int mFitDeltaConvergence(fitData *, int);
void mFitEstimatePeakBackground(fitData *);
void mFitEstimatePeakHeight(fitData *);
void mFitGetFitImage(fitData *, double *);
int mFitGetNError(fitData *);
void mFitGetPeakPropertyDouble(fitData *, double *, char *);
void mFitGetPeakPropertyInt(fitData *, int32_t *, char *);
void mFitGetResidual(fitData *, double *);
int mFitGetUnconverged(fitData *);
fitData *mFitInitialize(double *, double *, double, int, int);
void mFitInitializeROIIndexing(fitData *, int);
void mFitIterateOriginal(fitData *);
void mFitIterateLM(fitData *);
void mFitNewBackground(fitData *, double *);
void mFitNewImage(fitData *, double *);
void mFitNewPeaks(fitData *, int);
double mFitPeakBgSum(fitData *, peakData *);
double mFitPeakFgSum(fitData *, peakData *);
double mFitPeakFgSumSensitivityCorrected(fitData *, peakData *);
double mFitPeakSum(fitData *, peakData *);
void mFitRecenterPeaks(fitData *);
void mFitRemoveErrorPeaks(fitData *);
void mFitResetPeak(fitData *, int);
void mFitSetPeakStatus(fitData *, int32_t *);
int mFitSolve(double *, double *, int);
void mFitSubtractPeak(fitData *);
void mFitUpdate(peakData *);
void mFitUpdateParam(peakData *, double, int);
#endif
|
5856f07998aa0261685698bc9cebbafab959f7d7
|
0744dcc5394cebf57ebcba343747af6871b67017
|
/external/wpa_supplicant/src/crypto/crypto_none.c
|
954749f9efc58a7836371a4bc247955daec03363
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause"
] |
permissive
|
Samsung/TizenRT
|
96abf62f1853f61fcf91ff14671a5e0c6ca48fdb
|
1a5c2e00a4b1bbf4c505bbf5cc6a8259e926f686
|
refs/heads/master
| 2023-08-31T08:59:33.327998
| 2023-08-08T06:09:20
| 2023-08-31T04:38:20
| 82,517,252
| 590
| 719
|
Apache-2.0
| 2023-09-14T06:54:49
| 2017-02-20T04:38:30
|
C
|
UTF-8
|
C
| false
| false
| 449
|
c
|
crypto_none.c
|
/*
* WPA Supplicant / Empty template functions for crypto wrapper
* Copyright (c) 2005, Jouni Malinen <j@w1.fi>
*
* This software may be distributed under the terms of the BSD license.
* See README for more details.
*/
#include "includes.h"
#include "common.h"
#include "crypto.h"
int md4_vector(size_t num_elem, const u8 *addr[], const size_t *len, u8 *mac)
{
return 0;
}
void des_encrypt(const u8 *clear, const u8 *key, u8 *cypher)
{
}
|
816d0b105758849647802acffd11b298ae442161
|
e1cddfd754d952134e72dfd03522c5ea4fb6008e
|
/src/vnet/ip6-nd/ip6_nd_test.c
|
488ca591ba00dc17ac037839444722a46f5e6c3a
|
[
"Apache-2.0"
] |
permissive
|
FDio/vpp
|
0ad30fa1bec2975ffa6b66b45c9f4f32163123b6
|
f234b0d4626d7e686422cc9dfd25958584f4931e
|
refs/heads/master
| 2023-08-31T16:09:04.068646
| 2022-03-14T09:49:15
| 2023-08-31T09:50:00
| 96,556,718
| 1,048
| 630
|
Apache-2.0
| 2023-06-21T05:39:17
| 2017-07-07T16:29:40
|
C
|
UTF-8
|
C
| false
| false
| 9,357
|
c
|
ip6_nd_test.c
|
/*
* Copyright (c) 2015 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vat/vat.h>
#include <vlibapi/api.h>
#include <vlibmemory/api.h>
#include <vppinfra/error.h>
#include <vpp/api/types.h>
#include <vnet/ip/ip_format_fns.h>
#include <vnet/ethernet/ethernet_format_fns.h>
/* define message IDs */
#include <ip6-nd/ip6_nd.api_enum.h>
#include <ip6-nd/ip6_nd.api_types.h>
#include <vlibmemory/vlib.api_types.h>
typedef struct
{
/* API message ID base */
u16 msg_id_base;
u32 ping_id;
vat_main_t *vat_main;
} ip6_nd_test_main_t;
ip6_nd_test_main_t ip6_nd_test_main;
#define __plugin_msg_base ip6_nd_test_main.msg_id_base
#include <vlibapi/vat_helper_macros.h>
static int
api_want_ip6_ra_events (vat_main_t * vam)
{
return -1;
}
static int
api_ip6nd_send_router_solicitation (vat_main_t * vam)
{
return -1;
}
static int
api_ip6nd_proxy_add_del (vat_main_t * vam)
{
unformat_input_t *i = vam->input;
vl_api_ip6nd_proxy_add_del_t *mp;
u32 sw_if_index = ~0;
u8 v6_address_set = 0;
vl_api_ip6_address_t v6address;
u8 is_add = 1;
int ret;
/* Parse args required to build the message */
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
if (unformat (i, "%U", unformat_sw_if_index, vam, &sw_if_index))
;
else if (unformat (i, "sw_if_index %d", &sw_if_index))
;
else if (unformat (i, "%U", unformat_vl_api_ip6_address, &v6address))
v6_address_set = 1;
if (unformat (i, "del"))
is_add = 0;
else
{
clib_warning ("parse error '%U'", format_unformat_error, i);
return -99;
}
}
if (sw_if_index == ~0)
{
errmsg ("missing interface name or sw_if_index");
return -99;
}
if (!v6_address_set)
{
errmsg ("no address set");
return -99;
}
/* Construct the API message */
M (IP6ND_PROXY_ADD_DEL, mp);
mp->is_add = is_add;
mp->sw_if_index = ntohl (sw_if_index);
clib_memcpy (mp->ip, v6address, sizeof (v6address));
/* send it... */
S (mp);
/* Wait for a reply, return good/bad news */
W (ret);
return ret;
}
static int
api_ip6nd_proxy_dump (vat_main_t * vam)
{
vl_api_ip6nd_proxy_dump_t *mp;
vl_api_control_ping_t *mp_ping;
int ret;
M (IP6ND_PROXY_DUMP, mp);
S (mp);
/* Use a control ping for synchronization */
/* Use a control ping for synchronization */
mp_ping = vl_msg_api_alloc_as_if_client (sizeof (*mp_ping));
mp_ping->_vl_msg_id = htons (ip6_nd_test_main.ping_id);
mp_ping->client_index = vam->my_client_index;
vam->result_ready = 0;
S (mp_ping);
W (ret);
return ret;
}
static void vl_api_ip6nd_proxy_details_t_handler
(vl_api_ip6nd_proxy_details_t * mp)
{
vat_main_t *vam = &vat_main;
print (vam->ofp, "host %U sw_if_index %d",
format_vl_api_ip6_address, mp->ip, ntohl (mp->sw_if_index));
}
static int
api_sw_interface_ip6nd_ra_prefix (vat_main_t * vam)
{
unformat_input_t *i = vam->input;
vl_api_sw_interface_ip6nd_ra_prefix_t *mp;
u32 sw_if_index;
u8 sw_if_index_set = 0;
u8 v6_address_set = 0;
vl_api_prefix_t pfx;
u8 use_default = 0;
u8 no_advertise = 0;
u8 off_link = 0;
u8 no_autoconfig = 0;
u8 no_onlink = 0;
u8 is_no = 0;
u32 val_lifetime = 0;
u32 pref_lifetime = 0;
int ret;
/* Parse args required to build the message */
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
if (unformat (i, "%U", unformat_sw_if_index, vam, &sw_if_index))
sw_if_index_set = 1;
else if (unformat (i, "sw_if_index %d", &sw_if_index))
sw_if_index_set = 1;
else if (unformat (i, "%U", unformat_vl_api_prefix, &pfx))
v6_address_set = 1;
else if (unformat (i, "val_life %d", &val_lifetime))
;
else if (unformat (i, "pref_life %d", &pref_lifetime))
;
else if (unformat (i, "def"))
use_default = 1;
else if (unformat (i, "noadv"))
no_advertise = 1;
else if (unformat (i, "offl"))
off_link = 1;
else if (unformat (i, "noauto"))
no_autoconfig = 1;
else if (unformat (i, "nolink"))
no_onlink = 1;
else if (unformat (i, "isno"))
is_no = 1;
else
{
clib_warning ("parse error '%U'", format_unformat_error, i);
return -99;
}
}
if (sw_if_index_set == 0)
{
errmsg ("missing interface name or sw_if_index");
return -99;
}
if (!v6_address_set)
{
errmsg ("no address set");
return -99;
}
/* Construct the API message */
M (SW_INTERFACE_IP6ND_RA_PREFIX, mp);
mp->sw_if_index = ntohl (sw_if_index);
clib_memcpy (&mp->prefix, &pfx, sizeof (pfx));
mp->use_default = use_default;
mp->no_advertise = no_advertise;
mp->off_link = off_link;
mp->no_autoconfig = no_autoconfig;
mp->no_onlink = no_onlink;
mp->is_no = is_no;
mp->val_lifetime = ntohl (val_lifetime);
mp->pref_lifetime = ntohl (pref_lifetime);
/* send it... */
S (mp);
/* Wait for a reply, return good/bad news */
W (ret);
return ret;
}
static int
api_sw_interface_ip6nd_ra_config (vat_main_t * vam)
{
unformat_input_t *i = vam->input;
vl_api_sw_interface_ip6nd_ra_config_t *mp;
u32 sw_if_index;
u8 sw_if_index_set = 0;
u8 suppress = 0;
u8 managed = 0;
u8 other = 0;
u8 ll_option = 0;
u8 send_unicast = 0;
u8 cease = 0;
u8 is_no = 0;
u8 default_router = 0;
u32 max_interval = 0;
u32 min_interval = 0;
u32 lifetime = 0;
u32 initial_count = 0;
u32 initial_interval = 0;
int ret;
/* Parse args required to build the message */
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
if (unformat (i, "%U", unformat_sw_if_index, vam, &sw_if_index))
sw_if_index_set = 1;
else if (unformat (i, "sw_if_index %d", &sw_if_index))
sw_if_index_set = 1;
else if (unformat (i, "maxint %d", &max_interval))
;
else if (unformat (i, "minint %d", &min_interval))
;
else if (unformat (i, "life %d", &lifetime))
;
else if (unformat (i, "count %d", &initial_count))
;
else if (unformat (i, "interval %d", &initial_interval))
;
else if (unformat (i, "suppress") || unformat (i, "surpress"))
suppress = 1;
else if (unformat (i, "managed"))
managed = 1;
else if (unformat (i, "other"))
other = 1;
else if (unformat (i, "ll"))
ll_option = 1;
else if (unformat (i, "send"))
send_unicast = 1;
else if (unformat (i, "cease"))
cease = 1;
else if (unformat (i, "isno"))
is_no = 1;
else if (unformat (i, "def"))
default_router = 1;
else
{
clib_warning ("parse error '%U'", format_unformat_error, i);
return -99;
}
}
if (sw_if_index_set == 0)
{
errmsg ("missing interface name or sw_if_index");
return -99;
}
/* Construct the API message */
M (SW_INTERFACE_IP6ND_RA_CONFIG, mp);
mp->sw_if_index = ntohl (sw_if_index);
mp->max_interval = ntohl (max_interval);
mp->min_interval = ntohl (min_interval);
mp->lifetime = ntohl (lifetime);
mp->initial_count = ntohl (initial_count);
mp->initial_interval = ntohl (initial_interval);
mp->suppress = suppress;
mp->managed = managed;
mp->other = other;
mp->ll_option = ll_option;
mp->send_unicast = send_unicast;
mp->cease = cease;
mp->is_no = is_no;
mp->default_router = default_router;
/* send it... */
S (mp);
/* Wait for a reply, return good/bad news */
W (ret);
return ret;
}
static int
api_ip6nd_proxy_enable_disable (vat_main_t *vam)
{
// not yet implemented
return -1;
}
static int
api_sw_interface_ip6nd_ra_dump (vat_main_t *vam)
{
unformat_input_t *i = vam->input;
vl_api_sw_interface_ip6nd_ra_dump_t *mp;
vl_api_control_ping_t *mp_ping;
u32 sw_if_index = ~0;
int ret;
/* Parse args required to build the message */
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
if (unformat (i, "%U", unformat_sw_if_index, vam, &sw_if_index))
;
else if (unformat (i, "sw_if_index %u", &sw_if_index))
;
else
{
clib_warning ("parse error '%U'", format_unformat_error, i);
return -99;
}
}
/* Construct the API message */
M (SW_INTERFACE_IP6ND_RA_DUMP, mp);
mp->sw_if_index = ntohl (sw_if_index);
/* Send it */
S (mp);
/* Use control ping for synchronization */
PING (&ip6_nd_test_main, mp_ping);
S (mp_ping);
/* Wait for a reply... */
W (ret);
return ret;
}
static void
vl_api_sw_interface_ip6nd_ra_details_t_handler (
vl_api_sw_interface_ip6nd_ra_details_t *mp)
{
vat_main_t *vam = ip6_nd_test_main.vat_main;
u32 sw_if_index;
u8 send_radv;
/* Read the message */
sw_if_index = ntohl (mp->sw_if_index);
send_radv = mp->send_radv;
/* Print it */
print (vam->ofp, "sw_if_index: %u, send_radv: %s", sw_if_index,
(send_radv ? "on" : "off"));
}
#include <ip6-nd/ip6_nd.api_test.c>
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/
|
361120907840e8ba9b64cdc06486ed7833afa610
|
dd9f6011599a97923d1ac76647a0ec9ea2f4ea39
|
/ssh-xmss.c
|
b6d0561b14118aa8feb10b8e90f1faf881b764ac
|
[
"BSD-3-Clause",
"X11-distribute-modifications-variant",
"SSH-OpenSSH",
"ISC",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"snprintf",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
openssh/openssh-portable
|
e7c942a3f770954479e7fb76086301e15c7db851
|
3c6ab63b383b0b7630da175941e01de9db32a256
|
refs/heads/master
| 2023-08-26T06:28:41.837408
| 2023-08-25T04:48:02
| 2023-08-25T04:48:02
| 15,991,457
| 2,670
| 1,502
|
NOASSERTION
| 2023-09-10T06:45:57
| 2014-01-17T05:53:25
|
C
|
UTF-8
|
C
| false
| false
| 10,329
|
c
|
ssh-xmss.c
|
/* $OpenBSD: ssh-xmss.c,v 1.14 2022/10/28 00:44:44 djm Exp $*/
/*
* Copyright (c) 2017 Stefan-Lukas Gazdag.
* Copyright (c) 2017 Markus Friedl.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "includes.h"
#ifdef WITH_XMSS
#define SSHKEY_INTERNAL
#include <sys/types.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif
#include <unistd.h>
#include "log.h"
#include "sshbuf.h"
#include "sshkey.h"
#include "sshkey-xmss.h"
#include "ssherr.h"
#include "ssh.h"
#include "xmss_fast.h"
static void
ssh_xmss_cleanup(struct sshkey *k)
{
freezero(k->xmss_pk, sshkey_xmss_pklen(k));
freezero(k->xmss_sk, sshkey_xmss_sklen(k));
sshkey_xmss_free_state(k);
free(k->xmss_name);
free(k->xmss_filename);
k->xmss_pk = NULL;
k->xmss_sk = NULL;
k->xmss_name = NULL;
k->xmss_filename = NULL;
}
static int
ssh_xmss_equal(const struct sshkey *a, const struct sshkey *b)
{
if (a->xmss_pk == NULL || b->xmss_pk == NULL)
return 0;
if (sshkey_xmss_pklen(a) != sshkey_xmss_pklen(b))
return 0;
if (memcmp(a->xmss_pk, b->xmss_pk, sshkey_xmss_pklen(a)) != 0)
return 0;
return 1;
}
static int
ssh_xmss_serialize_public(const struct sshkey *key, struct sshbuf *b,
enum sshkey_serialize_rep opts)
{
int r;
if (key->xmss_name == NULL || key->xmss_pk == NULL ||
sshkey_xmss_pklen(key) == 0)
return SSH_ERR_INVALID_ARGUMENT;
if ((r = sshbuf_put_cstring(b, key->xmss_name)) != 0 ||
(r = sshbuf_put_string(b, key->xmss_pk,
sshkey_xmss_pklen(key))) != 0 ||
(r = sshkey_xmss_serialize_pk_info(key, b, opts)) != 0)
return r;
return 0;
}
static int
ssh_xmss_serialize_private(const struct sshkey *key, struct sshbuf *b,
enum sshkey_serialize_rep opts)
{
int r;
if (key->xmss_name == NULL)
return SSH_ERR_INVALID_ARGUMENT;
/* Note: can't reuse ssh_xmss_serialize_public because of sk order */
if ((r = sshbuf_put_cstring(b, key->xmss_name)) != 0 ||
(r = sshbuf_put_string(b, key->xmss_pk,
sshkey_xmss_pklen(key))) != 0 ||
(r = sshbuf_put_string(b, key->xmss_sk,
sshkey_xmss_sklen(key))) != 0 ||
(r = sshkey_xmss_serialize_state_opt(key, b, opts)) != 0)
return r;
return 0;
}
static int
ssh_xmss_copy_public(const struct sshkey *from, struct sshkey *to)
{
int r = SSH_ERR_INTERNAL_ERROR;
u_int32_t left;
size_t pklen;
if ((r = sshkey_xmss_init(to, from->xmss_name)) != 0)
return r;
if (from->xmss_pk == NULL)
return 0; /* XXX SSH_ERR_INTERNAL_ERROR ? */
if ((pklen = sshkey_xmss_pklen(from)) == 0 ||
sshkey_xmss_pklen(to) != pklen)
return SSH_ERR_INTERNAL_ERROR;
if ((to->xmss_pk = malloc(pklen)) == NULL)
return SSH_ERR_ALLOC_FAIL;
memcpy(to->xmss_pk, from->xmss_pk, pklen);
/* simulate number of signatures left on pubkey */
left = sshkey_xmss_signatures_left(from);
if (left)
sshkey_xmss_enable_maxsign(to, left);
return 0;
}
static int
ssh_xmss_deserialize_public(const char *ktype, struct sshbuf *b,
struct sshkey *key)
{
size_t len = 0;
char *xmss_name = NULL;
u_char *pk = NULL;
int ret = SSH_ERR_INTERNAL_ERROR;
if ((ret = sshbuf_get_cstring(b, &xmss_name, NULL)) != 0)
goto out;
if ((ret = sshkey_xmss_init(key, xmss_name)) != 0)
goto out;
if ((ret = sshbuf_get_string(b, &pk, &len)) != 0)
goto out;
if (len == 0 || len != sshkey_xmss_pklen(key)) {
ret = SSH_ERR_INVALID_FORMAT;
goto out;
}
key->xmss_pk = pk;
pk = NULL;
if (!sshkey_is_cert(key) &&
(ret = sshkey_xmss_deserialize_pk_info(key, b)) != 0)
goto out;
/* success */
ret = 0;
out:
free(xmss_name);
freezero(pk, len);
return ret;
}
static int
ssh_xmss_deserialize_private(const char *ktype, struct sshbuf *b,
struct sshkey *key)
{
int r;
char *xmss_name = NULL;
size_t pklen = 0, sklen = 0;
u_char *xmss_pk = NULL, *xmss_sk = NULL;
/* Note: can't reuse ssh_xmss_deserialize_public because of sk order */
if ((r = sshbuf_get_cstring(b, &xmss_name, NULL)) != 0 ||
(r = sshbuf_get_string(b, &xmss_pk, &pklen)) != 0 ||
(r = sshbuf_get_string(b, &xmss_sk, &sklen)) != 0)
goto out;
if (!sshkey_is_cert(key) &&
(r = sshkey_xmss_init(key, xmss_name)) != 0)
goto out;
if (pklen != sshkey_xmss_pklen(key) ||
sklen != sshkey_xmss_sklen(key)) {
r = SSH_ERR_INVALID_FORMAT;
goto out;
}
key->xmss_pk = xmss_pk;
key->xmss_sk = xmss_sk;
xmss_pk = xmss_sk = NULL;
/* optional internal state */
if ((r = sshkey_xmss_deserialize_state_opt(key, b)) != 0)
goto out;
/* success */
r = 0;
out:
free(xmss_name);
freezero(xmss_pk, pklen);
freezero(xmss_sk, sklen);
return r;
}
static int
ssh_xmss_sign(struct sshkey *key,
u_char **sigp, size_t *lenp,
const u_char *data, size_t datalen,
const char *alg, const char *sk_provider, const char *sk_pin, u_int compat)
{
u_char *sig = NULL;
size_t slen = 0, len = 0, required_siglen;
unsigned long long smlen;
int r, ret;
struct sshbuf *b = NULL;
if (lenp != NULL)
*lenp = 0;
if (sigp != NULL)
*sigp = NULL;
if (key == NULL ||
sshkey_type_plain(key->type) != KEY_XMSS ||
key->xmss_sk == NULL ||
sshkey_xmss_params(key) == NULL)
return SSH_ERR_INVALID_ARGUMENT;
if ((r = sshkey_xmss_siglen(key, &required_siglen)) != 0)
return r;
if (datalen >= INT_MAX - required_siglen)
return SSH_ERR_INVALID_ARGUMENT;
smlen = slen = datalen + required_siglen;
if ((sig = malloc(slen)) == NULL)
return SSH_ERR_ALLOC_FAIL;
if ((r = sshkey_xmss_get_state(key, 1)) != 0)
goto out;
if ((ret = xmss_sign(key->xmss_sk, sshkey_xmss_bds_state(key), sig, &smlen,
data, datalen, sshkey_xmss_params(key))) != 0 || smlen <= datalen) {
r = SSH_ERR_INVALID_ARGUMENT; /* XXX better error? */
goto out;
}
/* encode signature */
if ((b = sshbuf_new()) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
if ((r = sshbuf_put_cstring(b, "ssh-xmss@openssh.com")) != 0 ||
(r = sshbuf_put_string(b, sig, smlen - datalen)) != 0)
goto out;
len = sshbuf_len(b);
if (sigp != NULL) {
if ((*sigp = malloc(len)) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
memcpy(*sigp, sshbuf_ptr(b), len);
}
if (lenp != NULL)
*lenp = len;
/* success */
r = 0;
out:
if ((ret = sshkey_xmss_update_state(key, 1)) != 0) {
/* discard signature since we cannot update the state */
if (r == 0 && sigp != NULL && *sigp != NULL) {
explicit_bzero(*sigp, len);
free(*sigp);
}
if (sigp != NULL)
*sigp = NULL;
if (lenp != NULL)
*lenp = 0;
r = ret;
}
sshbuf_free(b);
if (sig != NULL)
freezero(sig, slen);
return r;
}
static int
ssh_xmss_verify(const struct sshkey *key,
const u_char *sig, size_t siglen,
const u_char *data, size_t dlen, const char *alg, u_int compat,
struct sshkey_sig_details **detailsp)
{
struct sshbuf *b = NULL;
char *ktype = NULL;
const u_char *sigblob;
u_char *sm = NULL, *m = NULL;
size_t len, required_siglen;
unsigned long long smlen = 0, mlen = 0;
int r, ret;
if (key == NULL ||
sshkey_type_plain(key->type) != KEY_XMSS ||
key->xmss_pk == NULL ||
sshkey_xmss_params(key) == NULL ||
sig == NULL || siglen == 0)
return SSH_ERR_INVALID_ARGUMENT;
if ((r = sshkey_xmss_siglen(key, &required_siglen)) != 0)
return r;
if (dlen >= INT_MAX - required_siglen)
return SSH_ERR_INVALID_ARGUMENT;
if ((b = sshbuf_from(sig, siglen)) == NULL)
return SSH_ERR_ALLOC_FAIL;
if ((r = sshbuf_get_cstring(b, &ktype, NULL)) != 0 ||
(r = sshbuf_get_string_direct(b, &sigblob, &len)) != 0)
goto out;
if (strcmp("ssh-xmss@openssh.com", ktype) != 0) {
r = SSH_ERR_KEY_TYPE_MISMATCH;
goto out;
}
if (sshbuf_len(b) != 0) {
r = SSH_ERR_UNEXPECTED_TRAILING_DATA;
goto out;
}
if (len != required_siglen) {
r = SSH_ERR_INVALID_FORMAT;
goto out;
}
if (dlen >= SIZE_MAX - len) {
r = SSH_ERR_INVALID_ARGUMENT;
goto out;
}
smlen = len + dlen;
mlen = smlen;
if ((sm = malloc(smlen)) == NULL || (m = malloc(mlen)) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
memcpy(sm, sigblob, len);
memcpy(sm+len, data, dlen);
if ((ret = xmss_sign_open(m, &mlen, sm, smlen,
key->xmss_pk, sshkey_xmss_params(key))) != 0) {
debug2_f("xmss_sign_open failed: %d", ret);
}
if (ret != 0 || mlen != dlen) {
r = SSH_ERR_SIGNATURE_INVALID;
goto out;
}
/* XXX compare 'm' and 'data' ? */
/* success */
r = 0;
out:
if (sm != NULL)
freezero(sm, smlen);
if (m != NULL)
freezero(m, smlen);
sshbuf_free(b);
free(ktype);
return r;
}
static const struct sshkey_impl_funcs sshkey_xmss_funcs = {
/* .size = */ NULL,
/* .alloc = */ NULL,
/* .cleanup = */ ssh_xmss_cleanup,
/* .equal = */ ssh_xmss_equal,
/* .ssh_serialize_public = */ ssh_xmss_serialize_public,
/* .ssh_deserialize_public = */ ssh_xmss_deserialize_public,
/* .ssh_serialize_private = */ ssh_xmss_serialize_private,
/* .ssh_deserialize_private = */ ssh_xmss_deserialize_private,
/* .generate = */ sshkey_xmss_generate_private_key,
/* .copy_public = */ ssh_xmss_copy_public,
/* .sign = */ ssh_xmss_sign,
/* .verify = */ ssh_xmss_verify,
};
const struct sshkey_impl sshkey_xmss_impl = {
/* .name = */ "ssh-xmss@openssh.com",
/* .shortname = */ "XMSS",
/* .sigalg = */ NULL,
/* .type = */ KEY_XMSS,
/* .nid = */ 0,
/* .cert = */ 0,
/* .sigonly = */ 0,
/* .keybits = */ 256,
/* .funcs = */ &sshkey_xmss_funcs,
};
const struct sshkey_impl sshkey_xmss_cert_impl = {
/* .name = */ "ssh-xmss-cert-v01@openssh.com",
/* .shortname = */ "XMSS-CERT",
/* .sigalg = */ NULL,
/* .type = */ KEY_XMSS_CERT,
/* .nid = */ 0,
/* .cert = */ 1,
/* .sigonly = */ 0,
/* .keybits = */ 256,
/* .funcs = */ &sshkey_xmss_funcs,
};
#endif /* WITH_XMSS */
|
33f567886568fd4dd5cfaa451d11c5ccaad3fd73
|
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
|
/SOFTWARE/A64-TERES/linux-a64/arch/x86/boot/pm.c
|
8062f89152504b19babbd9e293791356ac2791aa
|
[
"Linux-syscall-note",
"GPL-2.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
OLIMEX/DIY-LAPTOP
|
ae82f4ee79c641d9aee444db9a75f3f6709afa92
|
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
|
refs/heads/rel3
| 2023-08-04T01:54:19.483792
| 2023-04-03T07:18:12
| 2023-04-03T07:18:12
| 80,094,055
| 507
| 92
|
Apache-2.0
| 2023-04-03T07:05:59
| 2017-01-26T07:25:50
|
C
|
UTF-8
|
C
| false
| false
| 3,078
|
c
|
pm.c
|
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright 2007 rPath, Inc. - All Rights Reserved
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2.
*
* ----------------------------------------------------------------------- */
/*
* Prepare the machine for transition to protected mode.
*/
#include "boot.h"
#include <asm/segment.h>
/*
* Invoke the realmode switch hook if present; otherwise
* disable all interrupts.
*/
static void realmode_switch_hook(void)
{
if (boot_params.hdr.realmode_swtch) {
asm volatile("lcallw *%0"
: : "m" (boot_params.hdr.realmode_swtch)
: "eax", "ebx", "ecx", "edx");
} else {
asm volatile("cli");
outb(0x80, 0x70); /* Disable NMI */
io_delay();
}
}
/*
* Disable all interrupts at the legacy PIC.
*/
static void mask_all_interrupts(void)
{
outb(0xff, 0xa1); /* Mask all interrupts on the secondary PIC */
io_delay();
outb(0xfb, 0x21); /* Mask all but cascade on the primary PIC */
io_delay();
}
/*
* Reset IGNNE# if asserted in the FPU.
*/
static void reset_coprocessor(void)
{
outb(0, 0xf0);
io_delay();
outb(0, 0xf1);
io_delay();
}
/*
* Set up the GDT
*/
struct gdt_ptr {
u16 len;
u32 ptr;
} __attribute__((packed));
static void setup_gdt(void)
{
/* There are machines which are known to not boot with the GDT
being 8-byte unaligned. Intel recommends 16 byte alignment. */
static const u64 boot_gdt[] __attribute__((aligned(16))) = {
/* CS: code, read/execute, 4 GB, base 0 */
[GDT_ENTRY_BOOT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff),
/* DS: data, read/write, 4 GB, base 0 */
[GDT_ENTRY_BOOT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff),
/* TSS: 32-bit tss, 104 bytes, base 4096 */
/* We only have a TSS here to keep Intel VT happy;
we don't actually use it for anything. */
[GDT_ENTRY_BOOT_TSS] = GDT_ENTRY(0x0089, 4096, 103),
};
/* Xen HVM incorrectly stores a pointer to the gdt_ptr, instead
of the gdt_ptr contents. Thus, make it static so it will
stay in memory, at least long enough that we switch to the
proper kernel GDT. */
static struct gdt_ptr gdt;
gdt.len = sizeof(boot_gdt)-1;
gdt.ptr = (u32)&boot_gdt + (ds() << 4);
asm volatile("lgdtl %0" : : "m" (gdt));
}
/*
* Set up the IDT
*/
static void setup_idt(void)
{
static const struct gdt_ptr null_idt = {0, 0};
asm volatile("lidtl %0" : : "m" (null_idt));
}
/*
* Actual invocation sequence
*/
void go_to_protected_mode(void)
{
/* Hook before leaving real mode, also disables interrupts */
realmode_switch_hook();
/* Enable the A20 gate */
if (enable_a20()) {
puts("A20 gate not responding, unable to boot...\n");
die();
}
/* Reset coprocessor (IGNNE#) */
reset_coprocessor();
/* Mask all interrupts in the PIC */
mask_all_interrupts();
/* Actual transition to protected mode... */
setup_idt();
setup_gdt();
protected_mode_jump(boot_params.hdr.code32_start,
(u32)&boot_params + (ds() << 4));
}
|
65bf97376298ca0630325f8e87ae8ea7a7c945e0
|
03666e5f961946fc1a0ac67781ac1425562ef0d7
|
/src/common/parser/Sequence.C
|
985f4f15b968acc865fd00fe2852529f4b5f50c0
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
visit-dav/visit
|
e9f81b4d4b9b9930a0db9d5282cd1bcabf465e2e
|
601ae46e0bef2e18425b482a755d03490ade0493
|
refs/heads/develop
| 2023-09-06T08:19:38.397058
| 2023-09-05T21:29:32
| 2023-09-05T21:29:32
| 165,565,988
| 335
| 120
|
BSD-3-Clause
| 2023-09-14T00:53:37
| 2019-01-13T23:27:26
|
C
|
UTF-8
|
C
| false
| false
| 7,023
|
c
|
Sequence.C
|
// Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
// Project developers. See the top-level LICENSE file for dates and other
// details. No copyright assignment is required to contribute to VisIt.
#include "Sequence.h"
#include "Colors.h"
using std::vector;
//static const char dotchar = 183;
static const char dotchar = 'o';
static std::string dot = std::string() + TermBold + TermGreen + dotchar + TermReset;
// ****************************************************************************
// Constructor: Sequence::Sequence
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
Sequence::Sequence()
{
len = 0;
}
// ****************************************************************************
// Copy Constructor: Sequence::Sequence
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
Sequence::Sequence(const Sequence &s)
{
len = s.len;
symbols = s.symbols;
}
// ****************************************************************************
// Constructor: Sequence::Sequence
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
Sequence::Sequence(const Symbol &s)
{
len = 1;
symbols.push_back(&s);
}
// ****************************************************************************
// Method: Sequence::AddSymbol
//
// Purpose:
// Append a symbol to this sequence.
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
void
Sequence::AddSymbol(const Symbol &s)
{
len++;
symbols.push_back(&s);
}
// ****************************************************************************
// Method: Sequence::Print
//
// Purpose:
// Print the sequence.
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// Modifications:
// Jeremy Meredith, Wed Aug 6 15:56:21 EDT 2008
// Handle symbol printing better.
//
// ****************************************************************************
void
Sequence::Print(ostream &o, int pos) const
{
if (symbols.empty())
{
o << "e ";
if (pos == 0)
o << dot.c_str() << " ";
}
else
{
for (size_t i=0; i<symbols.size(); i++)
{
if ((size_t)pos == i) o << dot.c_str() << " ";
{
int tt = symbols[i]->GetTerminalType();
if (symbols[i]->IsNonTerminal())
o << *(symbols[i]) << " ";
else if (tt == '\\')
o << "(backslash) ";
else if (tt < 256)
o << char(tt) << " ";
else
o << *(symbols[i]) << " ";
}
}
if ((size_t)pos == symbols.size()) o << dot.c_str() << " ";
}
}
// ****************************************************************************
// Method: Sequence::IsNullable
//
// Purpose:
// Returns true if this sequence can produce the empty set.
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
bool
Sequence::IsNullable(const vector<const Rule*> &rules) const
{
if (symbols.empty())
return true;
for (size_t i=0; i<symbols.size(); i++)
if (symbols[i]->IsTerminal())
return false;
bool nullable = true;
for (size_t i=0; i<symbols.size() && nullable; i++)
{
nullable = symbols[i]->IsNullable(rules);
}
return nullable;
}
// ****************************************************************************
// Method: Sequence::GetFirstSet
//
// Purpose:
// Get the first set of this sequence.
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
SymbolSet
Sequence::GetFirstSet(const vector<const Rule*> &rules) const
{
SymbolSet first;
for (size_t i=0; i<symbols.size(); i++)
{
if (symbols[i]->IsTerminal())
{
first.insert(symbols[i]);
break;
}
first.merge( symbols[i]->GetFirstSet(rules) );
if (! symbols[i]->IsNullable(rules))
break;
}
return first;
}
// ****************************************************************************
// Method: operator+(Symbol, Symbol)
//
// Purpose:
// Creates a new sequence by joining two symbols.
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
Sequence
operator+(const Symbol &l, const Symbol &r)
{
Sequence s;
s.AddSymbol(l);
s.AddSymbol(r);
return s;
}
// ****************************************************************************
// Method: operator+(Sequence, Symbol)
//
// Purpose:
// Creates a new sequence by appending a symbol.
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
Sequence
operator+(const Sequence &l, const Symbol &r)
{
Sequence s(l);
s.AddSymbol(r);
return s;
}
// ****************************************************************************
// Method: Sequence::GetSubsequence
//
// Purpose:
// Gets the right part of a sequence starting with the given symbol.
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
Sequence
Sequence::GetSubsequence(int pos) const
{
Sequence s;
for (size_t i=pos; i<symbols.size(); i++)
s.AddSymbol(*symbols[i]);
return s;
}
// ****************************************************************************
// Method: Sequence::GetLastTerminal
//
// Purpose:
// Gets the rightmost terminal of the sequence if there is one.
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
const Symbol*
Sequence::GetLastTerminal() const
{
const Symbol *nt = NULL;
for (size_t i=0; i<symbols.size(); i++)
if (symbols[i]->IsTerminal())
nt = symbols[i];
return nt;
}
// ****************************************************************************
// Equality operator: Sequence::operator==
//
// Programmer: Jeremy Meredith
// Creation: April 5, 2002
//
// ****************************************************************************
bool
Sequence::operator==(const Sequence &s) const
{
if (len != s.len)
return false;
for (int i=0; i<len; i++)
if (symbols[i] != s.symbols[i])
return false;
return true;
}
|
757b6cbfd589d6f021abf2bdfebf3a1f3004e240
|
9ceacf33fd96913cac7ef15492c126d96cae6911
|
/regress/lib/libm/msun/fenv_test.c
|
7cb690e961c4e7f22982aa35cb196e5f4fffa56c
|
[] |
no_license
|
openbsd/src
|
ab97ef834fd2d5a7f6729814665e9782b586c130
|
9e79f3a0ebd11a25b4bff61e900cb6de9e7795e9
|
refs/heads/master
| 2023-09-02T18:54:56.624627
| 2023-09-02T15:16:12
| 2023-09-02T15:16:12
| 66,966,208
| 3,394
| 1,235
| null | 2023-08-08T02:42:25
| 2016-08-30T18:18:25
|
C
|
UTF-8
|
C
| false
| false
| 16,607
|
c
|
fenv_test.c
|
/* $OpenBSD: fenv_test.c,v 1.7 2021/12/13 18:04:28 deraadt Exp $ */
/*-
* Copyright (c) 2004 David Schultz <das@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "macros.h"
/*
* Test the correctness and C99-compliance of various fenv.h features.
*/
#include <sys/types.h>
#include <sys/wait.h>
#include <assert.h>
#include <err.h>
#include <fenv.h>
#include <float.h>
#ifndef __OpenBSD__
#include <libutil.h>
#endif
#include <math.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "test-utils.h"
#define NEXCEPTS (sizeof(std_excepts) / sizeof(std_excepts[0]))
static const int std_excepts[] = {
FE_INVALID,
FE_DIVBYZERO,
FE_OVERFLOW,
FE_UNDERFLOW,
FE_INEXACT,
};
/* init_exceptsets() initializes this to the power set of std_excepts[] */
static int std_except_sets[1 << NEXCEPTS];
#pragma STDC FENV_ACCESS ON
/*
* Initialize std_except_sets[] to the power set of std_excepts[]
*/
static __attribute__((constructor)) void
do_setup(void)
{
unsigned i, j, sr;
/* Avoid double output after fork() */
setvbuf(stdout, NULL, _IONBF, 0);
for (i = 0; i < 1 << NEXCEPTS; i++) {
for (sr = i, j = 0; sr != 0; sr >>= 1, j++)
std_except_sets[i] |= std_excepts[j] & ((~sr & 1) - 1);
}
}
/*
* Raise a floating-point exception without relying on the standard
* library routines, which we are trying to test.
*
* XXX We can't raise an {over,under}flow without also raising an
* inexact exception.
*/
static void
raiseexcept(int excepts)
{
volatile double d;
/*
* With a compiler that supports the FENV_ACCESS pragma
* properly, simple expressions like '0.0 / 0.0' should
* be sufficient to generate traps. Unfortunately, we
* need to bring a volatile variable into the equation
* to prevent incorrect optimizations.
*/
if (excepts & FE_INVALID) {
d = 0.0;
d = 0.0 / d;
}
if (excepts & FE_DIVBYZERO) {
d = 0.0;
d = 1.0 / d;
}
if (excepts & FE_OVERFLOW) {
d = DBL_MAX;
d *= 2.0;
}
if (excepts & FE_UNDERFLOW) {
d = DBL_MIN;
d /= DBL_MAX;
}
if (excepts & FE_INEXACT) {
d = DBL_MIN;
d += 1.0;
}
/*
* On the x86 (and some other architectures?) the FPU and
* integer units are decoupled. We need to execute an FWAIT
* or a floating-point instruction to get synchronous exceptions.
*/
d = 1.0;
d += 1.0;
}
/*
* Determine the current rounding mode without relying on the fenv
* routines. This function may raise an inexact exception.
*/
static int
getround(void)
{
volatile double d, e;
/*
* This test works just as well with 0.0 - 0.0, except on ia64
* where 0.0 - 0.0 gives the wrong sign when rounding downwards.
* For ia32 use a volatile double to force 64 bit rounding.
* Otherwise the i387 would use its internal 80 bit stack.
*/
d = 1.0;
d -= 1.0;
if (copysign(1.0, d) < 0.0)
return (FE_DOWNWARD);
d = 1.0;
e = d + (DBL_EPSILON * 3.0 / 4.0);
if (e == 1.0)
return (FE_TOWARDZERO);
e = d + (DBL_EPSILON * 1.0 / 4.0);
if (e > 1.0)
return (FE_UPWARD);
return (FE_TONEAREST);
}
static void
trap_handler(int sig)
{
ATF_CHECK_EQ(SIGFPE, sig);
_exit(0);
}
/*
* This tests checks the default FP environment, so it must be first.
* The memcmp() test below may be too much to ask for, since there
* could be multiple machine-specific default environments.
*/
ATF_TC_WITHOUT_HEAD(dfl_env);
ATF_TC_BODY(dfl_env, tc)
{
#ifndef NO_STRICT_DFL_ENV
fenv_t env;
fegetenv(&env);
/* Print the default environment for debugging purposes. */
hexdump(&env, sizeof(env), "current fenv ", HD_OMIT_CHARS);
hexdump(FE_DFL_ENV, sizeof(env), "default fenv ", HD_OMIT_CHARS);
CHECK_FP_EXCEPTIONS(0, FE_ALL_EXCEPT);
#ifdef __amd64__
/*
* Compare the fields that the AMD [1] and Intel [2] specs say will be
* set once fnstenv returns.
*
* Not all amd64 capable processors implement the fnstenv instruction
* by zero'ing out the env.__x87.__other field (example: AMD Opteron
* 6308). The AMD64/x64 specs aren't explicit on what the
* env.__x87.__other field will contain after fnstenv is executed, so
* the values in env.__x87.__other could be filled with arbitrary
* data depending on how the CPU implements fnstenv.
*
* 1. http://support.amd.com/TechDocs/26569_APM_v5.pdf
* 2. http://www.intel.com/Assets/en_US/PDF/manual/253666.pdf
*/
ATF_CHECK(memcmp(&env.__mxcsr, &FE_DFL_ENV->__mxcsr,
sizeof(env.__mxcsr)) == 0);
ATF_CHECK(memcmp(&env.__x87.__control, &FE_DFL_ENV->__x87.__control,
sizeof(env.__x87.__control)) == 0);
ATF_CHECK(memcmp(&env.__x87.__status, &FE_DFL_ENV->__x87.__status,
sizeof(env.__x87.__status)) == 0);
ATF_CHECK(memcmp(&env.__x87.__tag, &FE_DFL_ENV->__x87.__tag,
sizeof(env.__x87.__tag)) == 0);
#else
ATF_CHECK_EQ(0, memcmp(&env, FE_DFL_ENV, sizeof(env)));
#endif
#endif
CHECK_FP_EXCEPTIONS(0, FE_ALL_EXCEPT);
}
/*
* Test fetestexcept() and feclearexcept().
*/
ATF_TC_WITHOUT_HEAD(fetestclearexcept);
ATF_TC_BODY(fetestclearexcept, tc)
{
int excepts, i;
for (i = 0; i < 1 << NEXCEPTS; i++)
ATF_CHECK_EQ(0, fetestexcept(std_except_sets[i]));
for (i = 0; i < 1 << NEXCEPTS; i++) {
excepts = std_except_sets[i];
/* FE_ALL_EXCEPT might be special-cased, as on i386. */
raiseexcept(excepts);
ATF_CHECK_EQ(excepts, fetestexcept(excepts));
ATF_REQUIRE_EQ(0, feclearexcept(FE_ALL_EXCEPT));
ATF_CHECK_EQ(0, fetestexcept(FE_ALL_EXCEPT));
raiseexcept(excepts);
ATF_CHECK_EQ(excepts, fetestexcept(excepts));
if ((excepts & (FE_UNDERFLOW | FE_OVERFLOW)) != 0) {
excepts |= FE_INEXACT;
ATF_CHECK_EQ(excepts, (fetestexcept(ALL_STD_EXCEPT) | FE_INEXACT));
} else {
ATF_CHECK_EQ(excepts, fetestexcept(ALL_STD_EXCEPT));
}
ATF_CHECK_EQ(0, feclearexcept(excepts));
ATF_CHECK_EQ(0, fetestexcept(ALL_STD_EXCEPT));
}
}
/*
* Test fegetexceptflag() and fesetexceptflag().
*
* Prerequisites: fetestexcept(), feclearexcept()
*/
ATF_TC_WITHOUT_HEAD(fegsetexceptflag);
ATF_TC_BODY(fegsetexceptflag, tc)
{
fexcept_t flag;
int excepts, i;
CHECK_FP_EXCEPTIONS(0, FE_ALL_EXCEPT);
for (i = 0; i < 1 << NEXCEPTS; i++) {
excepts = std_except_sets[i];
ATF_CHECK_EQ(0, fegetexceptflag(&flag, excepts));
raiseexcept(ALL_STD_EXCEPT);
ATF_CHECK_EQ(0, fesetexceptflag(&flag, excepts));
ATF_CHECK_EQ((ALL_STD_EXCEPT ^ excepts), fetestexcept(ALL_STD_EXCEPT));
ATF_CHECK_EQ(0, fegetexceptflag(&flag, FE_ALL_EXCEPT));
ATF_REQUIRE_EQ(0, feclearexcept(FE_ALL_EXCEPT));
ATF_CHECK_EQ(0, fesetexceptflag(&flag, excepts));
ATF_CHECK_EQ(0, fetestexcept(ALL_STD_EXCEPT));
ATF_CHECK_EQ(0, fesetexceptflag(&flag, ALL_STD_EXCEPT ^ excepts));
ATF_CHECK_EQ((ALL_STD_EXCEPT ^ excepts), fetestexcept(ALL_STD_EXCEPT));
ATF_REQUIRE_EQ(0, feclearexcept(FE_ALL_EXCEPT));
}
}
/*
* Test feraiseexcept().
*
* Prerequisites: fetestexcept(), feclearexcept()
*/
ATF_TC_WITHOUT_HEAD(feraiseexcept);
ATF_TC_BODY(feraiseexcept, tc)
{
int excepts, i;
for (i = 0; i < 1 << NEXCEPTS; i++) {
excepts = std_except_sets[i];
ATF_CHECK_EQ(0, fetestexcept(FE_ALL_EXCEPT));
ATF_CHECK_EQ(0, feraiseexcept(excepts));
if ((excepts & (FE_UNDERFLOW | FE_OVERFLOW)) != 0) {
excepts |= FE_INEXACT;
ATF_CHECK_EQ(excepts, (fetestexcept(ALL_STD_EXCEPT) | FE_INEXACT));
} else {
ATF_CHECK_EQ(excepts, fetestexcept(ALL_STD_EXCEPT));
}
ATF_REQUIRE_EQ(0, feclearexcept(FE_ALL_EXCEPT));
}
ATF_CHECK_EQ(0, feraiseexcept(FE_INVALID | FE_DIVBYZERO));
ATF_CHECK_EQ((FE_INVALID | FE_DIVBYZERO), fetestexcept(ALL_STD_EXCEPT));
ATF_CHECK_EQ(0, feraiseexcept(FE_OVERFLOW | FE_UNDERFLOW | FE_INEXACT));
ATF_CHECK_EQ(ALL_STD_EXCEPT, fetestexcept(ALL_STD_EXCEPT));
ATF_REQUIRE_EQ(0, feclearexcept(FE_ALL_EXCEPT));
}
/*
* Test fegetround() and fesetround().
*/
ATF_TC_WITHOUT_HEAD(fegsetround);
ATF_TC_BODY(fegsetround, tc)
{
ATF_CHECK_EQ(FE_TONEAREST, fegetround());
ATF_CHECK_EQ(FE_TONEAREST, getround());
ATF_CHECK_EQ(1, FLT_ROUNDS);
ATF_CHECK_EQ(0, fesetround(FE_DOWNWARD));
ATF_CHECK_EQ(FE_DOWNWARD, fegetround());
ATF_CHECK_EQ(FE_DOWNWARD, getround());
ATF_CHECK_EQ(3, FLT_ROUNDS);
ATF_CHECK_EQ(0, fesetround(FE_UPWARD));
ATF_CHECK_EQ(FE_UPWARD, getround());
ATF_CHECK_EQ(FE_UPWARD, fegetround());
ATF_CHECK_EQ(2, FLT_ROUNDS);
ATF_CHECK_EQ(0, fesetround(FE_TOWARDZERO));
ATF_CHECK_EQ(FE_TOWARDZERO, getround());
ATF_CHECK_EQ(FE_TOWARDZERO, fegetround());
ATF_CHECK_EQ(0, FLT_ROUNDS);
ATF_CHECK_EQ(0, fesetround(FE_TONEAREST));
ATF_CHECK_EQ(FE_TONEAREST, getround());
ATF_CHECK_EQ(1, FLT_ROUNDS);
ATF_REQUIRE_EQ(0, feclearexcept(FE_ALL_EXCEPT));
}
/*
* Test fegetenv() and fesetenv().
*
* Prerequisites: fetestexcept(), feclearexcept(), fegetround(), fesetround()
*/
ATF_TC_WITHOUT_HEAD(fegsetenv);
ATF_TC_BODY(fegsetenv, tc)
{
fenv_t env1, env2;
int excepts, i;
for (i = 0; i < 1 << NEXCEPTS; i++) {
excepts = std_except_sets[i];
ATF_CHECK_EQ(0, fetestexcept(FE_ALL_EXCEPT));
ATF_CHECK_EQ(FE_TONEAREST, fegetround());
ATF_CHECK_EQ(0, fegetenv(&env1));
/*
* fe[gs]etenv() should be able to save and restore
* exception flags without the spurious inexact
* exceptions that afflict raiseexcept().
*/
raiseexcept(excepts);
if ((excepts & (FE_UNDERFLOW | FE_OVERFLOW)) != 0 &&
(excepts & FE_INEXACT) == 0)
ATF_CHECK_EQ(0, feclearexcept(FE_INEXACT));
fesetround(FE_DOWNWARD);
ATF_CHECK_EQ(0, fegetenv(&env2));
ATF_CHECK_EQ(0, fesetenv(&env1));
ATF_CHECK_EQ(0, fetestexcept(FE_ALL_EXCEPT));
ATF_CHECK_EQ(FE_TONEAREST, fegetround());
ATF_CHECK_EQ(0, fesetenv(&env2));
/*
* Some platforms like powerpc may set extra exception bits. Since
* only standard exceptions are tested, mask against ALL_STD_EXCEPT
*/
ATF_CHECK_EQ(excepts, (fetestexcept(FE_ALL_EXCEPT) & ALL_STD_EXCEPT));
ATF_CHECK_EQ(FE_DOWNWARD, fegetround());
ATF_CHECK_EQ(0, fesetenv(&env1));
ATF_CHECK_EQ(0, fetestexcept(FE_ALL_EXCEPT));
ATF_CHECK_EQ(FE_TONEAREST, fegetround());
}
}
/*
* Test fegetexcept(), fedisableexcept(), and feenableexcept().
*
* Prerequisites: fetestexcept(), feraiseexcept()
*/
ATF_TC_WITHOUT_HEAD(masking);
ATF_TC_BODY(masking, tc)
{
#if !defined(__arm__) && !defined(__aarch64__) && !defined(__riscv)
struct sigaction act;
int except, pass, raise, status;
unsigned i;
ATF_REQUIRE_EQ(0, (fegetexcept() & ALL_STD_EXCEPT));
/*
* Some CPUs, e.g. AArch64 QEMU does not support trapping on FP
* exceptions. In that case the trap enable bits are all RAZ/WI, so
* writing to those bits will be ignored and the the next read will
* return all zeroes for those bits. Skip the test if no floating
* point exceptions are supported and mark it XFAIL if some are missing.
*/
ATF_REQUIRE_EQ(0, (feenableexcept(FE_ALL_EXCEPT)));
except = fegetexcept();
if (except == 0) {
atf_tc_skip("CPU does not support trapping on floating point "
"exceptions.");
} else if ((except & ALL_STD_EXCEPT) != ALL_STD_EXCEPT) {
atf_tc_expect_fail("Not all floating point exceptions can be "
"set to trap: %#x vs %#x", except, ALL_STD_EXCEPT);
}
fedisableexcept(FE_ALL_EXCEPT);
ATF_CHECK_EQ(0, (feenableexcept(FE_INVALID|FE_OVERFLOW) & ALL_STD_EXCEPT));
ATF_CHECK_EQ((FE_INVALID | FE_OVERFLOW), (feenableexcept(FE_UNDERFLOW) & ALL_STD_EXCEPT));
ATF_CHECK_EQ((FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW), (fedisableexcept(FE_OVERFLOW) & ALL_STD_EXCEPT));
ATF_CHECK_EQ((FE_INVALID | FE_UNDERFLOW), (fegetexcept() & ALL_STD_EXCEPT));
ATF_CHECK_EQ((FE_INVALID | FE_UNDERFLOW), (fedisableexcept(FE_ALL_EXCEPT) & ALL_STD_EXCEPT));
ATF_CHECK_EQ(0, (fegetexcept() & ALL_STD_EXCEPT));
sigemptyset(&act.sa_mask);
act.sa_flags = 0;
act.sa_handler = trap_handler;
for (pass = 0; pass < 2; pass++) {
for (i = 0; i < NEXCEPTS; i++) {
except = std_excepts[i];
/* over/underflow may also raise inexact */
if (except == FE_INEXACT)
raise = FE_DIVBYZERO | FE_INVALID;
else
raise = ALL_STD_EXCEPT ^ except;
/*
* We need to fork a child process because
* there isn't a portable way to recover from
* a floating-point exception.
*/
switch(fork()) {
case 0: /* child */
ATF_CHECK_EQ(0, (fegetexcept() & ALL_STD_EXCEPT));
ATF_REQUIRE_EQ(0, (feenableexcept(except) & ALL_STD_EXCEPT));
ATF_CHECK_EQ(except, fegetexcept());
raiseexcept(raise);
ATF_CHECK_EQ(0, feraiseexcept(raise));
ATF_CHECK_EQ(raise, fetestexcept(ALL_STD_EXCEPT));
ATF_CHECK_EQ(0, sigaction(SIGFPE, &act, NULL));
switch (pass) {
case 0:
raiseexcept(except);
case 1:
feraiseexcept(except);
default:
ATF_REQUIRE(0);
}
ATF_REQUIRE(0);
default: /* parent */
ATF_REQUIRE(wait(&status) > 0);
/*
* Avoid assert() here so that it's possible
* to examine a failed child's core dump.
*/
if (!WIFEXITED(status))
errx(1, "child aborted\n");
ATF_CHECK_EQ(0, WEXITSTATUS(status));
break;
case -1: /* error */
ATF_REQUIRE(0);
}
}
}
ATF_CHECK_EQ(0, fetestexcept(FE_ALL_EXCEPT));
#endif
}
/*
* Test feholdexcept() and feupdateenv().
*
* Prerequisites: fetestexcept(), fegetround(), fesetround(),
* fedisableexcept(), feenableexcept()
*/
ATF_TC_WITHOUT_HEAD(feholdupdate);
ATF_TC_BODY(feholdupdate, tc)
{
fenv_t env;
struct sigaction act;
int except, pass, status, raise;
unsigned i;
sigemptyset(&act.sa_mask);
act.sa_flags = 0;
act.sa_handler = trap_handler;
for (pass = 0; pass < 2; pass++) {
for (i = 0; i < NEXCEPTS; i++) {
except = std_excepts[i];
/* over/underflow may also raise inexact */
if (except == FE_INEXACT)
raise = FE_DIVBYZERO | FE_INVALID;
else
raise = ALL_STD_EXCEPT ^ except;
/*
* We need to fork a child process because
* there isn't a portable way to recover from
* a floating-point exception.
*/
switch(fork()) {
case 0: /* child */
/*
* We don't want to cause a fatal exception in
* the child until the second pass, so we can
* check other properties of feupdateenv().
*/
if (pass == 1)
ATF_REQUIRE_EQ(0,
feenableexcept(except) &
ALL_STD_EXCEPT);
raiseexcept(raise);
ATF_CHECK_EQ(0, fesetround(FE_DOWNWARD));
ATF_CHECK_EQ(0, feholdexcept(&env));
ATF_CHECK_EQ(0, fetestexcept(FE_ALL_EXCEPT));
raiseexcept(except);
ATF_CHECK_EQ(0, fesetround(FE_UPWARD));
if (pass == 1)
ATF_CHECK_EQ(0, sigaction(SIGFPE, &act, NULL));
ATF_CHECK_EQ(0, feupdateenv(&env));
ATF_CHECK_EQ(FE_DOWNWARD, fegetround());
ATF_CHECK_EQ((except | raise), fetestexcept(ALL_STD_EXCEPT));
ATF_CHECK_EQ(0, pass);
_exit(0);
default: /* parent */
ATF_REQUIRE(wait(&status) > 0);
/*
* Avoid assert() here so that it's possible
* to examine a failed child's core dump.
*/
if (!WIFEXITED(status))
errx(1, "child aborted\n");
ATF_CHECK_EQ(0, WEXITSTATUS(status));
break;
case -1: /* error */
ATF_REQUIRE(0);
}
}
#if defined(__arm__) || defined(__aarch64__) || defined(__riscv)
break;
#endif
}
ATF_CHECK_EQ(0, fetestexcept(FE_ALL_EXCEPT));
}
ATF_TP_ADD_TCS(tp)
{
ATF_TP_ADD_TC(tp, dfl_env);
ATF_TP_ADD_TC(tp, fetestclearexcept);
ATF_TP_ADD_TC(tp, fegsetexceptflag);
ATF_TP_ADD_TC(tp, feraiseexcept);
ATF_TP_ADD_TC(tp, fegsetround);
ATF_TP_ADD_TC(tp, fegsetenv);
ATF_TP_ADD_TC(tp, masking);
ATF_TP_ADD_TC(tp, feholdupdate);
return (atf_no_error());
}
|
ba68b55c351ea21ff68a2a58139bdbdc65dcae64
|
f79dec3c4033ca3cbb55d8a51a748cc7b8b6fbab
|
/net/xl2tpd/patches/patch-network.c
|
3f44457001713e646ddb51b0096362a0efb4e48f
|
[] |
no_license
|
jsonn/pkgsrc
|
fb34c4a6a2d350e8e415f3c4955d4989fcd86881
|
c1514b5f4a3726d90e30aa16b0c209adbc276d17
|
refs/heads/trunk
| 2021-01-24T09:10:01.038867
| 2017-07-07T15:49:43
| 2017-07-07T15:49:43
| 2,095,004
| 106
| 47
| null | 2016-09-19T09:26:01
| 2011-07-23T23:49:04
|
Makefile
|
UTF-8
|
C
| false
| false
| 4,558
|
c
|
patch-network.c
|
$NetBSD: patch-network.c,v 1.4 2015/02/19 22:27:59 joerg Exp $
Handle not having IP_PKTINFO
Handle not having SO_NO_CHECK
Don't set control buf if controllen == 0
Avoid pointer aliasing issue and fix test that was done in the wrong
byte order
--- network.c.orig 2014-01-16 22:02:04.000000000 +0000
+++ network.c
@@ -85,24 +85,26 @@ int init_network (void)
gconfig.ipsecsaref=0;
}
-
- arg=1;
- if(setsockopt(server_socket, IPPROTO_IP, IP_PKTINFO, (char*)&arg, sizeof(arg)) != 0) {
- l2tp_log(LOG_CRIT, "setsockopt IP_PKTINFO: %s\n", strerror(errno));
- }
#else
{
l2tp_log(LOG_INFO, "No attempt being made to use IPsec SAref's since we're not on a Linux machine.\n");
}
-
+#endif
+#ifdef IP_PKTINFO
+ arg=1;
+ if(setsockopt(server_socket, IPPROTO_IP, IP_PKTINFO, (char*)&arg, sizeof(arg)) != 0) {
+ l2tp_log(LOG_CRIT, "setsockopt IP_PKTINFO: %s\n", strerror(errno));
+ }
#endif
/* turn off UDP checksums */
+#ifdef SO_NO_CHECK
arg=1;
if (setsockopt(server_socket, SOL_SOCKET, SO_NO_CHECK , (void*)&arg,
sizeof(arg)) ==-1) {
l2tp_log(LOG_INFO, "unable to turn off UDP checksums");
}
+#endif
#ifdef USE_KERNEL
if (gconfig.forceuserspace)
@@ -135,7 +137,7 @@ int init_network (void)
return 0;
}
-inline void extract (void *buf, int *tunnel, int *call)
+static inline void extract (void *buf, int *tunnel, int *call)
{
/*
* Extract the tunnel and call #'s, and fix the order of the
@@ -155,15 +157,13 @@ inline void extract (void *buf, int *tun
}
}
-inline void fix_hdr (void *buf)
+static inline void fix_hdr (void *buf)
{
/*
* Fix the byte order of the header
*/
-
- struct payload_hdr *p = (struct payload_hdr *) buf;
- _u16 ver = ntohs (p->ver);
- if (CTBIT (p->ver))
+ _u16 ver = ntohs (*(_u16 *)buf);
+ if (CTBIT (ver))
{
/*
* Control headers are always
@@ -280,12 +280,18 @@ void control_xmit (void *b)
void udp_xmit (struct buffer *buf, struct tunnel *t)
{
struct cmsghdr *cmsg;
- char cbuf[CMSG_SPACE(sizeof (unsigned int) + sizeof (struct in_pktinfo))];
+ char cbuf[CMSG_SPACE(sizeof (unsigned int)
+#ifdef IP_PKTINFO
+ + sizeof (struct in_pktinfo)
+#endif
+ )];
unsigned int *refp;
struct msghdr msgh;
int err;
struct iovec iov;
+#ifdef IP_PKTINFO
struct in_pktinfo *pktinfo;
+#endif
int finallen;
/*
@@ -312,7 +318,7 @@ void udp_xmit (struct buffer *buf, struc
finallen = cmsg->cmsg_len;
}
-
+#ifdef IP_PKTINFO
if (t->my_addr.ipi_addr.s_addr){
if ( ! cmsg) {
@@ -331,7 +337,9 @@ void udp_xmit (struct buffer *buf, struc
finallen += cmsg->cmsg_len;
}
-
+#endif
+ if (finallen == 0)
+ msgh.msg_control = NULL;
msgh.msg_controllen = finallen;
iov.iov_base = buf->start;
@@ -426,7 +434,9 @@ void network_thread ()
* our network socket. Control handling is no longer done here.
*/
struct sockaddr_in from;
+#ifdef IP_PKTINFO
struct in_pktinfo to;
+#endif
unsigned int fromlen;
int tunnel, call; /* Tunnel and call */
int recvsize; /* Length of data received */
@@ -506,7 +516,9 @@ void network_thread ()
buf->len -= PAYLOAD_BUF;
memset(&from, 0, sizeof(from));
+#ifdef IP_PKTINFO
memset(&to, 0, sizeof(to));
+#endif
fromlen = sizeof(from);
@@ -557,13 +569,16 @@ void network_thread ()
for (cmsg = CMSG_FIRSTHDR(&msgh);
cmsg != NULL;
cmsg = CMSG_NXTHDR(&msgh,cmsg)) {
+#ifdef IP_PKTINFO
/* extract destination(our) addr */
if (cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_PKTINFO) {
struct in_pktinfo* pktInfo = ((struct in_pktinfo*)CMSG_DATA(cmsg));
to = *pktInfo;
+ continue;
}
+#endif
/* extract IPsec info out */
- else if (gconfig.ipsecsaref && cmsg->cmsg_level == IPPROTO_IP
+ if (gconfig.ipsecsaref && cmsg->cmsg_level == IPPROTO_IP
&& cmsg->cmsg_type == gconfig.sarefnum) {
unsigned int *refp;
@@ -592,6 +607,8 @@ void network_thread ()
if (gconfig.packet_dump)
{
+ struct payload_hdr *p = (struct payload_hdr *) buf->start;
+ l2tp_log(LOG_DEBUG, "ver = 0x%x\n", p->ver);
do_packet_dump (buf);
}
if (!
@@ -627,9 +644,11 @@ void network_thread ()
}
else
{
+#ifdef IP_PKTINFO
if (c->container) {
c->container->my_addr = to;
}
+#endif
buf->peer = from;
/* Handle the packet */
|
66994b4a9c9e1dea984381d4d217920341c2ed43
|
79d343002bb63a44f8ab0dbac0c9f4ec54078c3a
|
/lib/libc/wasi/libc-top-half/musl/src/string/strdup.c
|
d4c274494f654e2fe04ce7384431f5dfdb817c03
|
[
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-musl-exception",
"MIT",
"LLVM-exception",
"Apache-2.0",
"NCSA"
] |
permissive
|
ziglang/zig
|
4aa75d8d3bcc9e39bf61d265fd84b7f005623fc5
|
f4c9e19bc3213c2bc7e03d7b06d7129882f39f6c
|
refs/heads/master
| 2023-08-31T13:16:45.980913
| 2023-08-31T05:50:29
| 2023-08-31T05:50:29
| 40,276,274
| 25,560
| 2,399
|
MIT
| 2023-09-14T21:09:50
| 2015-08-06T00:51:28
|
Zig
|
UTF-8
|
C
| false
| false
| 169
|
c
|
strdup.c
|
#include <stdlib.h>
#include <string.h>
char *strdup(const char *s)
{
size_t l = strlen(s);
char *d = malloc(l+1);
if (!d) return NULL;
return memcpy(d, s, l+1);
}
|
30003f8aecd5ad97e399b548e45db674c24be9b6
|
331640994b1b6f66c1639278571ddbdc6c8c0751
|
/src/java/nxt_jni_OutputStream.c
|
170b33ba937c2cf51ecef2d9c1a114f5e98dff62
|
[
"Apache-2.0"
] |
permissive
|
nginx/unit
|
eabcd067eaa60f4bdcf0cfaffe7d9932add2c66a
|
9b22b6957bc87b3df002d0bc691fdae6a20abdac
|
refs/heads/master
| 2023-09-04T02:02:13.581700
| 2023-08-30T16:07:24
| 2023-08-30T16:07:24
| 102,627,638
| 4,649
| 452
|
Apache-2.0
| 2023-09-12T01:28:22
| 2017-09-06T15:45:30
|
C
|
UTF-8
|
C
| false
| false
| 5,681
|
c
|
nxt_jni_OutputStream.c
|
/*
* Copyright (C) NGINX, Inc.
*/
#include <nxt_auto_config.h>
#include <jni.h>
#include <nxt_unit.h>
#include "nxt_jni.h"
#include "nxt_jni_OutputStream.h"
#include "nxt_jni_URLClassLoader.h"
static void JNICALL nxt_java_OutputStream_writeByte(JNIEnv *env, jclass cls,
jlong req_info_ptr, jint b);
static nxt_unit_buf_t *nxt_java_OutputStream_req_buf(JNIEnv *env,
nxt_unit_request_info_t *req);
static void JNICALL nxt_java_OutputStream_write(JNIEnv *env, jclass cls,
jlong req_info_ptr, jarray b, jint off, jint len);
static void JNICALL nxt_java_OutputStream_flush(JNIEnv *env, jclass cls,
jlong req_info_ptr);
static void JNICALL nxt_java_OutputStream_close(JNIEnv *env, jclass cls,
jlong req_info_ptr);
static jclass nxt_java_OutputStream_class;
int
nxt_java_initOutputStream(JNIEnv *env, jobject cl)
{
int res;
jclass cls;
cls = nxt_java_loadClass(env, cl, "nginx.unit.OutputStream");
if (cls == NULL) {
return NXT_UNIT_ERROR;
}
nxt_java_OutputStream_class = (*env)->NewGlobalRef(env, cls);
(*env)->DeleteLocalRef(env, cls);
cls = nxt_java_OutputStream_class;
JNINativeMethod os_methods[] = {
{ (char *) "write",
(char *) "(JI)V",
nxt_java_OutputStream_writeByte },
{ (char *) "write",
(char *) "(J[BII)V",
nxt_java_OutputStream_write },
{ (char *) "flush",
(char *) "(J)V",
nxt_java_OutputStream_flush },
{ (char *) "close",
(char *) "(J)V",
nxt_java_OutputStream_close },
};
res = (*env)->RegisterNatives(env, nxt_java_OutputStream_class,
os_methods,
sizeof(os_methods) / sizeof(os_methods[0]));
nxt_unit_debug(NULL, "registered OutputStream methods: %d", res);
if (res != 0) {
(*env)->DeleteGlobalRef(env, cls);
return NXT_UNIT_ERROR;
}
return NXT_UNIT_OK;
}
static void JNICALL
nxt_java_OutputStream_writeByte(JNIEnv *env, jclass cls, jlong req_info_ptr,
jint b)
{
nxt_unit_buf_t *buf;
nxt_unit_request_info_t *req;
nxt_java_request_data_t *data;
req = nxt_jlong2ptr(req_info_ptr);
data = req->data;
buf = nxt_java_OutputStream_req_buf(env, req);
if (buf == NULL) {
return;
}
*buf->free++ = b;
if ((uint32_t) (buf->free - buf->start) >= data->buf_size) {
nxt_java_OutputStream_flush_buf(env, req);
}
}
int
nxt_java_OutputStream_flush_buf(JNIEnv *env, nxt_unit_request_info_t *req)
{
int rc;
nxt_java_request_data_t *data;
data = req->data;
if (!nxt_unit_response_is_init(req)) {
rc = nxt_unit_response_init(req, 200, 0, 0);
if (rc != NXT_UNIT_OK) {
nxt_java_throw_IOException(env, "Failed to allocate response");
return rc;
}
}
if (!nxt_unit_response_is_sent(req)) {
rc = nxt_unit_response_send(req);
if (rc != NXT_UNIT_OK) {
nxt_java_throw_IOException(env, "Failed to send response headers");
return rc;
}
}
if (data->buf != NULL) {
rc = nxt_unit_buf_send(data->buf);
if (rc != NXT_UNIT_OK) {
nxt_java_throw_IOException(env, "Failed to send buffer");
} else {
data->buf = NULL;
}
} else {
rc = NXT_UNIT_OK;
}
return rc;
}
static nxt_unit_buf_t *
nxt_java_OutputStream_req_buf(JNIEnv *env, nxt_unit_request_info_t *req)
{
uint32_t size;
nxt_unit_buf_t *buf;
nxt_java_request_data_t *data;
data = req->data;
buf = data->buf;
if (buf == NULL || buf->free >= buf->end) {
size = data->buf_size == 0 ? nxt_unit_buf_min() : data->buf_size;
buf = nxt_unit_response_buf_alloc(req, size);
if (buf == NULL) {
nxt_java_throw_IOException(env, "Failed to allocate buffer");
return NULL;
}
data->buf = buf;
}
return buf;
}
static void JNICALL
nxt_java_OutputStream_write(JNIEnv *env, jclass cls, jlong req_info_ptr,
jarray b, jint off, jint len)
{
int rc;
jint copy;
uint8_t *ptr;
nxt_unit_buf_t *buf;
nxt_unit_request_info_t *req;
nxt_java_request_data_t *data;
req = nxt_jlong2ptr(req_info_ptr);
data = req->data;
ptr = (*env)->GetPrimitiveArrayCritical(env, b, NULL);
while (len > 0) {
buf = nxt_java_OutputStream_req_buf(env, req);
if (buf == NULL) {
return;
}
copy = buf->end - buf->free;
copy = copy < len ? copy : len;
memcpy(buf->free, ptr + off, copy);
buf->free += copy;
len -= copy;
off += copy;
if ((uint32_t) (buf->free - buf->start) >= data->buf_size) {
rc = nxt_java_OutputStream_flush_buf(env, req);
if (rc != NXT_UNIT_OK) {
break;
}
}
}
(*env)->ReleasePrimitiveArrayCritical(env, b, ptr, 0);
}
static void JNICALL
nxt_java_OutputStream_flush(JNIEnv *env, jclass cls, jlong req_info_ptr)
{
nxt_unit_request_info_t *req;
nxt_java_request_data_t *data;
req = nxt_jlong2ptr(req_info_ptr);
data = req->data;
if (data->buf != NULL && data->buf->free > data->buf->start) {
nxt_java_OutputStream_flush_buf(env, req);
}
}
static void JNICALL
nxt_java_OutputStream_close(JNIEnv *env, jclass cls, jlong req_info_ptr)
{
nxt_java_OutputStream_flush_buf(env, nxt_jlong2ptr(req_info_ptr));
}
|
2c9acb25f1526511f63418dac8a6a42ea3ee282f
|
a11e4bdd6157b22067288d860507fa20abea033d
|
/src/modules/conf_theme/e_int_config_borders.c
|
6d07d36e6d50166d085b1294d08bb241354677da
|
[
"BSD-2-Clause"
] |
permissive
|
JeffHoogland/moksha
|
85a0f39dc97daa61d35e64a511013d21552db288
|
c56013644d8cd8e32101bb38a8d1e4ebd9d47f37
|
refs/heads/master
| 2023-08-24T05:39:13.415948
| 2023-08-22T19:49:32
| 2023-08-22T19:49:32
| 34,704,822
| 179
| 51
|
NOASSERTION
| 2023-09-06T14:41:31
| 2015-04-28T02:59:19
|
C
|
UTF-8
|
C
| false
| false
| 8,215
|
c
|
e_int_config_borders.c
|
#include "e.h"
static E_Config_Dialog_View *_config_view_new(void);
static void *_create_data(E_Config_Dialog *cfd);
static void _free_data(E_Config_Dialog *cfd, E_Config_Dialog_Data *cfdata);
static int _basic_apply(E_Config_Dialog *cfd, E_Config_Dialog_Data *cfdata);
static Evas_Object *_basic_create(E_Config_Dialog *cfd, Evas *evas, E_Config_Dialog_Data *cfdata);
static int _basic_check_changed(E_Config_Dialog *cfd, E_Config_Dialog_Data *cfdata);
static void _fill_data(E_Config_Dialog_Data *cfdata);
static void _basic_apply_border(E_Config_Dialog_Data *cfdata);
struct _E_Config_Dialog_Data
{
E_Border *border;
E_Container *container;
const char *bordername;
int remember_border;
};
E_Config_Dialog *
e_int_config_borders(E_Container *con, const char *params __UNUSED__)
{
E_Config_Dialog *cfd;
E_Config_Dialog_View *v;
if (e_config_dialog_find("E", "appearance/borders")) return NULL;
v = _config_view_new();
if (!v) return NULL;
cfd = e_config_dialog_new(con, _("Default Border Style"),
"E", "appearance/borders",
"preferences-system-windows", 0, v, con);
return cfd;
}
E_Config_Dialog *
e_int_config_borders_border(E_Container *con __UNUSED__, const char *params)
{
E_Config_Dialog *cfd;
E_Config_Dialog_View *v;
E_Border *bd;
if (!params) return NULL;
bd = NULL;
sscanf(params, "%p", &bd);
if (!bd) return NULL;
v = _config_view_new();
if (!v) return NULL;
cfd = e_config_dialog_new(bd->zone->container,
_("Window Border Selection"),
"E", "_config_border_border_style_dialog",
"preferences-system-windows", 0, v, bd);
bd->border_border_dialog = cfd;
return cfd;
}
static E_Config_Dialog_View *
_config_view_new(void)
{
E_Config_Dialog_View *v;
v = E_NEW(E_Config_Dialog_View, 1);
if (!v) return NULL;
v->create_cfdata = _create_data;
v->free_cfdata = _free_data;
v->basic.create_widgets = _basic_create;
v->basic.apply_cfdata = _basic_apply;
v->basic.check_changed = _basic_check_changed;
v->override_auto_apply = 1;
return v;
}
static void *
_create_data(E_Config_Dialog *cfd)
{
E_Config_Dialog_Data *cfdata;
cfdata = E_NEW(E_Config_Dialog_Data, 1);
cfdata->container = NULL;
cfdata->border = NULL;
if (E_OBJECT(cfd->data)->type == E_CONTAINER_TYPE)
cfdata->container = cfd->data;
else
cfdata->border = cfd->data;
_fill_data(cfdata);
return cfdata;
}
static void
_fill_data(E_Config_Dialog_Data *cfdata)
{
if (cfdata->border)
{
if ((cfdata->border->remember) &&
(cfdata->border->remember->apply & E_REMEMBER_APPLY_BORDER))
{
cfdata->remember_border = 1;
}
cfdata->bordername = eina_stringshare_add(cfdata->border->client.border.name);
}
else
cfdata->bordername = eina_stringshare_add(e_config->theme_default_border_style);
}
static void
_free_data(E_Config_Dialog *cfd __UNUSED__, E_Config_Dialog_Data *cfdata)
{
if (cfdata->border)
cfdata->border->border_border_dialog = NULL;
eina_stringshare_del(cfdata->bordername);
E_FREE(cfdata);
}
static int
_basic_check_changed(E_Config_Dialog *cfd __UNUSED__, E_Config_Dialog_Data *cfdata)
{
Eina_Bool remch = ((cfdata->remember_border &&
!((cfdata->border->remember) &&
(cfdata->border->remember->apply & E_REMEMBER_APPLY_BORDER))) ||
(!cfdata->remember_border && cfdata->border &&
((cfdata->border->remember) &&
(cfdata->border->remember->apply & E_REMEMBER_APPLY_BORDER))));
if (cfdata->border)
return (cfdata->bordername != cfdata->border->client.border.name) || (remch);
else
return (cfdata->bordername != e_config->theme_default_border_style) || (remch);
}
static int
_basic_apply(E_Config_Dialog *cfd __UNUSED__, E_Config_Dialog_Data *cfdata)
{
if (cfdata->border)
_basic_apply_border(cfdata);
else if (cfdata->container)
{
Eina_List *l;
E_Border *bd;
eina_stringshare_replace(&e_config->theme_default_border_style, cfdata->bordername);
EINA_LIST_FOREACH(e_border_client_list(), l, bd)
{
bd->changed = 1;
bd->client.border.changed = 1;
}
}
e_config_save_queue();
return 1;
}
static void
_basic_apply_border(E_Config_Dialog_Data *cfdata)
{
if ((!cfdata->border->lock_border) && (!cfdata->border->shaded))
{
eina_stringshare_replace(&cfdata->border->bordername, cfdata->bordername);
cfdata->border->client.border.changed = 1;
cfdata->border->changed = 1;
}
if (cfdata->remember_border)
{
E_Remember *rem = cfdata->border->remember;
if (!rem)
{
rem = e_remember_new();
if (rem) e_remember_use(rem);
}
if (rem)
{
rem->apply |= E_REMEMBER_APPLY_BORDER;
e_remember_default_match_set(rem, cfdata->border);
eina_stringshare_replace(&rem->prop.border, cfdata->border->bordername);
cfdata->border->remember = rem;
e_remember_update(cfdata->border);
}
}
else
{
if (cfdata->border->remember)
{
cfdata->border->remember->apply &= ~E_REMEMBER_APPLY_BORDER;
if (cfdata->border->remember->apply == 0)
{
e_remember_unuse(cfdata->border->remember);
e_remember_del(cfdata->border->remember);
cfdata->border->remember = NULL;
}
}
}
}
static Evas_Object *
_basic_create(E_Config_Dialog *cfd, Evas *evas, E_Config_Dialog_Data *cfdata)
{
Evas_Object *o, *ol, *ob, *oj, *orect, *of;
Evas_Coord w, h;
Eina_List *borders;
int n = 1, sel = 0;
const char *str, *tmp;
e_dialog_resizable_set(cfd->dia, 1);
if (cfdata->border)
tmp = cfdata->border->client.border.name;
else
tmp = e_config->theme_default_border_style;
o = e_widget_list_add(evas, 0, 0);
of = e_widget_framelist_add(evas, _("Default Border Style"), 0);
e_widget_framelist_content_align_set(of, 0.0, 0.0);
ol = e_widget_ilist_add(evas, 96, 96, &(cfdata->bordername));
borders = e_theme_border_list();
orect = evas_object_rectangle_add(evas);
evas_object_color_set(orect, 0, 0, 0, 128);
evas_event_freeze(evas_object_evas_get(ol));
edje_freeze();
e_widget_ilist_freeze(ol);
e_widget_ilist_append(ol, orect, "borderless", NULL, NULL, "borderless");
EINA_LIST_FREE(borders, str)
{
char buf[4096];
ob = e_livethumb_add(evas);
e_livethumb_vsize_set(ob, 128, 128);
oj = edje_object_add(e_livethumb_evas_get(ob));
snprintf(buf, sizeof(buf), "e/widgets/border/%s/border", str);
e_theme_edje_object_set(oj, "base/theme/borders", buf);
e_livethumb_thumb_set(ob, oj);
orect = evas_object_rectangle_add(e_livethumb_evas_get(ob));
evas_object_color_set(orect, 0, 0, 0, 128);
evas_object_show(orect);
edje_object_part_swallow(oj, "e.swallow.client", orect);
e_widget_ilist_append(ol, ob, (char *)str, NULL, NULL, str);
if (tmp == str) sel = n;
n++;
eina_stringshare_del(str);
}
e_widget_size_min_get(ol, &w, &h);
e_widget_size_min_set(ol, w > 200 ? w : 200, 280);
e_widget_ilist_go(ol);
e_widget_ilist_selected_set(ol, sel);
e_widget_ilist_thaw(ol);
edje_thaw();
evas_event_thaw(evas_object_evas_get(ol));
e_widget_framelist_object_append(of, ol);
e_widget_list_object_append(o, of, 1, 1, 0.5);
if (cfdata->border)
{
ob = e_widget_check_add(evas, _("Remember this Border for this window next time it appears"),
&(cfdata->remember_border));
e_widget_list_object_append(o, ob, 1, 0, 0.0);
}
return o;
}
|
85c1a5062f965ff364413fd4c25d388f646a4950
|
de21f9075f55640514c29ef0f1fe3f0690845764
|
/regression/cbmc/array-cell-sensitivity14/test.c
|
ddec5e9b2959639e42b9a025e40083748dfd7510
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-4-Clause"
] |
permissive
|
diffblue/cbmc
|
975a074ac445febb3b5715f8792beb545522dc18
|
decd2839c2f51a54b2ad0f3e89fdc1b4bf78cd16
|
refs/heads/develop
| 2023-08-31T05:52:05.342195
| 2023-08-30T13:31:51
| 2023-08-30T13:31:51
| 51,877,056
| 589
| 309
|
NOASSERTION
| 2023-09-14T18:49:17
| 2016-02-16T23:03:52
|
C++
|
UTF-8
|
C
| false
| false
| 707
|
c
|
test.c
|
#include <assert.h>
struct A
{
int data;
struct A *children[2];
};
int main(int argc, char **argv)
{
struct A root;
struct A node1, node2;
root.children[0] = argc % 2 ? &node1 : &node2;
root.children[1] = argc % 3 ? &node1 : &node2;
node1.children[0] = argc % 5 ? &node1 : &node2;
node1.children[1] = argc % 7 ? &node1 : &node2;
node2.children[0] = argc % 11 ? &node1 : &node2;
node2.children[1] = argc % 13 ? &node1 : &node2;
int idx1 = 0, idx2 = 1, idx3 = 1, idx4 = 0;
root.children[idx1]->children[idx2]->children[idx3]->children[idx4]->data = 1;
assert(
root.children[idx1]->children[idx2]->children[idx3]->children[idx4]->data ==
1);
assert(node1.data == argc);
}
|
cac76139640093d13451d46a0c4a4f7da1c1ff57
|
d38ed5f31d74a79a054ed55dd9123a8d615283b9
|
/third_party/libgit2/src/userdiff.h
|
91c1f42dc716c98252392a1e4029d8428e3b3c55
|
[
"Apache-2.0",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"GCC-exception-2.0",
"LGPL-2.0-or-later",
"Zlib",
"LGPL-2.1-or-later",
"LGPL-2.1-only",
"ISC",
"MIT"
] |
permissive
|
chigraph/chigraph
|
8ff76a14337da29fa2b2a84d5c35f2eac4156c6e
|
6981bdd6763db54edfe284c1f7d223193584c69a
|
refs/heads/master
| 2022-12-21T13:46:57.273039
| 2022-12-17T18:41:19
| 2022-12-17T18:41:19
| 60,776,831
| 364
| 65
|
Apache-2.0
| 2019-12-13T14:46:43
| 2016-06-09T13:27:37
|
C++
|
UTF-8
|
C
| false
| false
| 7,626
|
h
|
userdiff.h
|
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#ifndef INCLUDE_userdiff_h__
#define INCLUDE_userdiff_h__
/*
* This file isolates the built in diff driver function name patterns.
* Most of these patterns are taken from Git (with permission from the
* original authors for relicensing to libgit2).
*/
typedef struct {
const char *name;
const char *fns;
const char *words;
int flags;
} git_diff_driver_definition;
#define WORD_DEFAULT "|[^[:space:]]|[\xc0-\xff][\x80-\xbf]+"
/*
* These builtin driver definition macros have same signature as in core
* git userdiff.c so that the data can be extracted verbatim
*/
#define PATTERNS(NAME, FN_PATS, WORD_PAT) \
{ NAME, FN_PATS, WORD_PAT WORD_DEFAULT, 0 }
#define IPATTERN(NAME, FN_PATS, WORD_PAT) \
{ NAME, FN_PATS, WORD_PAT WORD_DEFAULT, REG_ICASE }
/*
* The table of diff driver patterns
*
* Function name patterns are a list of newline separated patterns that
* match a function declaration (i.e. the line you want in the hunk header),
* or a negative pattern prefixed with a '!' to reject a pattern (such as
* rejecting goto labels in C code).
*
* Word boundary patterns are just a simple pattern that will be OR'ed with
* the default value above (i.e. whitespace or non-ASCII characters).
*/
static git_diff_driver_definition builtin_defs[] = {
IPATTERN("ada",
"!^(.*[ \t])?(is[ \t]+new|renames|is[ \t]+separate)([ \t].*)?$\n"
"!^[ \t]*with[ \t].*$\n"
"^[ \t]*((procedure|function)[ \t]+.*)$\n"
"^[ \t]*((package|protected|task)[ \t]+.*)$",
/* -- */
"[a-zA-Z][a-zA-Z0-9_]*"
"|[-+]?[0-9][0-9#_.aAbBcCdDeEfF]*([eE][+-]?[0-9_]+)?"
"|=>|\\.\\.|\\*\\*|:=|/=|>=|<=|<<|>>|<>"),
IPATTERN("fortran",
"!^([C*]|[ \t]*!)\n"
"!^[ \t]*MODULE[ \t]+PROCEDURE[ \t]\n"
"^[ \t]*((END[ \t]+)?(PROGRAM|MODULE|BLOCK[ \t]+DATA"
"|([^'\" \t]+[ \t]+)*(SUBROUTINE|FUNCTION))[ \t]+[A-Z].*)$",
/* -- */
"[a-zA-Z][a-zA-Z0-9_]*"
"|\\.([Ee][Qq]|[Nn][Ee]|[Gg][TtEe]|[Ll][TtEe]|[Tt][Rr][Uu][Ee]|[Ff][Aa][Ll][Ss][Ee]|[Aa][Nn][Dd]|[Oo][Rr]|[Nn]?[Ee][Qq][Vv]|[Nn][Oo][Tt])\\."
/* numbers and format statements like 2E14.4, or ES12.6, 9X.
* Don't worry about format statements without leading digits since
* they would have been matched above as a variable anyway. */
"|[-+]?[0-9.]+([AaIiDdEeFfLlTtXx][Ss]?[-+]?[0-9.]*)?(_[a-zA-Z0-9][a-zA-Z0-9_]*)?"
"|//|\\*\\*|::|[/<>=]="),
PATTERNS("html", "^[ \t]*(<[Hh][1-6][ \t].*>.*)$",
"[^<>= \t]+"),
PATTERNS("java",
"!^[ \t]*(catch|do|for|if|instanceof|new|return|switch|throw|while)\n"
"^[ \t]*(([A-Za-z_][A-Za-z_0-9]*[ \t]+)+[A-Za-z_][A-Za-z_0-9]*[ \t]*\\([^;]*)$",
/* -- */
"[a-zA-Z_][a-zA-Z0-9_]*"
"|[-+0-9.e]+[fFlL]?|0[xXbB]?[0-9a-fA-F]+[lL]?"
"|[-+*/<>%&^|=!]="
"|--|\\+\\+|<<=?|>>>?=?|&&|\\|\\|"),
PATTERNS("matlab",
"^[[:space:]]*((classdef|function)[[:space:]].*)$|^%%[[:space:]].*$",
"[a-zA-Z_][a-zA-Z0-9_]*|[-+0-9.e]+|[=~<>]=|\\.[*/\\^']|\\|\\||&&"),
PATTERNS("objc",
/* Negate C statements that can look like functions */
"!^[ \t]*(do|for|if|else|return|switch|while)\n"
/* Objective-C methods */
"^[ \t]*([-+][ \t]*\\([ \t]*[A-Za-z_][A-Za-z_0-9* \t]*\\)[ \t]*[A-Za-z_].*)$\n"
/* C functions */
"^[ \t]*(([A-Za-z_][A-Za-z_0-9]*[ \t]+)+[A-Za-z_][A-Za-z_0-9]*[ \t]*\\([^;]*)$\n"
/* Objective-C class/protocol definitions */
"^(@(implementation|interface|protocol)[ \t].*)$",
/* -- */
"[a-zA-Z_][a-zA-Z0-9_]*"
"|[-+0-9.e]+[fFlL]?|0[xXbB]?[0-9a-fA-F]+[lL]?"
"|[-+*/<>%&^|=!]=|--|\\+\\+|<<=?|>>=?|&&|\\|\\||::|->"),
PATTERNS("pascal",
"^(((class[ \t]+)?(procedure|function)|constructor|destructor|interface|"
"implementation|initialization|finalization)[ \t]*.*)$"
"\n"
"^(.*=[ \t]*(class|record).*)$",
/* -- */
"[a-zA-Z_][a-zA-Z0-9_]*"
"|[-+0-9.e]+|0[xXbB]?[0-9a-fA-F]+"
"|<>|<=|>=|:=|\\.\\."),
PATTERNS("perl",
"^package .*\n"
"^sub [[:alnum:]_':]+[ \t]*"
"(\\([^)]*\\)[ \t]*)?" /* prototype */
/*
* Attributes. A regex can't count nested parentheses,
* so just slurp up whatever we see, taking care not
* to accept lines like "sub foo; # defined elsewhere".
*
* An attribute could contain a semicolon, but at that
* point it seems reasonable enough to give up.
*/
"(:[^;#]*)?"
"(\\{[ \t]*)?" /* brace can come here or on the next line */
"(#.*)?$\n" /* comment */
"^(BEGIN|END|INIT|CHECK|UNITCHECK|AUTOLOAD|DESTROY)[ \t]*"
"(\\{[ \t]*)?" /* brace can come here or on the next line */
"(#.*)?$\n"
"^=head[0-9] .*", /* POD */
/* -- */
"[[:alpha:]_'][[:alnum:]_']*"
"|0[xb]?[0-9a-fA-F_]*"
/* taking care not to interpret 3..5 as (3.)(.5) */
"|[0-9a-fA-F_]+(\\.[0-9a-fA-F_]+)?([eE][-+]?[0-9_]+)?"
"|=>|-[rwxoRWXOezsfdlpSugkbctTBMAC>]|~~|::"
"|&&=|\\|\\|=|//=|\\*\\*="
"|&&|\\|\\||//|\\+\\+|--|\\*\\*|\\.\\.\\.?"
"|[-+*/%.^&<>=!|]="
"|=~|!~"
"|<<|<>|<=>|>>"),
PATTERNS("python", "^[ \t]*((class|def)[ \t].*)$",
/* -- */
"[a-zA-Z_][a-zA-Z0-9_]*"
"|[-+0-9.e]+[jJlL]?|0[xX]?[0-9a-fA-F]+[lL]?"
"|[-+*/<>%&^|=!]=|//=?|<<=?|>>=?|\\*\\*=?"),
PATTERNS("ruby", "^[ \t]*((class|module|def)[ \t].*)$",
/* -- */
"(@|@@|\\$)?[a-zA-Z_][a-zA-Z0-9_]*"
"|[-+0-9.e]+|0[xXbB]?[0-9a-fA-F]+|\\?(\\\\C-)?(\\\\M-)?."
"|//=?|[-+*/<>%&^|=!]=|<<=?|>>=?|===|\\.{1,3}|::|[!=]~"),
PATTERNS("bibtex", "(@[a-zA-Z]{1,}[ \t]*\\{{0,1}[ \t]*[^ \t\"@',\\#}{~%]*).*$",
"[={}\"]|[^={}\" \t]+"),
PATTERNS("tex", "^(\\\\((sub)*section|chapter|part)\\*{0,1}\\{.*)$",
"\\\\[a-zA-Z@]+|\\\\.|[a-zA-Z0-9\x80-\xff]+"),
PATTERNS("cpp",
/* Jump targets or access declarations */
"!^[ \t]*[A-Za-z_][A-Za-z_0-9]*:[[:space:]]*($|/[/*])\n"
/* functions/methods, variables, and compounds at top level */
"^((::[[:space:]]*)?[A-Za-z_].*)$",
/* -- */
"[a-zA-Z_][a-zA-Z0-9_]*"
"|[-+0-9.e]+[fFlL]?|0[xXbB]?[0-9a-fA-F]+[lLuU]*"
"|[-+*/<>%&^|=!]=|--|\\+\\+|<<=?|>>=?|&&|\\|\\||::|->\\*?|\\.\\*"),
PATTERNS("csharp",
/* Keywords */
"!^[ \t]*(do|while|for|if|else|instanceof|new|return|switch|case|throw|catch|using)\n"
/* Methods and constructors */
"^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[<>@._[:alnum:]]+[ \t]*\\(.*\\))[ \t]*$\n"
/* Properties */
"^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[@._[:alnum:]]+)[ \t]*$\n"
/* Type definitions */
"^[ \t]*(((static|public|internal|private|protected|new|unsafe|sealed|abstract|partial)[ \t]+)*(class|enum|interface|struct)[ \t]+.*)$\n"
/* Namespace */
"^[ \t]*(namespace[ \t]+.*)$",
/* -- */
"[a-zA-Z_][a-zA-Z0-9_]*"
"|[-+0-9.e]+[fFlL]?|0[xXbB]?[0-9a-fA-F]+[lL]?"
"|[-+*/<>%&^|=!]=|--|\\+\\+|<<=?|>>=?|&&|\\|\\||::|->"),
PATTERNS("php",
"^[ \t]*(((public|private|protected|static|final)[ \t]+)*((class|function)[ \t].*))$",
/* -- */
"[a-zA-Z_][a-zA-Z0-9_]*"
"|[-+0-9.e]+[fFlL]?|0[xX]?[0-9a-fA-F]+[lL]?"
"|[-+*/<>%&^|=!]=|--|\\+\\+|<<=?|>>=?|&&|\\|\\||::|->"),
PATTERNS("javascript",
"([a-zA-Z_$][a-zA-Z0-9_$]*(\\.[a-zA-Z0-9_$]+)*[ \t]*=[ \t]*function([ \t][a-zA-Z_$][a-zA-Z0-9_$]*)?[^\\{]*)\n"
"([a-zA-Z_$][a-zA-Z0-9_$]*[ \t]*:[ \t]*function([ \t][a-zA-Z_$][a-zA-Z0-9_$]*)?[^\\{]*)\n"
"[^a-zA-Z0-9_\\$](function([ \t][a-zA-Z_$][a-zA-Z0-9_$]*)?[^\\{]*)",
/* -- */
"[a-zA-Z_][a-zA-Z0-9_]*"
"|[-+0-9.e]+[fFlL]?|0[xX]?[0-9a-fA-F]+[lL]?"
"|[-+*/<>%&^|=!]=|--|\\+\\+|<<=?|>>=?|&&|\\|\\||::|->"),
};
#undef IPATTERN
#undef PATTERNS
#undef WORD_DEFAULT
#endif
|
b16781132a0f3b032111d3f28114c9494acaac3d
|
f367e4b66a1ee42e85830b31df88f63723c36a47
|
/lib/cmetrics/src/cmt_decode_opentelemetry.c
|
aad4250710e0c0c11e99dae8521208111f02410c
|
[
"Apache-2.0"
] |
permissive
|
fluent/fluent-bit
|
06873e441162b92941024e9a7e9e8fc934150bf7
|
1a41f49dc2f3ae31a780caa9ffd6137b1d703065
|
refs/heads/master
| 2023-09-05T13:44:55.347372
| 2023-09-05T10:14:33
| 2023-09-05T10:14:33
| 29,933,948
| 4,907
| 1,565
|
Apache-2.0
| 2023-09-14T10:17:02
| 2015-01-27T20:41:52
|
C
|
UTF-8
|
C
| false
| false
| 40,397
|
c
|
cmt_decode_opentelemetry.c
|
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* CMetrics
* ========
* Copyright 2021-2022 The CMetrics Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cmetrics/cmetrics.h>
#include <cmetrics/cmt_metric.h>
#include <cmetrics/cmt_map.h>
#include <cmetrics/cmt_gauge.h>
#include <cmetrics/cmt_counter.h>
#include <cmetrics/cmt_summary.h>
#include <cmetrics/cmt_histogram.h>
#include <cmetrics/cmt_untyped.h>
#include <cmetrics/cmt_compat.h>
#include <cmetrics/cmt_decode_opentelemetry.h>
static struct cfl_variant *clone_variant(Opentelemetry__Proto__Common__V1__AnyValue *source);
static int clone_array(struct cfl_array *target,
Opentelemetry__Proto__Common__V1__ArrayValue *source);
static int clone_array_entry(struct cfl_array *target,
Opentelemetry__Proto__Common__V1__AnyValue *source);
static int clone_kvlist(struct cfl_kvlist *target,
Opentelemetry__Proto__Common__V1__KeyValueList *source);
static int clone_kvlist_entry(struct cfl_kvlist *target,
Opentelemetry__Proto__Common__V1__KeyValue *source);
static struct cmt_map_label *create_label(char *caption, size_t length);
static int append_new_map_label_key(struct cmt_map *map, char *name);
static int append_new_metric_label_value(struct cmt_metric *metric, char *name, size_t length);
static struct cfl_variant *clone_variant(Opentelemetry__Proto__Common__V1__AnyValue *source)
{
struct cfl_kvlist *new_child_kvlist;
struct cfl_array *new_child_array;
struct cfl_variant *result_instance = NULL;
int result;
if (source == NULL) {
return NULL;
}
if (source->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_STRING_VALUE) {
result_instance = cfl_variant_create_from_string(source->string_value);
}
else if (source->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_BOOL_VALUE) {
result_instance = cfl_variant_create_from_bool(source->bool_value);
}
else if (source->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_INT_VALUE) {
result_instance = cfl_variant_create_from_int64(source->int_value);
}
else if (source->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_DOUBLE_VALUE) {
result_instance = cfl_variant_create_from_double(source->double_value);
}
else if (source->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_KVLIST_VALUE) {
new_child_kvlist = cfl_kvlist_create();
if (new_child_kvlist == NULL) {
return NULL;
}
result_instance = cfl_variant_create_from_kvlist(new_child_kvlist);
if (result_instance == NULL) {
cfl_kvlist_destroy(new_child_kvlist);
return NULL;
}
result = clone_kvlist(new_child_kvlist, source->kvlist_value);
if (result) {
cfl_variant_destroy(result_instance);
return NULL;
}
}
else if (source->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_ARRAY_VALUE) {
new_child_array = cfl_array_create(source->array_value->n_values);
if (new_child_array == NULL) {
return NULL;
}
result_instance = cfl_variant_create_from_array(new_child_array);
if (result_instance == NULL) {
cfl_array_destroy(new_child_array);
return NULL;
}
result = clone_array(new_child_array, source->array_value);
if (result) {
cfl_variant_destroy(result_instance);
return NULL;
}
}
else if (source->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_BYTES_VALUE) {
result_instance = cfl_variant_create_from_bytes((char *) source->bytes_value.data, source->bytes_value.len);
}
return result_instance;
}
static int clone_array(struct cfl_array *target,
Opentelemetry__Proto__Common__V1__ArrayValue *source)
{
int result;
size_t index;
result = CMT_DECODE_OPENTELEMETRY_SUCCESS;
for (index = 0 ;
result == CMT_DECODE_OPENTELEMETRY_SUCCESS &&
index < source->n_values ;
index++) {
result = clone_array_entry(target, source->values[index]);
}
return result;
}
static int clone_array_entry(struct cfl_array *target,
Opentelemetry__Proto__Common__V1__AnyValue *source)
{
struct cfl_variant *new_child_instance;
int result;
new_child_instance = clone_variant(source);
if (new_child_instance == NULL) {
return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
}
result = cfl_array_append(target, new_child_instance);
if (result) {
cfl_variant_destroy(new_child_instance);
return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
}
return CMT_DECODE_OPENTELEMETRY_SUCCESS;
}
static int clone_kvlist(struct cfl_kvlist *target,
Opentelemetry__Proto__Common__V1__KeyValueList *source)
{
int result;
size_t index;
result = CMT_DECODE_OPENTELEMETRY_SUCCESS;
for (index = 0 ;
result == CMT_DECODE_OPENTELEMETRY_SUCCESS &&
index < source->n_values ;
index++) {
result = clone_kvlist_entry(target, source->values[index]);
}
return 0;
}
static int clone_kvlist_entry(struct cfl_kvlist *target,
Opentelemetry__Proto__Common__V1__KeyValue *source)
{
struct cfl_variant *new_child_instance;
int result;
new_child_instance = clone_variant(source->value);
if (new_child_instance == NULL) {
return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
}
result = cfl_kvlist_insert(target, source->key, new_child_instance);
if (result) {
cfl_variant_destroy(new_child_instance);
return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
}
return CMT_DECODE_OPENTELEMETRY_SUCCESS;
}
struct cfl_kvlist *get_or_create_external_metadata_kvlist(
struct cfl_kvlist *root, char *key)
{
struct cfl_variant *entry_variant;
struct cfl_kvlist *entry_kvlist;
int result;
entry_variant = cfl_kvlist_fetch(root, key);
if (entry_variant == NULL) {
entry_kvlist = cfl_kvlist_create();
if (entry_kvlist == NULL) {
return NULL;
}
result = cfl_kvlist_insert_kvlist(root,
key,
entry_kvlist);
if (result != 0) {
cfl_kvlist_destroy(entry_kvlist);
return NULL;
}
}
else {
entry_kvlist = entry_variant->data.as_kvlist;
}
return entry_kvlist;
}
static struct cmt_map_label *create_label(char *caption, size_t length)
{
struct cmt_map_label *instance;
instance = calloc(1, sizeof(struct cmt_map_label));
if (instance != NULL) {
if (caption != NULL) {
if (length == 0) {
length = strlen(caption);
}
instance->name = cfl_sds_create_len(caption, length);
if (instance->name == NULL) {
cmt_errno();
free(instance);
instance = NULL;
}
}
}
return instance;
}
static int append_new_map_label_key(struct cmt_map *map, char *name)
{
struct cmt_map_label *label;
label = create_label(name, 0);
if (label == NULL) {
return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
}
cfl_list_add(&label->_head, &map->label_keys);
map->label_count++;
return CMT_DECODE_OPENTELEMETRY_SUCCESS;
}
static int append_new_metric_label_value(struct cmt_metric *metric, char *name, size_t length)
{
struct cmt_map_label *label;
label = create_label(name, length);
if (label == NULL) {
return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
}
cfl_list_add(&label->_head, &metric->labels);
return CMT_DECODE_OPENTELEMETRY_SUCCESS;
}
static int decode_data_point_labels(struct cmt *cmt,
struct cmt_map *map,
struct cmt_metric *metric,
size_t attribute_count,
Opentelemetry__Proto__Common__V1__KeyValue **attribute_list)
{
char dummy_label_value[32];
void **value_index_list;
size_t attribute_index;
size_t map_label_index;
size_t map_label_count;
struct cfl_list *label_iterator;
struct cmt_map_label *current_label;
size_t label_index;
int label_found;
Opentelemetry__Proto__Common__V1__KeyValue *attribute;
int result;
result = CMT_DECODE_OPENTELEMETRY_SUCCESS;
if (attribute_count == 0) {
return result;
}
if (attribute_count > 127) {
return CMT_DECODE_OPENTELEMETRY_INVALID_ARGUMENT_ERROR;
}
value_index_list = calloc(128, sizeof(void *));
if (value_index_list == NULL) {
return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
}
for (attribute_index = 0 ;
result == CMT_DECODE_OPENTELEMETRY_SUCCESS &&
attribute_index < attribute_count ;
attribute_index++) {
attribute = attribute_list[attribute_index];
label_found = CMT_FALSE;
label_index = 0;
cfl_list_foreach(label_iterator, &map->label_keys) {
current_label = cfl_list_entry(label_iterator, struct cmt_map_label, _head);
if (strcmp(current_label->name, attribute->key) == 0) {
label_found = CMT_TRUE;
break;
}
label_index++;
}
if (label_found == CMT_FALSE) {
result = append_new_map_label_key(map, attribute->key);
}
if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
value_index_list[label_index] = (void *) attribute;
}
}
map_label_count = cfl_list_size(&map->label_keys);
for (map_label_index = 0 ;
result == CMT_DECODE_OPENTELEMETRY_SUCCESS &&
map_label_index < map_label_count ;
map_label_index++) {
if (value_index_list[map_label_index] != NULL) {
attribute = (Opentelemetry__Proto__Common__V1__KeyValue *)
value_index_list[map_label_index];
if (attribute->value->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_STRING_VALUE) {
result = append_new_metric_label_value(metric, attribute->value->string_value, 0);
}
else if (attribute->value->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_BYTES_VALUE) {
result = append_new_metric_label_value(metric,
(char *) attribute->value->bytes_value.data,
attribute->value->bytes_value.len);
}
else if (attribute->value->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_BOOL_VALUE) {
snprintf(dummy_label_value, sizeof(dummy_label_value) - 1, "%d", attribute->value->bool_value);
result = append_new_metric_label_value(metric, dummy_label_value, 0);
}
else if (attribute->value->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_INT_VALUE) {
snprintf(dummy_label_value, sizeof(dummy_label_value) - 1, "%" PRIi64, attribute->value->int_value);
result = append_new_metric_label_value(metric, dummy_label_value, 0);
}
else if (attribute->value->value_case == OPENTELEMETRY__PROTO__COMMON__V1__ANY_VALUE__VALUE_DOUBLE_VALUE) {
snprintf(dummy_label_value, sizeof(dummy_label_value) - 1, "%.17g", attribute->value->double_value);
result = append_new_metric_label_value(metric, dummy_label_value, 0);
}
else {
result = append_new_metric_label_value(metric, NULL, 0);
}
}
}
free(value_index_list);
return result;
}
/* Convert one OTLP NumberDataPoint into a cmt_metric in 'map'.
 *
 * A data point without attributes claims the map's built-in "static"
 * metric slot (map->metric) the first time one is seen; every other data
 * point gets a freshly allocated cmt_metric whose labels are decoded from
 * the point's attribute list.
 *
 * Returns CMT_DECODE_OPENTELEMETRY_SUCCESS or an error code.
 */
static int decode_numerical_data_point(struct cmt *cmt,
    struct cmt_map *map,
    Opentelemetry__Proto__Metrics__V1__NumberDataPoint *data_point)
{
    int static_metric_detected;
    struct cmt_metric *sample;
    int result;
    double value;

    static_metric_detected = CMT_FALSE;
    result = CMT_DECODE_OPENTELEMETRY_SUCCESS;

    /* Only the first label-less data point maps to the static metric. */
    if (data_point->n_attributes == 0) {
        if (map->metric_static_set == CMT_FALSE) {
            static_metric_detected = CMT_TRUE;
        }
    }

    if (static_metric_detected == CMT_FALSE) {
        sample = calloc(1, sizeof(struct cmt_metric));
        if (sample == NULL) {
            return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
        cfl_list_init(&sample->labels);
        result = decode_data_point_labels(cmt,
                                          map,
                                          sample,
                                          data_point->n_attributes,
                                          data_point->attributes);
        if (result) {
            destroy_label_list(&sample->labels);
            free(sample);
        }
        else {
            cfl_list_add(&sample->_head, &map->metrics);
        }
    }
    else {
        sample = &map->metric;
        map->metric_static_set = CMT_TRUE;
    }

    if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
        value = 0;
        if (data_point->value_case == OPENTELEMETRY__PROTO__METRICS__V1__NUMBER_DATA_POINT__VALUE_AS_INT) {
            /* Negative integer samples are clamped to zero — presumably
             * because the internal representation is unsigned; TODO confirm. */
            if (data_point->as_int < 0) {
                value = 0;
            }
            else {
                value = cmt_math_uint64_to_d64((uint64_t) data_point->as_int);
            }
        }
        else if (data_point->value_case == OPENTELEMETRY__PROTO__METRICS__V1__NUMBER_DATA_POINT__VALUE_AS_DOUBLE) {
            value = data_point->as_double;
        }
        /* Any other value_case leaves value at 0. */
        cmt_metric_set(sample, data_point->time_unix_nano, value);
    }

    return result;
}
/* Decode every NumberDataPoint in 'data_point_list' into 'map'.
 *
 * Stops at the first failing data point and propagates its error code;
 * returns CMT_DECODE_OPENTELEMETRY_SUCCESS when every point decodes.
 */
static int decode_numerical_data_point_list(struct cmt *cmt,
    struct cmt_map *map,
    size_t data_point_count,
    Opentelemetry__Proto__Metrics__V1__NumberDataPoint **data_point_list)
{
    size_t index;
    int result;

    result = CMT_DECODE_OPENTELEMETRY_SUCCESS;

    /* Consistency fix: compare against the named success constant (as the
     * summary list decoder does) instead of the literal 0. */
    for (index = 0 ;
         result == CMT_DECODE_OPENTELEMETRY_SUCCESS &&
         index < data_point_count ; index++) {
        result = decode_numerical_data_point(cmt, map, data_point_list[index]);
    }

    return result;
}
/* Convert one OTLP SummaryDataPoint into a cmt_metric in 'map'.
 *
 * The owning summary's quantile list is initialized lazily from the first
 * data point seen. A data point without attributes claims the map's
 * static metric slot the first time one is seen; every other point gets a
 * dedicated cmt_metric.
 *
 * Returns CMT_DECODE_OPENTELEMETRY_SUCCESS or an error code.
 */
static int decode_summary_data_point(struct cmt *cmt,
    struct cmt_map *map,
    Opentelemetry__Proto__Metrics__V1__SummaryDataPoint *data_point)
{
    int static_metric_detected;
    struct cmt_summary *summary;
    struct cmt_metric *sample;
    int result;
    size_t index;

    result = CMT_DECODE_OPENTELEMETRY_SUCCESS;
    summary = (struct cmt_summary *) map->parent;

    /* First data point defines the summary's quantile set. */
    if (summary->quantiles == NULL) {
        summary->quantiles = calloc(data_point->n_quantile_values,
                                    sizeof(double));
        if (summary->quantiles == NULL) {
            return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
        summary->quantiles_count = data_point->n_quantile_values;
        for (index = 0 ;
             index < data_point->n_quantile_values ;
             index++) {
            summary->quantiles[index] = data_point->quantile_values[index]->quantile;
        }
    }

    static_metric_detected = CMT_FALSE;
    if (data_point->n_attributes == 0) {
        if (map->metric_static_set == CMT_FALSE) {
            static_metric_detected = CMT_TRUE;
        }
    }

    if (static_metric_detected == CMT_FALSE) {
        sample = calloc(1, sizeof(struct cmt_metric));
        if (sample == NULL) {
            return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
        cfl_list_init(&sample->labels);
        result = decode_data_point_labels(cmt,
                                          map,
                                          sample,
                                          data_point->n_attributes,
                                          data_point->attributes);
        if (result) {
            destroy_label_list(&sample->labels);
            free(sample);
            return result;
        }
        else {
            cfl_list_add(&sample->_head, &map->metrics);
        }
    }
    else {
        sample = &map->metric;
        map->metric_static_set = CMT_TRUE;
    }

    if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
        /* Per-sample quantile value storage, sized from this data point.
         * NOTE(review): assumes later data points for the same sample never
         * carry more quantile values than the first one decoded — a larger
         * point would write past sum_quantiles; confirm the producer
         * guarantees a constant quantile set. */
        if (sample->sum_quantiles_set == CMT_FALSE) {
            sample->sum_quantiles = calloc(data_point->n_quantile_values,
                                           sizeof(uint64_t));
            if (sample->sum_quantiles == NULL) {
                return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
            }
            sample->sum_quantiles_set = CMT_TRUE;
            sample->sum_quantiles_count = data_point->n_quantile_values;
        }
        for (index = 0 ;
             index < data_point->n_quantile_values ;
             index++) {
            cmt_summary_quantile_set(sample, data_point->time_unix_nano,
                                     index, data_point->quantile_values[index]->value);
        }
        sample->sum_sum = cmt_math_d64_to_uint64(data_point->sum);
        sample->sum_count = data_point->count;
    }

    return result;
}
/* Decode every SummaryDataPoint in 'data_point_list' into 'map'.
 * Stops at the first failure and returns its error code. */
static int decode_summary_data_point_list(struct cmt *cmt,
    struct cmt_map *map,
    size_t data_point_count,
    Opentelemetry__Proto__Metrics__V1__SummaryDataPoint **data_point_list)
{
    int result = CMT_DECODE_OPENTELEMETRY_SUCCESS;
    size_t entry;

    for (entry = 0 ; entry < data_point_count ; entry++) {
        result = decode_summary_data_point(cmt, map, data_point_list[entry]);

        if (result != CMT_DECODE_OPENTELEMETRY_SUCCESS) {
            break;
        }
    }

    return result;
}
/* Convert one OTLP HistogramDataPoint into a cmt_metric in 'map'.
 *
 * The owning histogram's bucket boundaries are built lazily from the first
 * data point's explicit_bounds. Returns CMT_DECODE_OPENTELEMETRY_SUCCESS
 * or an error code.
 */
static int decode_histogram_data_point(struct cmt *cmt,
    struct cmt_map *map,
    Opentelemetry__Proto__Metrics__V1__HistogramDataPoint *data_point)
{
    int static_metric_detected;
    struct cmt_histogram *histogram;
    struct cmt_metric *sample;
    int result;
    size_t index;

    result = CMT_DECODE_OPENTELEMETRY_SUCCESS;
    histogram = (struct cmt_histogram *) map->parent;

    /* OTLP allows at most one more bucket than explicit bounds.
     * NOTE(review): a malformed input is reported as ALLOCATION_ERROR here,
     * not as a decoding error — intentional? */
    if (data_point->n_bucket_counts > data_point->n_explicit_bounds + 1) {
        return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
    }

    if (histogram->buckets == NULL) {
        histogram->buckets = cmt_histogram_buckets_create_size(data_point->explicit_bounds,
                                                               data_point->n_explicit_bounds);
        if (histogram->buckets == NULL) {
            return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
    }

    /* Only the first label-less data point maps to the static metric. */
    static_metric_detected = CMT_FALSE;
    if (data_point->n_attributes == 0) {
        if (map->metric_static_set == CMT_FALSE) {
            static_metric_detected = CMT_TRUE;
        }
    }

    if (static_metric_detected == CMT_FALSE) {
        sample = calloc(1, sizeof(struct cmt_metric));
        if (sample == NULL) {
            return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
        cfl_list_init(&sample->labels);
        result = decode_data_point_labels(cmt,
                                          map,
                                          sample,
                                          data_point->n_attributes,
                                          data_point->attributes);
        if (result != 0) {
            destroy_label_list(&sample->labels);
            free(sample);
            return result;
        }
        else {
            cfl_list_add(&sample->_head, &map->metrics);
        }
    }
    else {
        sample = &map->metric;
        map->metric_static_set = CMT_TRUE;
    }

    if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
        /* Per-sample bucket storage, sized from this data point (+1 slot).
         * NOTE(review): assumes later data points for the same sample never
         * carry more buckets than the first — confirm upstream. */
        if (sample->hist_buckets == NULL) {
            sample->hist_buckets = calloc(data_point->n_bucket_counts + 1,
                                          sizeof(uint64_t));
            if (sample->hist_buckets == NULL) {
                return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
            }
        }
        for (index = 0 ;
             index < data_point->n_bucket_counts;
             index++) {
            cmt_metric_hist_set(sample, data_point->time_unix_nano,
                                index, data_point->bucket_counts[index]);
        }
        sample->hist_sum = cmt_math_d64_to_uint64(data_point->sum);
        sample->hist_count = data_point->count;
    }

    return result;
}
/* Decode every HistogramDataPoint in 'data_point_list' into 'map'.
 *
 * Stops at the first failing data point and propagates its error code;
 * returns CMT_DECODE_OPENTELEMETRY_SUCCESS when every point decodes.
 */
static int decode_histogram_data_point_list(struct cmt *cmt,
    struct cmt_map *map,
    size_t data_point_count,
    Opentelemetry__Proto__Metrics__V1__HistogramDataPoint **data_point_list)
{
    size_t index;
    int result;

    result = CMT_DECODE_OPENTELEMETRY_SUCCESS;

    /* Consistency fix: compare against the named success constant (as the
     * summary list decoder does) instead of the literal 0. */
    for (index = 0 ;
         result == CMT_DECODE_OPENTELEMETRY_SUCCESS &&
         index < data_point_count ; index++) {
        result = decode_histogram_data_point(cmt, map, data_point_list[index]);
    }

    return result;
}
/* Decode an OTLP Sum into the cmt_counter held in 'instance'.
 *
 * On success, maps the OTLP aggregation temporality onto the cmetrics
 * aggregation type and allows resets when the sum is not monotonic.
 */
static int decode_counter_entry(struct cmt *cmt,
    void *instance,
    Opentelemetry__Proto__Metrics__V1__Sum *metric)
{
    struct cmt_counter *counter;
    int result;

    result = CMT_DECODE_OPENTELEMETRY_SUCCESS;
    counter = (struct cmt_counter *) instance;
    /* Reset the static-metric marker so a label-less data point claims it. */
    counter->map->metric_static_set = 0;
    result = decode_numerical_data_point_list(cmt,
                                              counter->map,
                                              metric->n_data_points,
                                              metric->data_points);
    if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
        if (metric->aggregation_temporality == OPENTELEMETRY__PROTO__METRICS__V1__AGGREGATION_TEMPORALITY__AGGREGATION_TEMPORALITY_DELTA) {
            counter->aggregation_type = CMT_AGGREGATION_TYPE_DELTA;
        }
        else if (metric->aggregation_temporality == OPENTELEMETRY__PROTO__METRICS__V1__AGGREGATION_TEMPORALITY__AGGREGATION_TEMPORALITY_CUMULATIVE) {
            counter->aggregation_type = CMT_AGGREGATION_TYPE_CUMULATIVE;
        }
        else {
            counter->aggregation_type = CMT_AGGREGATION_TYPE_UNSPECIFIED;
        }
        counter->allow_reset = !metric->is_monotonic;
    }
    return result;
}
/* Decode an OTLP Gauge into the cmt_gauge held in 'instance'. */
static int decode_gauge_entry(struct cmt *cmt,
    void *instance,
    Opentelemetry__Proto__Metrics__V1__Gauge *metric)
{
    struct cmt_gauge *gauge = (struct cmt_gauge *) instance;

    /* Reset the static-metric marker so a label-less data point claims it. */
    gauge->map->metric_static_set = 0;

    return decode_numerical_data_point_list(cmt,
                                            gauge->map,
                                            metric->n_data_points,
                                            metric->data_points);
}
/* Decode an OTLP Summary into the cmt_summary held in 'instance'.
 * The placeholder quantile set installed by the constructor is discarded;
 * the real quantiles come from the first decoded data point. */
static int decode_summary_entry(struct cmt *cmt,
    void *instance,
    Opentelemetry__Proto__Metrics__V1__Summary *metric)
{
    struct cmt_summary *summary = (struct cmt_summary *) instance;

    free(summary->quantiles);          /* free(NULL) is a no-op */
    summary->quantiles = NULL;
    summary->quantiles_count = 0;
    summary->map->metric_static_set = 0;

    return decode_summary_data_point_list(cmt,
                                          summary->map,
                                          metric->n_data_points,
                                          metric->data_points);
}
/* Decode an OTLP Histogram into the cmt_histogram held in 'instance'.
 *
 * The non-NULL placeholder bucket pointer passed at creation time (see
 * decode_metrics_entry) is cleared here; real buckets are built from the
 * first data point. On success the OTLP aggregation temporality is mapped
 * onto the cmetrics aggregation type.
 */
static int decode_histogram_entry(struct cmt *cmt,
    void *instance,
    Opentelemetry__Proto__Metrics__V1__Histogram *metric)
{
    struct cmt_histogram *histogram;
    int result;

    histogram = (struct cmt_histogram *) instance;
    histogram->buckets = NULL;
    /* Reset the static-metric marker so a label-less data point claims it. */
    histogram->map->metric_static_set = 0;
    result = decode_histogram_data_point_list(cmt,
                                              histogram->map,
                                              metric->n_data_points,
                                              metric->data_points);
    if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
        if (metric->aggregation_temporality == OPENTELEMETRY__PROTO__METRICS__V1__AGGREGATION_TEMPORALITY__AGGREGATION_TEMPORALITY_DELTA) {
            histogram->aggregation_type = CMT_AGGREGATION_TYPE_DELTA;
        }
        else if (metric->aggregation_temporality == OPENTELEMETRY__PROTO__METRICS__V1__AGGREGATION_TEMPORALITY__AGGREGATION_TEMPORALITY_CUMULATIVE) {
            histogram->aggregation_type = CMT_AGGREGATION_TYPE_CUMULATIVE;
        }
        else {
            histogram->aggregation_type = CMT_AGGREGATION_TYPE_UNSPECIFIED;
        }
    }
    return result;
}
/* Create the matching cmetrics instance (counter / gauge / summary /
 * histogram) for one OTLP Metric and decode its data points into it.
 *
 * The instance is registered on 'cmt' at creation and destroyed again when
 * decoding fails. Metric data cases not handled below are silently skipped
 * (the function returns success).
 */
static int decode_metrics_entry(struct cmt *cmt,
    Opentelemetry__Proto__Metrics__V1__Metric *metric)
{
    char *metric_description;
    char *metric_namespace;
    char *metric_subsystem;
    char *metric_name;
    void *instance;
    int result;

    result = CMT_DECODE_OPENTELEMETRY_SUCCESS;

    metric_name = metric->name;
    /* OTLP names are not split into namespace/subsystem; leave both empty. */
    metric_namespace = "";
    metric_subsystem = "";

    /* Substitute a placeholder when the description is missing or empty. */
    metric_description = metric->description;
    if (metric_description == NULL) {
        metric_description = "-";
    }
    else if (strlen(metric_description) == 0) {
        metric_description = "-";
    }

    if (metric->data_case == OPENTELEMETRY__PROTO__METRICS__V1__METRIC__DATA_SUM) {
        instance = cmt_counter_create(cmt,
                                      metric_namespace,
                                      metric_subsystem,
                                      metric_name,
                                      metric_description,
                                      0, NULL);
        if (instance == NULL) {
            return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
        result = decode_counter_entry(cmt, instance, metric->sum);
        if (result) {
            cmt_counter_destroy(instance);
        }
    }
    else if (metric->data_case == OPENTELEMETRY__PROTO__METRICS__V1__METRIC__DATA_GAUGE) {
        instance = cmt_gauge_create(cmt,
                                    metric_namespace,
                                    metric_subsystem,
                                    metric_name,
                                    metric_description,
                                    0, NULL);
        if (instance == NULL) {
            return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
        result = decode_gauge_entry(cmt, instance, metric->gauge);
        if (result) {
            cmt_gauge_destroy(instance);
        }
    }
    else if (metric->data_case == OPENTELEMETRY__PROTO__METRICS__V1__METRIC__DATA_SUMMARY) {
        instance = cmt_summary_create(cmt,
                                      metric_namespace,
                                      metric_subsystem,
                                      metric_name,
                                      metric_description,
                                      1, (double []) { 0.0 },
                                      0, NULL);
        if (instance == NULL) {
            return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
        /* We are forced to create at least one quantile by the constructor but we
         * don't know the details about it at the moment so we just leave it "open";
         * decode_summary_entry discards it and rebuilds from the data points.
         */
        result = decode_summary_entry(cmt, instance, metric->summary);
        if (result) {
            cmt_summary_destroy(instance);
        }
    }
    else if (metric->data_case == OPENTELEMETRY__PROTO__METRICS__V1__METRIC__DATA_HISTOGRAM) {
        /* The bucket argument is a non-NULL placeholder (the cmt pointer) to
         * satisfy the constructor; decode_histogram_entry resets it to NULL
         * before decoding the real buckets. */
        instance = cmt_histogram_create(cmt,
                                        metric_namespace,
                                        metric_subsystem,
                                        metric_name,
                                        metric_description,
                                        (struct cmt_histogram_buckets *) cmt,
                                        0, NULL);
        if (instance == NULL) {
            return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
        result = decode_histogram_entry(cmt, instance, metric->histogram);
        if (result) {
            cmt_histogram_destroy(instance);
        }
    }
    return result;
}
/* Populate external_metadata["scope"] with the instrumentation scope's
 * metadata (name, version, dropped_attributes_count) and attributes.
 *
 * The "scope"/"metadata"/"attributes" kvlists are created even when 'scope'
 * is NULL. Returns 0 on success or a negative step-specific error code.
 */
static int decode_scope_metadata_and_attributes(struct cfl_kvlist *external_metadata,
    Opentelemetry__Proto__Common__V1__InstrumentationScope *scope)
{
    struct cfl_kvlist *attributes;
    struct cfl_kvlist *metadata;
    int result;
    size_t index;
    struct cfl_kvlist *root;

    root = get_or_create_external_metadata_kvlist(external_metadata, "scope");
    if (root == NULL) {
        return -1;
    }
    metadata = get_or_create_external_metadata_kvlist(root, "metadata");
    if (metadata == NULL) {
        return -2;
    }
    attributes = get_or_create_external_metadata_kvlist(root, "attributes");
    if (attributes == NULL) {
        return -3;
    }
    if (scope == NULL) {
        return 0;
    }
    if (scope->name != NULL) {
        result = cfl_kvlist_insert_string(metadata, "name", scope->name);
        if (result != 0) {
            return -4;
        }
    }
    if (scope->version != NULL) {
        result = cfl_kvlist_insert_string(metadata, "version", scope->version);
        if (result != 0) {
            return -5;
        }
    }
    result = cfl_kvlist_insert_int64(metadata, "dropped_attributes_count", scope->dropped_attributes_count);
    if (result != 0) {
        return -6;
    }
    /* 'result' is 0 (== CMT_DECODE_OPENTELEMETRY_SUCCESS) here, so the loop
     * condition is satisfied on entry and stops at the first clone failure. */
    for (index = 0 ;
         result == CMT_DECODE_OPENTELEMETRY_SUCCESS &&
         index < scope->n_attributes ;
         index++) {
        result = clone_kvlist_entry(attributes,
                                    scope->attributes[index]);
    }
    if (result != 0) {
        return -7;
    }
    return 0;
}
/* Record the ScopeMetrics schema_url (when present) under
 * external_metadata["scope_metrics"]["metadata"].
 * Returns 0 on success or a negative step-specific error code. */
static int decode_scope_metrics_metadata(struct cfl_kvlist *external_metadata,
    Opentelemetry__Proto__Metrics__V1__ScopeMetrics *scope_metrics)
{
    struct cfl_kvlist *root;
    struct cfl_kvlist *metadata;

    root = get_or_create_external_metadata_kvlist(external_metadata,
                                                  "scope_metrics");
    if (root == NULL) {
        return -1;
    }

    metadata = get_or_create_external_metadata_kvlist(root, "metadata");
    if (metadata == NULL) {
        return -2;
    }

    /* The kvlists above are created even when there is nothing to record. */
    if (scope_metrics == NULL || scope_metrics->schema_url == NULL) {
        return 0;
    }

    if (cfl_kvlist_insert_string(metadata, "schema_url",
                                 scope_metrics->schema_url) != 0) {
        return -3;
    }

    return 0;
}
/* Decode one OTLP ScopeMetrics entry into a brand-new cmt context that is
 * appended to 'context_list'.
 *
 * The context is linked into the list before it is fully initialized, so it
 * remains there even on failure — the caller cleans up the whole list with
 * destroy_context_list. Returns CMT_DECODE_OPENTELEMETRY_SUCCESS or an
 * error code.
 */
static int decode_scope_metrics_entry(struct cfl_list *context_list,
    Opentelemetry__Proto__Metrics__V1__ScopeMetrics *metrics)
{
    struct cmt *context;
    int result;
    size_t index;

    context = cmt_create();
    if (context == NULL) {
        return CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
    }
    cfl_list_add(&context->_head, context_list);
    /* Tag the context so consumers can tell where it came from. */
    result = cfl_kvlist_insert_string(context->internal_metadata,
                                      "producer",
                                      "opentelemetry");
    if (result != 0) {
        result = CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
    }
    if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
        result = decode_scope_metadata_and_attributes(context->external_metadata,
                                                      metrics->scope);
        if (result != 0) {
            result = CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
    }
    if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
        result = decode_scope_metrics_metadata(context->external_metadata,
                                               metrics);
        if (result != 0) {
            result = CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
        }
    }
    if (result != CMT_DECODE_OPENTELEMETRY_SUCCESS) {
        return result;
    }
    /* Decode each metric; stop at the first failure. */
    for (index = 0 ;
         result == CMT_DECODE_OPENTELEMETRY_SUCCESS &&
         index < metrics->n_metrics ;
         index++) {
        result = decode_metrics_entry(context,
                                      metrics->metrics[index]);
    }
    return result;
}
/* Populate external_metadata["resource"] with the resource's metadata
 * (dropped_attributes_count) and attributes.
 *
 * The "resource"/"metadata"/"attributes" kvlists are created even when
 * 'resource' is NULL. Returns 0 on success or a negative step-specific
 * error code.
 */
static int decode_resource_metadata_and_attributes(struct cfl_kvlist *external_metadata,
    Opentelemetry__Proto__Resource__V1__Resource *resource)
{
    struct cfl_kvlist *attributes;
    struct cfl_kvlist *metadata;
    int result;
    size_t index;
    struct cfl_kvlist *root;

    root = get_or_create_external_metadata_kvlist(external_metadata, "resource");
    if (root == NULL) {
        return -1;
    }
    metadata = get_or_create_external_metadata_kvlist(root, "metadata");
    if (metadata == NULL) {
        return -2;
    }
    attributes = get_or_create_external_metadata_kvlist(root, "attributes");
    if (attributes == NULL) {
        return -3;
    }
    if (resource == NULL) {
        return 0;
    }
    result = cfl_kvlist_insert_int64(metadata, "dropped_attributes_count", (int64_t) resource->dropped_attributes_count);
    if (result != 0) {
        return -4;
    }
    /* 'result' is 0 (== CMT_DECODE_OPENTELEMETRY_SUCCESS) here, so the loop
     * runs and stops at the first clone failure. */
    for (index = 0 ;
         result == CMT_DECODE_OPENTELEMETRY_SUCCESS &&
         index < resource->n_attributes ;
         index++) {
        result = clone_kvlist_entry(attributes,
                                    resource->attributes[index]);
    }
    if (result != CMT_DECODE_OPENTELEMETRY_SUCCESS) {
        return -5;
    }
    return 0;
}
/* Record the ResourceMetrics schema_url (when present) under
 * external_metadata["resource_metrics"]["metadata"].
 * Returns 0 on success or a negative step-specific error code. */
static int decode_resource_metrics_metadata(struct cfl_kvlist *external_metadata,
    Opentelemetry__Proto__Metrics__V1__ResourceMetrics *resource_metrics)
{
    struct cfl_kvlist *root;
    struct cfl_kvlist *metadata;

    root = get_or_create_external_metadata_kvlist(external_metadata,
                                                  "resource_metrics");
    if (root == NULL) {
        return -1;
    }

    metadata = get_or_create_external_metadata_kvlist(root, "metadata");
    if (metadata == NULL) {
        return -2;
    }

    /* The kvlists above are created even when there is nothing to record. */
    if (resource_metrics == NULL || resource_metrics->schema_url == NULL) {
        return 0;
    }

    if (cfl_kvlist_insert_string(metadata, "schema_url",
                                 resource_metrics->schema_url) != 0) {
        return -3;
    }

    return 0;
}
/* Decode one OTLP ResourceMetrics entry: each contained ScopeMetrics
 * becomes its own cmt context on 'context_list', and the resource-level
 * metadata/attributes are copied into every context just created.
 *
 * cfl_list_entry_last fetches the context appended by the immediately
 * preceding decode_scope_metrics_entry call. Returns
 * CMT_DECODE_OPENTELEMETRY_SUCCESS or an error code.
 */
static int decode_resource_metrics_entry(
    struct cfl_list *context_list,
    Opentelemetry__Proto__Metrics__V1__ResourceMetrics *resource_metrics)
{
    struct cmt *context;
    int result;
    size_t index;

    result = CMT_DECODE_OPENTELEMETRY_SUCCESS;
    for (index = 0 ;
         result == CMT_DECODE_OPENTELEMETRY_SUCCESS &&
         index < resource_metrics->n_scope_metrics ;
         index++) {
        result = decode_scope_metrics_entry(context_list,
                                            resource_metrics->scope_metrics[index]);
        if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
            context = cfl_list_entry_last(context_list, struct cmt, _head);
            if (context != NULL) {
                if (resource_metrics->resource != NULL) {
                    result = decode_resource_metadata_and_attributes(context->external_metadata,
                                                                     resource_metrics->resource);
                    if (result != 0) {
                        result = CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
                    }
                }
                if (result == CMT_DECODE_OPENTELEMETRY_SUCCESS) {
                    result = decode_resource_metrics_metadata(context->external_metadata,
                                                              resource_metrics);
                    if (result != 0) {
                        result = CMT_DECODE_OPENTELEMETRY_ALLOCATION_ERROR;
                    }
                }
            }
        }
    }
    return result;
}
/* Unlink and destroy every cmt context on 'context_list'. */
static void destroy_context_list(struct cfl_list *context_list)
{
    struct cfl_list *entry;
    struct cfl_list *backup;
    struct cmt *context;

    /* Safe iteration: entries are removed while walking the list. */
    cfl_list_foreach_safe(entry, backup, context_list) {
        context = cfl_list_entry(entry, struct cmt, _head);

        cfl_list_del(&context->_head);
        cmt_destroy(context);
    }
}
/* Decode every ResourceMetrics entry of an ExportMetricsServiceRequest,
 * stopping at the first failure. */
static int decode_service_request(struct cfl_list *context_list,
    Opentelemetry__Proto__Collector__Metrics__V1__ExportMetricsServiceRequest *service_request)
{
    int result = CMT_DECODE_OPENTELEMETRY_SUCCESS;
    size_t entry;

    /* A request with zero resource metrics simply decodes to success. */
    for (entry = 0 ;
         entry < service_request->n_resource_metrics ;
         entry++) {
        result = decode_resource_metrics_entry(context_list,
                                               service_request->resource_metrics[entry]);

        if (result != CMT_DECODE_OPENTELEMETRY_SUCCESS) {
            break;
        }
    }

    return result;
}
/* Decode a serialized ExportMetricsServiceRequest starting at
 * in_buf + *offset into a list of cmt contexts (one per scope metrics
 * entry) stored in result_context_list.
 *
 * On failure the partially built contexts are destroyed and an error code
 * is returned; on success the caller owns the list and must release it
 * with cmt_decode_opentelemetry_destroy.
 *
 * NOTE(review): *offset is only read here, never advanced past the
 * consumed bytes — confirm callers do not rely on it being updated.
 */
int cmt_decode_opentelemetry_create(struct cfl_list *result_context_list,
                                    char *in_buf, size_t in_size,
                                    size_t *offset)
{
    Opentelemetry__Proto__Collector__Metrics__V1__ExportMetricsServiceRequest *service_request;
    int result;

    result = CMT_DECODE_OPENTELEMETRY_INVALID_ARGUMENT_ERROR;
    cfl_list_init(result_context_list);
    service_request = opentelemetry__proto__collector__metrics__v1__export_metrics_service_request__unpack(NULL, in_size - *offset,
                                                                                                           (unsigned char *) &in_buf[*offset]);
    if (service_request != NULL) {
        result = decode_service_request(result_context_list, service_request);
        opentelemetry__proto__collector__metrics__v1__export_metrics_service_request__free_unpacked(service_request, NULL);
    }
    if (result != CMT_DECODE_OPENTELEMETRY_SUCCESS) {
        destroy_context_list(result_context_list);
    }
    return result;
}
/* Release a context list produced by cmt_decode_opentelemetry_create.
 * NULL is tolerated as a no-op. */
void cmt_decode_opentelemetry_destroy(struct cfl_list *context_list)
{
    if (context_list == NULL) {
        return;
    }

    destroy_context_list(context_list);
}
|
7118c1a21c52eae19979212b59a9b0f90500ab42
|
e73547787354afd9b717ea57fe8dd0695d161821
|
/src/battle/area/mac/area.c
|
63d42a4531bce4b8399e9efe6ed1f787ba9971f1
|
[] |
no_license
|
pmret/papermario
|
8b514b19653cef8d6145e47499b3636b8c474a37
|
9774b26d93f1045dd2a67e502b6efc9599fb6c31
|
refs/heads/main
| 2023-08-31T07:09:48.951514
| 2023-08-21T18:07:08
| 2023-08-21T18:07:08
| 287,151,133
| 904
| 139
| null | 2023-09-14T02:44:23
| 2020-08-13T01:22:57
|
C
|
UTF-8
|
C
| false
| false
| 1,096
|
c
|
area.c
|
#include "area.h"

/* Battle actor blueprints fought in this area (defined elsewhere). */
extern ActorBlueprint A(chan);
extern ActorBlueprint A(lee);
extern ActorBlueprint A(master1);
extern ActorBlueprint A(master2);
extern ActorBlueprint A(master3);

/* Battle stages available in this area. */
extern Stage A(mac_01);
extern Stage A(mac_02);

/* Single-opponent formations, one per dojo fight. */
Formation A(Formation_00) = {
    ACTOR_BY_IDX(A(chan), BTL_POS_GROUND_C, 10),
};
Formation A(Formation_01) = {
    ACTOR_BY_IDX(A(lee), BTL_POS_GROUND_C, 10),
};
Formation A(Formation_02) = {
    ACTOR_BY_IDX(A(master1), BTL_POS_GROUND_C, 10),
};
Formation A(Formation_03) = {
    ACTOR_BY_IDX(A(master2), BTL_POS_GROUND_C, 10),
};
Formation A(Formation_04) = {
    ACTOR_BY_IDX(A(master3), BTL_POS_GROUND_C, 10),
};

/* Battle list; the Japanese names match the actor identifiers above
 * ("Chan", "Lee", "Master #1..#3"). Terminated by an empty entry. */
BattleList A(Formations) = {
    BATTLE(A(Formation_00), A(mac_02), "チェン"),
    BATTLE(A(Formation_01), A(mac_02), "リー"),
    BATTLE(A(Formation_02), A(mac_02), "シショー その1"),
    BATTLE(A(Formation_03), A(mac_02), "シショー その2"),
    BATTLE(A(Formation_04), A(mac_02), "シショー その3"),
    {},
};

/* Stage table, also terminated by an empty entry. */
StageList A(Stages) = {
    STAGE("mac_01", A(mac_01)),
    STAGE("mac_02", A(mac_02)),
    {},
};
|
125969fe188460945eb330f19c1e2747f5c2d239
|
481d8268f533c0b5527112f9b7d709aaa22ab053
|
/src/dec/xed-decoded-inst.c
|
d2813c609c1084239cf687e32afd08882b2f540e
|
[
"Apache-2.0"
] |
permissive
|
intelxed/xed
|
590f60c564dc75004e51d95773fc1d4d4bfba1d3
|
01a6da8090af84cd52f6c1070377ae6e885b078f
|
refs/heads/main
| 2023-08-25T01:30:27.421743
| 2023-08-21T17:19:26
| 2023-08-21T17:19:26
| 75,980,044
| 1,390
| 175
|
Apache-2.0
| 2023-08-07T11:12:00
| 2016-12-08T22:21:22
|
Python
|
UTF-8
|
C
| false
| false
| 34,316
|
c
|
xed-decoded-inst.c
|
/* BEGIN_LEGAL
Copyright (c) 2023 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
END_LEGAL */
#include "xed-internal-header.h"
#include "xed-decoded-inst.h"
#include "xed-decoded-inst-api.h"
#include "xed-decoded-inst-private.h"
#include "xed-util.h"
#include "xed-operand-values-interface.h"
#include "xed-reg-class.h"
#include "xed-isa-set.h"
#include "xed-ild.h"
/* Map a register-carrying operand name (REG0..REG9, BASE0/1, SEG0/1,
 * INDEX) to the register value captured in the decoded instruction's
 * operand storage. Returns XED_REG_INVALID for any other operand name. */
xed_reg_enum_t xed_decoded_inst_get_reg(const xed_decoded_inst_t* p,
                                        xed_operand_enum_t reg_operand) {
    switch(reg_operand) {
      case XED_OPERAND_REG0: return xed3_operand_get_reg0(p);
      case XED_OPERAND_REG1: return xed3_operand_get_reg1(p);
      case XED_OPERAND_REG2: return xed3_operand_get_reg2(p);
      case XED_OPERAND_REG3: return xed3_operand_get_reg3(p);
      case XED_OPERAND_REG4: return xed3_operand_get_reg4(p);
      case XED_OPERAND_REG5: return xed3_operand_get_reg5(p);
      case XED_OPERAND_REG6: return xed3_operand_get_reg6(p);
      case XED_OPERAND_REG7: return xed3_operand_get_reg7(p);
      case XED_OPERAND_REG8: return xed3_operand_get_reg8(p);
      case XED_OPERAND_REG9: return xed3_operand_get_reg9(p);
      case XED_OPERAND_BASE0: return xed3_operand_get_base0(p);
      case XED_OPERAND_BASE1: return xed3_operand_get_base1(p);
      case XED_OPERAND_SEG0: return xed3_operand_get_seg0(p);
      case XED_OPERAND_SEG1: return xed3_operand_get_seg1(p);
      case XED_OPERAND_INDEX: return xed3_operand_get_index(p);
      default:
        return XED_REG_INVALID;
    }
}
/* Test one attribute bit of the decoded instruction's static xed_inst_t.
 * Requires a successful decode (p->_inst set). */
xed_uint32_t
xed_decoded_inst_get_attribute(const xed_decoded_inst_t* p,
                               xed_attribute_enum_t attr)
{
    xed_assert(p->_inst != 0);
    return xed_inst_get_attribute(p->_inst, attr);
}

/* Return the full attribute bit-set of the decoded instruction.
 * Requires a successful decode (p->_inst set). */
xed_attributes_t
xed_decoded_inst_get_attributes(const xed_decoded_inst_t* p)
{
    xed_assert(p->_inst != 0);
    return xed_inst_get_attributes(p->_inst);
}
/* xrelease is valid when we have:
1: F3 (REP) prefix AND
2: (a) xchg inst. OR
(b) lock prefix OR
(c) mov mem,reg or mov mem,imm where reg is a normal register.
** cmpxchg16b inst. is special and can not have xacquire
*/
/* True when the instruction is an XRELEASE hint: it must be HLE-release
 * capable (HLE_REL_ABLE attribute), carry an F3/REP prefix, and be either
 * atomic (XCHG or LOCK-prefixed) or a MOV (which needs no LOCK). */
xed_uint32_t
xed_decoded_inst_is_xrelease(const xed_decoded_inst_t* p){
    xed_iclass_enum_t iclass;
    const xed_operand_values_t* ov;
    xed_uint32_t rel_able =
        xed_decoded_inst_get_attribute(p,XED_ATTRIBUTE_HLE_REL_ABLE);
    if (rel_able){
        ov = xed_decoded_inst_operands_const(p);
        if (xed_operand_values_has_rep_prefix(ov)){
            iclass = xed_decoded_inst_get_iclass(p);
            if (xed_operand_values_get_atomic(ov) || iclass == XED_ICLASS_MOV){
                /* MOV instructions do not need the LOCK prefix */
                return 1;
            }
        }
    }
    return 0;
}
/* xacquire is valid when we have:
1: F2 (REPNE) prefix
2: xchg inst. OR lock prefix
** cmpxchg16b inst. is special and can not have xacquire
*/
/* True when the instruction is an XACQUIRE hint: it must be HLE-acquire
 * capable (HLE_ACQ_ABLE attribute), carry an F2/REPNE prefix, and be
 * atomic (XCHG or LOCK-prefixed). */
xed_uint32_t
xed_decoded_inst_is_xacquire(const xed_decoded_inst_t* p){
    const xed_operand_values_t* ov;

    if (!xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_HLE_ACQ_ABLE))
        return 0;

    ov = xed_decoded_inst_operands_const(p);
    if (!xed_operand_values_has_repne_prefix(ov))
        return 0;

    return xed_operand_values_get_atomic(ov);
}
/* True when an MPX-capable instruction carries the F2/REPNE prefix that
 * doubles as the BND prefix. */
xed_uint32_t
xed_decoded_inst_has_mpx_prefix(const xed_decoded_inst_t* p){
    const xed_operand_values_t* ov;

    if (!xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_MPX_PREFIX_ABLE))
        return 0;

    ov = xed_decoded_inst_operands_const(p);
    if (xed_operand_values_has_repne_prefix(ov))
        return 1;

    return 0;
}
xed_uint8_t
xed_decoded_inst_get_modrm(const xed_decoded_inst_t* p)
{
return XED_STATIC_CAST(xed_uint8_t,xed3_operand_get_modrm_byte(p));
}
/////////////////////////////////////////////////////////////////////////
/* Branch displacement accessors: sign-extended value, width in bytes,
 * and width in bits. */
xed_int64_t
xed_decoded_inst_get_branch_displacement(const xed_decoded_inst_t* p) {
    return xed_operand_values_get_branch_displacement_int64(p);
}
xed_uint_t
xed_decoded_inst_get_branch_displacement_width(const xed_decoded_inst_t* p) {
    xed_uint_t bits = xed3_operand_get_brdisp_width(p);
    return bits / 8;
}
xed_uint_t
xed_decoded_inst_get_branch_displacement_width_bits(const xed_decoded_inst_t* p) {
    return xed3_operand_get_brdisp_width(p);
}
/////////////////////////////////////////////////////////////////////////
/* Immediate operand accessors. */
xed_uint64_t
xed_decoded_inst_get_unsigned_immediate(const xed_decoded_inst_t* p) {
    return xed_operand_values_get_immediate_uint64(p);
}
xed_int32_t
xed_decoded_inst_get_signed_immediate(const xed_decoded_inst_t* p) {
    /* Truncate the 64-bit internal value to the public 32-bit type. */
    xed_int64_t wide = xed_operand_values_get_immediate_int64(p);
    return XED_STATIC_CAST(xed_int32_t, wide);
}
xed_uint_t
xed_decoded_inst_get_immediate_width(const xed_decoded_inst_t* p) {
    xed_uint_t bits = xed3_operand_get_imm_width(p);
    return bits / 8;
}
xed_uint_t
xed_decoded_inst_get_immediate_width_bits(const xed_decoded_inst_t* p) {
    return xed3_operand_get_imm_width(p);
}
xed_uint_t
xed_decoded_inst_get_immediate_is_signed(const xed_decoded_inst_t* p) {
    return xed3_operand_get_imm0signed(p);
}
/////////////////////////////////////////////////////////////////////////
/* Sign-extended memory displacement of memop mem_idx (0 or 1).
 * MEM1 never has a displacement; returns 0 when none is present. */
xed_int64_t
xed_decoded_inst_get_memory_displacement(const xed_decoded_inst_t* p,
                                         unsigned int mem_idx)
{
    if (xed_operand_values_has_memory_displacement(p))
    {
        if (mem_idx == 0)
            return xed_operand_values_get_memory_displacement_int64(p);
        if (mem_idx == 1)
            return 0;
        xed_assert(mem_idx == 0 || mem_idx == 1);
    }
    return 0;
}
/* Memory displacement width in bytes for memop mem_idx. */
xed_uint_t
xed_decoded_inst_get_memory_displacement_width(const xed_decoded_inst_t* p,
                                               unsigned int mem_idx)
{
    xed_uint_t bits =
        xed_decoded_inst_get_memory_displacement_width_bits(p, mem_idx);
    return bits / 8;
}
/* Memory displacement width in bits for memop mem_idx (0 or 1).
 * MEM1 never has a displacement; returns 0 when none is present. */
xed_uint_t
xed_decoded_inst_get_memory_displacement_width_bits(
    const xed_decoded_inst_t* p,
    unsigned int mem_idx)
{
    if (xed_operand_values_has_memory_displacement(p))
    {
        if (mem_idx == 0)
            return xed_operand_values_get_memory_displacement_length_bits(p);
        if (mem_idx == 1)
            return 0;
        xed_assert(mem_idx == 0 || mem_idx == 1);
    }
    return 0;
}
/* Segment register of memory operand mem_idx (0 or 1). */
xed_reg_enum_t xed_decoded_inst_get_seg_reg(const xed_decoded_inst_t* p,
                                            unsigned int mem_idx) {
    switch(mem_idx) {
      case 0: return XED_STATIC_CAST(xed_reg_enum_t,xed3_operand_get_seg0(p));
      case 1: return XED_STATIC_CAST(xed_reg_enum_t,xed3_operand_get_seg1(p));
      default: xed_assert(mem_idx == 0 || mem_idx == 1);
    }
    return XED_REG_INVALID;
}
/* Base register of memory operand mem_idx (0 or 1). */
xed_reg_enum_t xed_decoded_inst_get_base_reg(const xed_decoded_inst_t* p,
                                             unsigned int mem_idx) {
    switch(mem_idx) {
      case 0: return XED_STATIC_CAST(xed_reg_enum_t,xed3_operand_get_base0(p));
      case 1: return XED_STATIC_CAST(xed_reg_enum_t,xed3_operand_get_base1(p));
      default: xed_assert(mem_idx == 0 || mem_idx == 1);
    }
    return XED_REG_INVALID;
}
/* Index register of memory operand mem_idx; only MEM0 can have one. */
xed_reg_enum_t xed_decoded_inst_get_index_reg(const xed_decoded_inst_t* p,
                                              unsigned int mem_idx) {
    switch(mem_idx) {
      case 0: return XED_STATIC_CAST(xed_reg_enum_t,xed3_operand_get_index(p));
      case 1: return XED_REG_INVALID;
      default: xed_assert(mem_idx == 0 || mem_idx == 1);
    }
    return XED_REG_INVALID;
}
/* Scale factor of memory operand mem_idx; MEM1 never has an index, so its
 * scale is trivially 1. */
xed_uint_t xed_decoded_inst_get_scale(const xed_decoded_inst_t* p,
                                      unsigned int mem_idx) {
    switch(mem_idx) {
      case 0: return xed3_operand_get_scale(p);
      case 1: return 1;
      default: xed_assert(mem_idx == 0 || mem_idx == 1);
    }
    /* Fix: previously returned XED_REG_INVALID (a register-enum constant)
     * from a scale accessor; the numeric value (0) is unchanged. */
    return 0;
}
xed_bool_t xed_decoded_inst_mem_read(const xed_decoded_inst_t* p,
unsigned int mem_idx) {
const xed_inst_t* inst = p->_inst;
const unsigned int noperands = xed_inst_noperands(inst);
unsigned int i;
for( i=0;i<noperands;i++) {
const xed_operand_t* o = xed_inst_operand(inst,i);
if ((mem_idx == 0 && xed_operand_name(o) == XED_OPERAND_MEM0) ||
(mem_idx == 1 && xed_operand_name(o) == XED_OPERAND_MEM1))
switch(xed_decoded_inst_operand_action(p,i))
{
case XED_OPERAND_ACTION_RW:
case XED_OPERAND_ACTION_R:
case XED_OPERAND_ACTION_RCW:
case XED_OPERAND_ACTION_CRW:
case XED_OPERAND_ACTION_CR:
return 1;
default:
break;
}
}
return 0;
}
/* True when memory operand mem_idx (0 or 1) is written, i.e. its operand
 * action includes a write component (W, RW, CW, CRW, RCW). */
xed_bool_t
xed_decoded_inst_mem_written(const xed_decoded_inst_t* p,
                             unsigned int mem_idx)
{
    const xed_inst_t* inst = p->_inst;
    const unsigned int noperands = xed_inst_noperands(inst);
    unsigned int i;
    for( i=0;i<noperands;i++)
    {
        const xed_operand_t* o = xed_inst_operand(inst,i);
        if ((mem_idx == 0 && xed_operand_name(o) == XED_OPERAND_MEM0) ||
            (mem_idx == 1 && xed_operand_name(o) == XED_OPERAND_MEM1))
            switch(xed_decoded_inst_operand_action(p,i))
            {
              case XED_OPERAND_ACTION_RW:
              case XED_OPERAND_ACTION_W:
              case XED_OPERAND_ACTION_RCW:
              case XED_OPERAND_ACTION_CRW:
              case XED_OPERAND_ACTION_CW:
                return 1;
              default:
                break;
            }
    }
    return 0;
}
xed_bool_t
xed_decoded_inst_conditionally_writes_registers(const xed_decoded_inst_t* p )
{
const xed_inst_t* inst = p->_inst;
const unsigned int noperands = xed_inst_noperands(inst);
unsigned int i;
for( i=0;i<noperands;i++) {
switch(xed_decoded_inst_operand_action(p,i)) {
case XED_OPERAND_ACTION_RCW:
case XED_OPERAND_ACTION_CW:
return 1;
default:
break;
}
}
return 0;
}
/* True when memory operand mem_idx (0 or 1) is write-only: its operand
 * action is W or CW (no read component). */
xed_bool_t
xed_decoded_inst_mem_written_only(const xed_decoded_inst_t* p,
                                  unsigned int mem_idx)
{
    const xed_inst_t* inst = p->_inst;
    const unsigned int noperands = xed_inst_noperands(inst);
    unsigned int i;
    for( i=0;i<noperands;i++)
    {
        const xed_operand_t* o = xed_inst_operand(inst,i);
        if ((mem_idx == 0 && xed_operand_name(o) == XED_OPERAND_MEM0) ||
            (mem_idx == 1 && xed_operand_name(o) == XED_OPERAND_MEM1))
            switch(xed_decoded_inst_operand_action(p,i))
            {
              case XED_OPERAND_ACTION_W:
              case XED_OPERAND_ACTION_CW:
                return 1;
              default:
                break;
            }
    }
    return 0;
}
/* Return the operand index of memory operand memop_idx: MEM0 or AGEN for
 * index 0, MEM1 for index 1. Asserts (and returns 0) when the requested
 * memop does not exist, so callers must only ask for memops the
 * instruction actually has. */
static XED_INLINE unsigned int
xed_decoded_inst_get_find_memop(const xed_decoded_inst_t* p,
                                xed_uint_t memop_idx) {
    const xed_inst_t* inst = p->_inst;
    const unsigned int noperands = xed_inst_noperands(inst);
    unsigned int i;
    for( i=0;i<noperands;i++) { //FIXME: slow scan...
        const xed_operand_t* o = xed_inst_operand(inst,i);
        const xed_operand_enum_t op_name = xed_operand_name(o);
        if ((memop_idx == 0 && op_name == XED_OPERAND_MEM0) ||
            (memop_idx == 0 && op_name == XED_OPERAND_AGEN) ||
            (memop_idx == 1 && op_name == XED_OPERAND_MEM1)) {
            return i;
        }
    }
    xed_assert(0);
    return 0;
}
unsigned int
xed_decoded_inst_get_memop_address_width(const xed_decoded_inst_t* p,
                                         xed_uint_t memop_idx) {
    /* Return the addressing width for memop_idx (0 or 1).
     *
     * In 16/32b modes, if the memop is an implicit stack reference, then
     * use the stack addressing width as the effective address size for
     * that memop. This DOES NOT include base references that use rSP/rBP
     * which implicitly have SS as their stack selector (if not
     * overridden).
     *
     * In 64b mode, the effective addressing width of a reference that
     * would use the stack is 64b.
     */
    const xed_inst_t* inst = p->_inst;
    const xed_uint32_t i = xed_decoded_inst_get_find_memop(p,memop_idx);
    const xed_operand_t* o = xed_inst_operand(inst,i);
    const xed_operand_width_enum_t width = xed_operand_width(o);
    xed_uint32_t bits;
    /* SPW* / SSZ widths mark implicit stack references. */
    if ( width == XED_OPERAND_WIDTH_SSZ ||
         width == XED_OPERAND_WIDTH_SPW ||
         width == XED_OPERAND_WIDTH_SPW2 ||
         width == XED_OPERAND_WIDTH_SPW3 ||
         width == XED_OPERAND_WIDTH_SPW8 ) {
        bits=xed_operand_values_get_stack_address_width(
            xed_decoded_inst_operands_const(p));
    }
    else {
        bits=xed_operand_values_get_effective_address_width(
            xed_decoded_inst_operands_const(p));
    }
    return bits;
}
static XED_INLINE unsigned int
xed_decoded_inst_get_operand_width_bits(const xed_decoded_inst_t* p,
                                        const xed_operand_t* o ) {
    /* Map an operand template's width code to a concrete bit width.
       SSZ/ASZ depend on stack/effective address width; all other codes
       are resolved through the xed_width_bits table indexed by the
       effective operand size (eosz). */
    const xed_operand_width_enum_t width = xed_operand_width(o);
    switch (width) {
      case XED_OPERAND_WIDTH_SSZ:
        return xed_operand_values_get_stack_address_width(
            xed_decoded_inst_operands_const(p));
      case XED_OPERAND_WIDTH_ASZ:
        return xed_operand_values_get_effective_address_width(
            xed_decoded_inst_operands_const(p));
      default: {
        const xed_uint32_t eosz = xed3_operand_get_eosz(p);
        xed_assert(width < XED_OPERAND_WIDTH_LAST);
        xed_assert(eosz <= 3);
        return xed_width_bits[width][eosz];
      }
    }
}
static xed_uint32_t
xed_decoded_inst_compute_variable_width(const xed_decoded_inst_t* p,
                                        const xed_operand_t* o) {
    /* Width in bits of a variable-width operand: the element count
       times the per-element size, both recorded during decode. */
    (void)o; // template unused; kept for a uniform call signature
    return xed3_operand_get_nelem(p) * xed3_operand_get_element_size(p);
}
unsigned int
xed_decoded_inst_compute_memory_operand_length(const xed_decoded_inst_t* p,
                                               unsigned int memop_idx) {
    /* Length in bytes of memory operand memop_idx. Fixed-width operands
       come from the width code; a zero width means a variable-width
       operand, computed as nelem * element_size. */
    const xed_inst_t* inst = p->_inst;
    const unsigned int i = xed_decoded_inst_get_find_memop(p, memop_idx);
    const xed_operand_t* o = xed_inst_operand(inst, i);
    xed_uint32_t bits = xed_decoded_inst_get_operand_width_bits(p, o);
    if (bits == 0)
        bits = xed_decoded_inst_compute_variable_width(p, o);
    return bits >> 3;
}
// returns bytes
unsigned int
xed_decoded_inst_get_memory_operand_length(const xed_decoded_inst_t* p,
                                           unsigned int memop_idx)
{
    /* Bounds-checked wrapper: 0 when memop_idx is not a valid memory
       operand for this instruction. */
    if (memop_idx >= xed_decoded_inst_number_of_memory_operands(p))
        return 0;
    return xed_decoded_inst_compute_memory_operand_length(p, memop_idx);
}
static xed_uint32_t
xed_decoded_inst_operand_length_bits_register(
    const xed_decoded_inst_t* p,
    unsigned int operand_index)
{
    /* Width in bits of a register operand. If the operand template
       carries an explicit width code, that wins; otherwise the width
       is looked up per register in xed_reg_width_bits (column 1 for
       64-bit mode, column 0 for 16/32-bit modes). */
    xed_uint32_t mode = 0;
    xed_uint_t idx = 0; // default for 16b and 32b
    const xed_inst_t* inst = p->_inst;
    const xed_operand_t* o = xed_inst_operand(inst,operand_index);
    xed_operand_enum_t op_name = xed_operand_name(o);
    xed_reg_enum_t r;
    /* some registers have a special width specified */
    const xed_operand_width_enum_t width = xed_operand_width(o);
    if (width != XED_OPERAND_WIDTH_INVALID)
        return xed_decoded_inst_get_operand_width_bits(p,o);
    r = xed_decoded_inst_get_reg(p,op_name);
    mode = xed_decoded_inst_get_machine_mode_bits(p);
    if (mode == 64)
        idx = 1;
    if (r < XED_REG_LAST)
        return xed_reg_width_bits[r][idx];
    /* only reached for an invalid register value: trap in debug builds,
       return 0 otherwise */
    xed_assert(r < XED_REG_LAST);
    return 0;
}
unsigned int
xed_decoded_inst_operand_length_bits(
    const xed_decoded_inst_t* p,
    unsigned int operand_index)
{
    /* Return the width in bits of the operand at operand_index, or 0
       when operand_index is out of range for this instruction.
       Registers resolve through the register width tables, AGENs use
       the effective address width, and everything else (MEM0, MEM1,
       PTR, IMM0, IMM1, RELBR, ABSBR) uses the template width codes. */
    const xed_inst_t* inst = p->_inst;
    const unsigned int noperands = xed_inst_noperands(inst);
    const xed_operand_t* o;
    xed_operand_enum_t op_name;
    xed_uint32_t len;
    /* bounds-check before touching the operand template; this matches
       the ordering used by xed_decoded_inst_operand_elements() and
       avoids fetching a template entry for an out-of-range index */
    if (noperands <= operand_index)
        return 0;
    o = xed_inst_operand(inst, operand_index);
    op_name = xed_operand_name(o);
    if (xed_operand_template_is_register(o)) {
        len= xed_decoded_inst_operand_length_bits_register( p,operand_index);
        return len;
    }
    else if (op_name == XED_OPERAND_AGEN) {
        len=xed_operand_values_get_effective_address_width(
            xed_decoded_inst_operands_const(p));
        return len;
    }
    // MEM0, MEM1,PTR, IMM0, IMM1, RELBR, and ABSBR use the width codes now.
    // use the "scalable" width codes from the operand template.
    len = xed_decoded_inst_get_operand_width_bits(p,o);
    if (len)
        return len;
    // variable width stuff must compute it based on nelem * element_size
    len = xed_decoded_inst_compute_variable_width(p,o);
    return len;
}
unsigned int xed_decoded_inst_operand_length(const xed_decoded_inst_t* p,
                                             unsigned int operand_index) {
    /* Operand length in bytes: the bit length divided by 8. */
    return xed_decoded_inst_operand_length_bits(p, operand_index) >> 3;
}
/*******************************************************************/
// The number of elements in the operand
unsigned int xed_decoded_inst_operand_elements(const xed_decoded_inst_t* p,
                                               unsigned int operand_index)
{
    /* Element count of the operand at operand_index; 0 when the index,
       width code or xtype is out of range. Fixed-element-size xtypes
       divide the total bit length; variable xtypes use the nelem value
       recorded at decode; structs and plain int/uint count as one. */
    unsigned int nelem = 1;
    const xed_inst_t* inst = p->_inst;
    const unsigned int noperands = xed_inst_noperands(inst);
    const xed_operand_t* o = xed_inst_operand(inst,operand_index);
    xed_operand_width_enum_t width;
    xed_operand_element_xtype_enum_t xtype;
    const xed_operand_type_info_t* q;
    if ( operand_index >= noperands )
        return 0;
    width = xed_operand_width(o);
    if ( width >= XED_OPERAND_WIDTH_LAST)
        return 0;
    xtype = xed_operand_xtype(o);
    if ( xtype >= XED_OPERAND_XTYPE_LAST)
        return 0;
    q = xed_operand_xtype_info+xtype;
    if (q->bits_per_element) {
        const xed_uint_t bits =
            xed_decoded_inst_operand_length_bits(p, operand_index);
        nelem = bits / q->bits_per_element;
    }
    else if (q->dtype == XED_OPERAND_ELEMENT_TYPE_STRUCT) {
        nelem = 1;
    }
    else if (q->dtype == XED_OPERAND_ELEMENT_TYPE_VARIABLE) {
        nelem = xed3_operand_get_nelem(p);
    }
    else { // XED_OPERAND_ELEMENT_TYPE_INT, XED_OPERAND_ELEMENT_TYPE_UINT
        nelem = 1;
    }
    return nelem;
}
unsigned int
xed_decoded_inst_operand_element_size_bits(
    const xed_decoded_inst_t* p,
    unsigned int operand_index)
{
    /* Per-element size in bits for the operand at operand_index.
       Fixed-size xtypes report their table value; struct/int/uint
       report the whole operand's bit length; variable xtypes use the
       element size recorded at decode. Note the branch order: the
       register fallback is only reached when the xtype carries no
       element-size information. Returns 0 when nothing matches. */
    unsigned int element_size = 0;
    const xed_inst_t* inst = p->_inst;
    const xed_operand_t* o = xed_inst_operand(inst,operand_index);
    const xed_operand_element_xtype_enum_t xtype = xed_operand_xtype(o);
    const xed_operand_type_info_t* q;
    if ( xtype >= XED_OPERAND_XTYPE_LAST)
        return 0;
    q = xed_operand_xtype_info+xtype;
    if (q->bits_per_element) {
        element_size = q->bits_per_element;
    }
    else if ( q->dtype == XED_OPERAND_ELEMENT_TYPE_STRUCT ||
              q->dtype == XED_OPERAND_ELEMENT_TYPE_INT ||
              q->dtype == XED_OPERAND_ELEMENT_TYPE_UINT ) {
        element_size = xed_decoded_inst_operand_length_bits(p, operand_index);
    }
    else if (q->dtype == XED_OPERAND_ELEMENT_TYPE_VARIABLE) {
        element_size = xed3_operand_get_element_size(p);
    }
    else if (xed_operand_template_is_register(o)) {
        return xed_decoded_inst_operand_length_bits_register(p, operand_index);
    }
    return element_size;
}
xed_operand_element_type_enum_t
xed_decoded_inst_operand_element_type(const xed_decoded_inst_t* p,
                                      unsigned int operand_index)
{
    /* Element data type (int, uint, float, ...) of the operand at
       operand_index, via the xtype table; INVALID when the index or
       width code is out of range. */
    xed_operand_element_type_enum_t dtype = XED_OPERAND_ELEMENT_TYPE_INVALID;
    const xed_inst_t* inst = p->_inst;
    const unsigned int noperands = xed_inst_noperands(inst);
    const xed_operand_t* o = xed_inst_operand(inst,operand_index);
    xed_operand_width_enum_t width;
    xed_operand_element_xtype_enum_t xtype;
    if ( operand_index >= noperands )
        return dtype;
    width = xed_operand_width(o);
    if ( width >= XED_OPERAND_WIDTH_LAST)
        return dtype;
    xtype = xed_operand_xtype(o);
    if ( xtype < XED_OPERAND_XTYPE_LAST) {
        const xed_operand_type_info_t* q = xed_operand_xtype_info+xtype;
        dtype = q->dtype;
        /* This is a catch case for the register NTs that do not have
           type codes. It is not 100% accurate */
        if (dtype == XED_OPERAND_ELEMENT_TYPE_INVALID)
            return XED_OPERAND_ELEMENT_TYPE_INT;
    }
    return dtype;
}
/*******************************************************************/
xed_bool_t
xed_decoded_inst_uses_rflags(const xed_decoded_inst_t* q)
{
    /* True if the instruction touches at least one flag, per its
       resolved flags record. */
    const xed_simple_flag_t* fi = xed_decoded_inst_get_rflags_info(q);
    return (fi != 0 && xed_simple_flag_get_nflags(fi) > 0) ? 1 : 0;
}
static xed_uint8_t
xed_decoded_inst__compute_masked_immediate( const xed_decoded_inst_t* p)
{
    /* The 8-bit immediate masked as a shift/rotate count: 6 bits when
       the effective operand width is 64, otherwise 5 bits. Asserts the
       immediate really is 8 bits wide. */
    const xed_uint8_t mask =
        (xed_operand_values_get_effective_operand_width(p) == 64) ? 0x3F
                                                                  : 0x1F;
    xed_assert(xed3_operand_get_imm_width(p) == 8);
    return XED_STATIC_CAST(xed_uint8_t, xed3_operand_get_uimm0(p)) & mask;
}
const xed_simple_flag_t*
xed_decoded_inst_get_rflags_info(const xed_decoded_inst_t* q)
{
    /* Resolve the instruction's flags record. Instructions either have
       no flags (index 0), a direct "simple" record, or a "complex"
       record whose concrete simple record depends on either a REP
       prefix or the (masked) immediate value. Returns NULL when the
       instruction has no flag effects. */
    xed_uint32_t complex_simple_index;
    const xed_complex_flag_t* p;
    // no flags
    const xed_inst_t* inst = xed_decoded_inst_inst(q);
    xed_uint32_t t_index = inst->_flag_info_index;
    if(t_index == 0)
        return 0;
    // simple
    if (inst->_flag_complex==0)
        return xed_flags_simple_table+t_index;
    // complex
    complex_simple_index=0;
    p = xed_flags_complex_table + t_index;
    if (p->check_rep)
    {
        /* flags differ depending on whether a real REP prefix is present */
        if (xed_operand_values_has_real_rep(q))
            complex_simple_index = p->cases[XED_FLAG_CASE_HAS_REP];
        else
            complex_simple_index = p->cases[XED_FLAG_CASE_NO_REP];
    }
    else if (p->check_imm)
    {
        /* shift/rotate style: flags differ for count 0, 1, or other */
        xed_uint8_t masked_imm_byte =
            xed_decoded_inst__compute_masked_immediate(q);
        if (masked_imm_byte == 0)
            complex_simple_index = p->cases[XED_FLAG_CASE_IMMED_ZERO];
        else if (masked_imm_byte == 1)
            complex_simple_index = p->cases[XED_FLAG_CASE_IMMED_ONE];
        else
            complex_simple_index = p->cases[XED_FLAG_CASE_IMMED_OTHER];
    }
    else
        xed_assert(0);
    if (complex_simple_index == 0)
        return 0;
    return xed_flags_simple_table+complex_simple_index;
}
xed_bool_t
xed_decoded_inst_is_prefetch(const xed_decoded_inst_t* p)
{
    /* True for instructions carrying the PREFETCH attribute. */
    return xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_PREFETCH);
}
xed_uint_t
xed_decoded_inst_number_of_memory_operands(const xed_decoded_inst_t* p) {
    /* Count of memory operands, including AGENs (e.g. LEA). */
    return (xed3_operand_get_mem0(p) +
            xed3_operand_get_mem1(p) + xed3_operand_get_agen(p));
}
//////////////////////////////////////////////////////////////////////////
// Modifying decoded instructions before re-encoding
/* The following setters are thin wrappers over the operand-values
   layer; they patch a decoded instruction in place so it can be fed
   back to the encoder. The *_bits variants take lengths in bits, the
   others in bytes. */
void xed_decoded_inst_set_scale(xed_decoded_inst_t* p, xed_uint_t scale) {
    xed3_operand_set_scale(p,scale);
}
void xed_decoded_inst_set_memory_displacement(xed_decoded_inst_t* p,
                                              xed_int64_t disp,
                                              xed_uint_t length_bytes) {
    xed_operand_values_set_memory_displacement(p, disp, length_bytes);
}
void xed_decoded_inst_set_branch_displacement(xed_decoded_inst_t* p,
                                              xed_int64_t disp,
                                              xed_uint_t length_bytes) {
    xed_operand_values_set_branch_displacement(p, disp, length_bytes);
}
void xed_decoded_inst_set_immediate_signed(xed_decoded_inst_t* p,
                                           xed_int32_t x,
                                           xed_uint_t length_bytes) {
    xed_operand_values_set_immediate_signed(p, x,length_bytes);
}
void xed_decoded_inst_set_immediate_unsigned(xed_decoded_inst_t* p,
                                             xed_uint64_t x,
                                             xed_uint_t length_bytes) {
    xed_operand_values_set_immediate_unsigned(p, x,length_bytes);
}
////////
void xed_decoded_inst_set_memory_displacement_bits(xed_decoded_inst_t* p,
                                                   xed_int64_t disp,
                                                   xed_uint_t length_bits) {
    xed_operand_values_set_memory_displacement_bits(p, disp, length_bits);
}
void xed_decoded_inst_set_branch_displacement_bits(xed_decoded_inst_t* p,
                                                   xed_int64_t disp,
                                                   xed_uint_t length_bits) {
    xed_operand_values_set_branch_displacement_bits(p, disp, length_bits);
}
void xed_decoded_inst_set_immediate_signed_bits(xed_decoded_inst_t* p,
                                                xed_int32_t x,
                                                xed_uint_t length_bits) {
    xed_operand_values_set_immediate_signed_bits(p, x,length_bits);
}
void xed_decoded_inst_set_immediate_unsigned_bits(xed_decoded_inst_t* p,
                                                  xed_uint64_t x,
                                                  xed_uint_t length_bits) {
    xed_operand_values_set_immediate_unsigned_bits(p, x,length_bits);
}
////////////////////////////////////////////////////////////////////////////
xed_uint32_t xed_decoded_inst_get_operand_width(const xed_decoded_inst_t* p) {
    /* Effective operand width in bits; byte operations are always 8. */
    return xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_BYTEOP)
        ? 8
        : xed_operand_values_get_effective_operand_width(p);
}
xed_bool_t
xed_decoded_inst_valid_for_chip(xed_decoded_inst_t const* const p,
                                xed_chip_enum_t chip)
{
    /* True if the instruction's ISA set is implemented by 'chip'. */
    return xed_isa_set_is_valid_for_chip(
        xed_decoded_inst_get_isa_set(p), chip);
}
xed_uint_t
xed_decoded_inst_vector_length_bits(xed_decoded_inst_t const* const p)
{
    /* Vector length in bits for VEX/EVEX (and XOP) instructions:
       encoded VL 0/1/2 maps to 128/256/512. Returns 0 for non-vector
       encodings or when XED is built without AVX support. */
    xed_uint_t vl_bits=0;
#if defined(XED_AVX)
    xed_uint_t vl_encoded;
    // only valid for VEX, EVEX (and XOP) instructions.
    if (xed3_operand_get_vexvalid(p) == 0)
        return 0;
    /* vl_encoded 0=128,1=256,2=512*/
    vl_encoded = xed3_operand_get_vl(p);
    /* 0->128, 1->256, 2->512 */
    vl_bits = 1 << (vl_encoded+7);
#endif
    (void)p; // pacify (msvs) compiler for noavx builds
    return vl_bits;
}
xed_bool_t
xed_decoded_inst_masked_vector_operation(xed_decoded_inst_t* p)
{
    /* True for masked vector operations: either a pre-EVEX MASKOP, or
       an EVEX write-masked operation using a k register other than k0,
       excluding mask-as-control instructions. */
    // pre-evex masked operations
    xed_uint32_t maskop =
        xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_MASKOP);
    if (maskop)
        return 1;
    // if evex, and not k0, and not mask-as-control, then report it as a
    // masked operation. Evex operations that mask-as-control are a
    // different kind of masked operation.
    if (xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_MASKOP_EVEX) &&
        !xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_MASK_AS_CONTROL))
    {
        /* by convention the write mask is operand 1 of the template */
        const xed_uint_t write_mask_operand = 1;
        const xed_operand_t* op = xed_inst_operand(p->_inst, write_mask_operand);
        xed_operand_enum_t op_name = xed_operand_name(op);
        if (op_name == XED_OPERAND_REG0 || op_name == XED_OPERAND_REG1) {
            xed_reg_enum_t r = xed_decoded_inst_get_reg(p, op_name);
            if (xed_reg_class(r) == XED_REG_CLASS_MASK) {
                /* k0 means "no masking" */
                if (r != XED_REG_K0)
                    return 1;
            }
        }
    }
    return 0;
}
xed_uint_t
xed_decoded_inst_get_nprefixes(const xed_decoded_inst_t* p) {
    /* Number of legacy prefix bytes counted during decode. */
    return xed3_operand_get_nprefixes(p);
}
xed_bool_t xed_decoded_inst_masking(const xed_decoded_inst_t* p) {
    /* True when an AVX512 write mask (k1..k7) is in use. */
#if defined(XED_SUPPORTS_AVX512)
    if (xed3_operand_get_mask(p) != 0)
        return 1;
#endif
    (void)p; //pacify compiler
    return 0;
}
xed_bool_t xed_decoded_inst_merging(const xed_decoded_inst_t* p) {
    /* True when masking with merging semantics: a nonzero mask,
       EVEX.z==0, destination is not a mask register, and the mask is
       not used as control. */
#if defined(XED_SUPPORTS_AVX512)
    if (xed3_operand_get_mask(p) != 0)
    {
        const xed_inst_t* xi = xed_decoded_inst_inst(p);
        const xed_operand_t* op = xed_inst_operand(xi,0); // 0'th operand.
        if (xed_operand_width(op) == XED_OPERAND_WIDTH_MSKW) // mask dest -> always zeroing
            return 0;
        if (xed3_operand_get_zeroing(p) == 0)
            if (!xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_MASK_AS_CONTROL))
                return 1;
    }
#endif
    (void)p; //pacify compiler
    return 0;
}
xed_bool_t xed_decoded_inst_zeroing(const xed_decoded_inst_t* p) {
    /* True when masking with zeroing semantics (nonzero mask and
       EVEX.z==1). */
#if defined(XED_SUPPORTS_AVX512)
    if (xed3_operand_get_mask(p) != 0)
        if (xed3_operand_get_zeroing(p) == 1)
            return 1;
#endif
    (void)p; //pacify compiler
    return 0;
}
xed_uint_t xed_decoded_inst_avx512_dest_elements(const xed_decoded_inst_t* p) {
    /* Number of destination elements for EVEX-masked operations:
       1 for SIMD scalars; for mask-register destinations (VFPCLASS,
       VCMP*) the count is derived from a source operand's element
       size; otherwise from the destination operand itself. Returns 0
       when not applicable. */
#if defined(XED_SUPPORTS_AVX512)
    if (xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_SIMD_SCALAR))
        return 1;
    if (xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_MASKOP_EVEX)) {
        const xed_inst_t* xi = xed_decoded_inst_inst(p);
        const xed_operand_t* op = xed_inst_operand(xi,0); // 0'th operand.
        if (xed_operand_width(op) == XED_OPERAND_WIDTH_MSKW) {
            // need to use source vector or memop to find width (VFPCLASS, VCMP{PS,PD}
            xed_uint_t vl_bits = xed_decoded_inst_vector_length_bits(p);
            xed_uint_t source_operand_element_bits = xed_decoded_inst_operand_element_size_bits(p,2); // a bit of a hack
            if (source_operand_element_bits)
                return vl_bits / source_operand_element_bits;
            return 0;
        }
        xed_uint_t vl_dest_bits = xed_decoded_inst_operand_length_bits(p,0);
        xed_uint_t dest_element_bits = xed_decoded_inst_operand_element_size_bits(p,0);
        if (dest_element_bits)
            return vl_dest_bits / dest_element_bits;
    }
#else
    (void)p; // satisfy compiler
#endif
    return 0;
}
xed_operand_action_enum_t
xed_decoded_inst_operand_action(const xed_decoded_inst_t* p,
                                unsigned int operand_index)
{
    /* Effective read/write action of an operand, refining the template
       action for EVEX merge-masking on the destination (operand 0). */
    /* For the 0th operand, except for stores and except if attribute MASK_AS_CONTROL
                        RW         W      <<< SDM/XED notion
       ===========================================
       aaa=0 control    n/a       w
       aaa=0 merging    rw        w
       aaa=0 zeroing    n/a       n/a
       aaa!=0 control   n/a       w
       aaa!=0 merging   r+cw      r+cw    <<< This one requires special handling
       aaa!=0 zeroing   r+w       w
       special case: things that write mask reg dests have EVEX.z==0 but never merge.
     */
    const xed_inst_t* xi = xed_decoded_inst_inst(p);
    const xed_operand_t* op = xed_inst_operand(xi,operand_index);
    xed_operand_action_enum_t rw = xed_operand_rw(op);
    if (operand_index == 0)
    {
        if ( xed_decoded_inst_masking(p) &&
             xed_decoded_inst_merging(p) )
        {
            if (rw == XED_OPERAND_ACTION_RW)
                return XED_OPERAND_ACTION_RCW;
            //need to filter out stores which do NOT read memory when merging.
            if (rw == XED_OPERAND_ACTION_W)
            {
                const xed_operand_t* zero_op = xed_inst_operand(xi,0);
                if (xed_operand_name(zero_op) == XED_OPERAND_MEM0)
                    return XED_OPERAND_ACTION_CW;
                // reflect dest register input dependence when merging
                return XED_OPERAND_ACTION_RCW;
            }
        }
    }
    return rw;
}
xed_bool_t
xed_decoded_inst_uses_embedded_broadcast(const xed_decoded_inst_t* p)
{
    /* True for EVEX instructions that both allow embedded broadcast
       and have the broadcast bit set. */
#if defined(XED_SUPPORTS_AVX512)
    if (xed_decoded_inst_get_attribute(p, XED_ATTRIBUTE_BROADCAST_ENABLED) &&
        xed3_operand_get_bcast(p))
        return 1;
#endif
    (void) p;
    return 0;
}
xed_bool_t
xed_decoded_inst_is_broadcast_instruction(const xed_decoded_inst_t* p)
{
    /* True for explicit broadcast instructions. */
#if defined(XED_AVX) // also reports AVX512 broadcast instr
    return (xed_decoded_inst_get_category(p) == XED_CATEGORY_BROADCAST) ? 1 : 0;
#else
    (void) p;
    return 0;
#endif
}
xed_bool_t
xed_decoded_inst_is_broadcast(const xed_decoded_inst_t* p)
{
    //FIXME: add an enum for the broadcast values so we can give a better answer here
    /* True for either an explicit broadcast instruction or an EVEX
       embedded broadcast. */
    return (xed_decoded_inst_is_broadcast_instruction(p) ||
            xed_decoded_inst_uses_embedded_broadcast(p)) ? 1 : 0;
}
xed_bool_t
xed_decoded_inst_is_apx_zu(const xed_decoded_inst_t* p)
{
    /* APX-Promoted instructions with ZU(zero-upper) behavior are:
       - NDD instructions
       - Subgroup of promoted IMUL instructions with 0x69 and 0x6B opcodes
       - Subgroup of promoted SETcc instructions */
#if defined(XED_APX)
    if (xed_decoded_inst_get_extension(p) == XED_EXTENSION_APXEVEX) {
        // APX EVEX instructions: ZU follows the ND (new-data-destination) bit
        return xed3_operand_get_nd(p);
    }
#endif
    (void) p;
    return 0;
}
|
6148d01c3eb73e4d842c794a6860f530cfe8693b
|
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
|
/SOFTWARE/A64-TERES/linux-a64/drivers/staging/comedi/drivers/das800.c
|
9ce6cbcc7ee8c092e4d21f0037f18cda3a346356
|
[
"Linux-syscall-note",
"GPL-2.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
OLIMEX/DIY-LAPTOP
|
ae82f4ee79c641d9aee444db9a75f3f6709afa92
|
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
|
refs/heads/rel3
| 2023-08-04T01:54:19.483792
| 2023-04-03T07:18:12
| 2023-04-03T07:18:12
| 80,094,055
| 507
| 92
|
Apache-2.0
| 2023-04-03T07:05:59
| 2017-01-26T07:25:50
|
C
|
UTF-8
|
C
| false
| false
| 20,617
|
c
|
das800.c
|
/*
comedi/drivers/das800.c
    Driver for Keithley das800 series boards and compatibles
Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
************************************************************************
*/
/*
Driver: das800
Description: Keithley Metrabyte DAS800 (& compatibles)
Author: Frank Mori Hess <fmhess@users.sourceforge.net>
Devices: [Keithley Metrabyte] DAS-800 (das-800), DAS-801 (das-801),
DAS-802 (das-802),
[Measurement Computing] CIO-DAS800 (cio-das800),
CIO-DAS801 (cio-das801), CIO-DAS802 (cio-das802),
CIO-DAS802/16 (cio-das802/16)
Status: works, cio-das802/16 untested - email me if you have tested it
Configuration options:
[0] - I/O port base address
[1] - IRQ (optional, required for timed or externally triggered conversions)
Notes:
IRQ can be omitted, although the cmd interface will not work without it.
All entries in the channel/gain list must use the same gain and be
consecutive channels counting upwards in channel number (these are
hardware limitations.)
I've never tested the gain setting stuff since I only have a
DAS-800 board with fixed gain.
The cio-das802/16 does not have a fifo-empty status bit! Therefore
only fifo-half-full transfers are possible with this card.
*/
/*
cmd triggers supported:
start_src: TRIG_NOW | TRIG_EXT
scan_begin_src: TRIG_FOLLOW
scan_end_src: TRIG_COUNT
convert_src: TRIG_TIMER | TRIG_EXT
stop_src: TRIG_NONE | TRIG_COUNT
*/
#include <linux/interrupt.h>
#include "../comedidev.h"
#include <linux/ioport.h>
#include <linux/delay.h>
#include "8253.h"
#include "comedi_fc.h"
#define DAS800_SIZE 8
#define TIMER_BASE 1000
#define N_CHAN_AI 8 /* number of analog input channels */
/* Registers for the das800 */
#define DAS800_LSB 0
#define FIFO_EMPTY 0x1
#define FIFO_OVF 0x2
#define DAS800_MSB 1
#define DAS800_CONTROL1 2
#define CONTROL1_INTE 0x8
#define DAS800_CONV_CONTROL 2
#define ITE 0x1
#define CASC 0x2
#define DTEN 0x4
#define IEOC 0x8
#define EACS 0x10
#define CONV_HCEN 0x80
#define DAS800_SCAN_LIMITS 2
#define DAS800_STATUS 2
#define IRQ 0x8
#define BUSY 0x80
#define DAS800_GAIN 3
#define CIO_FFOV 0x8 /* cio-das802/16 fifo overflow */
#define CIO_ENHF 0x90 /* cio-das802/16 fifo half full int ena */
/* indirect register selector values, written to DAS800_GAIN before
 * accessing the shared register at offset 2 or 7 (see das800_ind_write()
 * and das800_ind_read()) */
#define CONTROL1 0x80
#define CONV_CONTROL 0xa0
#define SCAN_LIMITS 0xc0
#define ID 0xe0
#define DAS800_8254 4
#define DAS800_STATUS2 7
#define STATUS2_HCEN 0x80
#define STATUS2_INTE 0X20
#define DAS800_ID 7
#define DAS802_16_HALF_FIFO_SZ 128
/* static per-model description of a supported board */
struct das800_board {
	const char *name;	/* board name matched at attach time */
	int ai_speed;		/* minimum convert period (presumably ns;
				 * compared against cmd->convert_arg) */
	const struct comedi_lrange *ai_range;	/* analog input range table */
	int resolution;		/* ADC bits: 12, or 16 for cio-das802/16 */
};
/* analog input range tables, one per board family */
static const struct comedi_lrange range_das801_ai = {
	9, {
		BIP_RANGE(5),
		BIP_RANGE(10),
		UNI_RANGE(10),
		BIP_RANGE(0.5),
		UNI_RANGE(1),
		BIP_RANGE(0.05),
		UNI_RANGE(0.1),
		BIP_RANGE(0.01),
		UNI_RANGE(0.02)
	}
};
static const struct comedi_lrange range_cio_das801_ai = {
	9, {
		BIP_RANGE(5),
		BIP_RANGE(10),
		UNI_RANGE(10),
		BIP_RANGE(0.5),
		UNI_RANGE(1),
		BIP_RANGE(0.05),
		UNI_RANGE(0.1),
		BIP_RANGE(0.005),
		UNI_RANGE(0.01)
	}
};
static const struct comedi_lrange range_das802_ai = {
	9, {
		BIP_RANGE(5),
		BIP_RANGE(10),
		UNI_RANGE(10),
		BIP_RANGE(2.5),
		UNI_RANGE(5),
		BIP_RANGE(1.25),
		UNI_RANGE(2.5),
		BIP_RANGE(0.625),
		UNI_RANGE(1.25)
	}
};
static const struct comedi_lrange range_das80216_ai = {
	8, {
		BIP_RANGE(10),
		UNI_RANGE(10),
		BIP_RANGE(5),
		UNI_RANGE(5),
		BIP_RANGE(2.5),
		UNI_RANGE(2.5),
		BIP_RANGE(1.25),
		UNI_RANGE(1.25)
	}
};
/* indices into das800_boards[] */
enum das800_boardinfo {
	BOARD_DAS800,
	BOARD_CIODAS800,
	BOARD_DAS801,
	BOARD_CIODAS801,
	BOARD_DAS802,
	BOARD_CIODAS802,
	BOARD_CIODAS80216,
};
/* per-model board descriptions, indexed by enum das800_boardinfo */
static const struct das800_board das800_boards[] = {
	[BOARD_DAS800] = {
		.name		= "das-800",
		.ai_speed	= 25000,
		.ai_range	= &range_bipolar5,
		.resolution	= 12,
	},
	[BOARD_CIODAS800] = {
		.name		= "cio-das800",
		.ai_speed	= 20000,
		.ai_range	= &range_bipolar5,
		.resolution	= 12,
	},
	[BOARD_DAS801] = {
		.name		= "das-801",
		.ai_speed	= 25000,
		.ai_range	= &range_das801_ai,
		.resolution	= 12,
	},
	[BOARD_CIODAS801] = {
		.name		= "cio-das801",
		.ai_speed	= 20000,
		.ai_range	= &range_cio_das801_ai,
		.resolution	= 12,
	},
	[BOARD_DAS802] = {
		.name		= "das-802",
		.ai_speed	= 25000,
		.ai_range	= &range_das802_ai,
		.resolution	= 12,
	},
	[BOARD_CIODAS802] = {
		.name		= "cio-das802",
		.ai_speed	= 20000,
		.ai_range	= &range_das802_ai,
		.resolution	= 12,
	},
	[BOARD_CIODAS80216] = {
		.name		= "cio-das802/16",
		.ai_speed	= 10000,
		.ai_range	= &range_das80216_ai,
		.resolution	= 16,
	},
};
/* per-device runtime state */
struct das800_private {
	unsigned int count;	/* number of data points left to be taken */
	unsigned int divisor1;	/* counter 1 value for timed conversions */
	unsigned int divisor2;	/* counter 2 value for timed conversions */
	unsigned int do_bits;	/* digital output bits (cached, shifted for
				 * CONTROL1 writes) */
	bool forever;		/* flag that we should take data forever */
};
static void das800_ind_write(struct comedi_device *dev,
			     unsigned val, unsigned reg)
{
	/*
	 * Indirect register write: writing the selector 'reg' to the
	 * DAS800_GAIN port makes dev->iobase + 2 refer to the desired
	 * register, then 'val' is written there. Callers hold
	 * dev->spinlock so the select/write pair is not interleaved.
	 */
	outb(reg, dev->iobase + DAS800_GAIN);
	outb(val, dev->iobase + 2);
}
static unsigned das800_ind_read(struct comedi_device *dev, unsigned reg)
{
	/*
	 * Indirect register read: writing the selector 'reg' to the
	 * DAS800_GAIN port makes dev->iobase + 7 refer to the desired
	 * register, which is then read. Callers hold dev->spinlock so
	 * the select/read pair is not interleaved.
	 */
	outb(reg, dev->iobase + DAS800_GAIN);
	return inb(dev->iobase + 7);
}
/* Start hardware-triggered conversions and enable the card interrupt.
 * Takes dev->spinlock to serialize the indirect register accesses. */
static void das800_enable(struct comedi_device *dev)
{
	const struct das800_board *thisboard = comedi_board(dev);
	struct das800_private *devpriv = dev->private;
	unsigned long irq_flags;
	spin_lock_irqsave(&dev->spinlock, irq_flags);
	/* enable fifo-half full interrupts for cio-das802/16 */
	if (thisboard->resolution == 16)
		outb(CIO_ENHF, dev->iobase + DAS800_GAIN);
	/* enable hardware triggering */
	das800_ind_write(dev, CONV_HCEN, CONV_CONTROL);
	/* enable card's interrupt, preserving the digital output state */
	das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits, CONTROL1);
	spin_unlock_irqrestore(&dev->spinlock, irq_flags);
}
/* Stop hardware-triggered conversions (under dev->spinlock). */
static void das800_disable(struct comedi_device *dev)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&dev->spinlock, irq_flags);
	/* disable hardware triggering of conversions */
	das800_ind_write(dev, 0x0, CONV_CONTROL);
	spin_unlock_irqrestore(&dev->spinlock, irq_flags);
}
static int das800_set_frequency(struct comedi_device *dev)
{
	/*
	 * Program the two cascaded 8254 counters with the divisors
	 * computed in do_cmdtest. Returns 0 on success, -1 if either
	 * load fails.
	 */
	struct das800_private *devpriv = dev->private;
	int failures = 0;

	failures += (i8254_load(dev->iobase + DAS800_8254, 0, 1,
				devpriv->divisor1, 2) != 0);
	failures += (i8254_load(dev->iobase + DAS800_8254, 0, 2,
				devpriv->divisor2, 2) != 0);

	return failures ? -1 : 0;
}
/* Abort an in-progress asynchronous acquisition. The flags are cleared
 * before conversions are disabled; a concurrently running IRQ handler
 * checks both count and forever. */
static int das800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct das800_private *devpriv = dev->private;
	devpriv->forever = false;
	devpriv->count = 0;
	das800_disable(dev);
	return 0;
}
/* Validate (and fix up) an asynchronous analog-input command following
 * the standard comedi five-step protocol; returns the failing step
 * number or 0 on success. */
static int das800_ai_do_cmdtest(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_cmd *cmd)
{
	const struct das800_board *thisboard = comedi_board(dev);
	struct das800_private *devpriv = dev->private;
	int err = 0;
	/* Step 1 : check if triggers are trivially valid */
	err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
	err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
	err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_TIMER | TRIG_EXT);
	err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
	if (err)
		return 1;
	/* Step 2a : make sure trigger sources are unique */
	err |= cfc_check_trigger_is_unique(cmd->start_src);
	err |= cfc_check_trigger_is_unique(cmd->convert_src);
	err |= cfc_check_trigger_is_unique(cmd->stop_src);
	/* Step 2b : and mutually compatible */
	if (err)
		return 2;
	/* Step 3: check if arguments are trivially valid */
	err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
	if (cmd->convert_src == TRIG_TIMER)
		err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
						 thisboard->ai_speed);
	err |= cfc_check_trigger_arg_min(&cmd->chanlist_len, 1);
	err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);
	if (cmd->stop_src == TRIG_COUNT)
		err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
	else	/* TRIG_NONE */
		err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
	if (err)
		return 3;
	/* step 4: fix up any arguments */
	if (cmd->convert_src == TRIG_TIMER) {
		int tmp = cmd->convert_arg;
		/* calculate counter values that give desired timing;
		 * divisors are cached for das800_set_frequency() */
		i8253_cascade_ns_to_timer_2div(TIMER_BASE,
					       &devpriv->divisor1,
					       &devpriv->divisor2,
					       &cmd->convert_arg,
					       cmd->flags & TRIG_ROUND_MASK);
		if (tmp != cmd->convert_arg)
			err++;
	}
	if (err)
		return 4;
	/* check channel/gain list against card's limitations:
	 * consecutive ascending channels (mod N_CHAN_AI), single gain */
	if (cmd->chanlist) {
		unsigned int chan = CR_CHAN(cmd->chanlist[0]);
		unsigned int range = CR_RANGE(cmd->chanlist[0]);
		unsigned int next;
		int i;
		for (i = 1; i < cmd->chanlist_len; i++) {
			next = cmd->chanlist[i];
			if (CR_CHAN(next) != (chan + i) % N_CHAN_AI) {
				dev_err(dev->class_dev,
					"chanlist must be consecutive, counting upwards\n");
				err++;
			}
			if (CR_RANGE(next) != range) {
				dev_err(dev->class_dev,
					"chanlist must all have the same gain\n");
				err++;
			}
		}
	}
	if (err)
		return 5;
	return 0;
}
static int das800_ai_do_cmd(struct comedi_device *dev,
struct comedi_subdevice *s)
{
const struct das800_board *thisboard = comedi_board(dev);
struct das800_private *devpriv = dev->private;
struct comedi_async *async = s->async;
unsigned int gain = CR_RANGE(async->cmd.chanlist[0]);
unsigned int start_chan = CR_CHAN(async->cmd.chanlist[0]);
unsigned int end_chan = (start_chan + async->cmd.chanlist_len - 1) % 8;
unsigned int scan_chans = (end_chan << 3) | start_chan;
int conv_bits;
unsigned long irq_flags;
das800_disable(dev);
spin_lock_irqsave(&dev->spinlock, irq_flags);
/* set scan limits */
das800_ind_write(dev, scan_chans, SCAN_LIMITS);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
/* set gain */
if (thisboard->resolution == 12 && gain > 0)
gain += 0x7;
gain &= 0xf;
outb(gain, dev->iobase + DAS800_GAIN);
switch (async->cmd.stop_src) {
case TRIG_COUNT:
devpriv->count = async->cmd.stop_arg * async->cmd.chanlist_len;
devpriv->forever = false;
break;
case TRIG_NONE:
devpriv->forever = true;
devpriv->count = 0;
break;
default:
break;
}
/* enable auto channel scan, send interrupts on end of conversion
* and set clock source to internal or external
*/
conv_bits = 0;
conv_bits |= EACS | IEOC;
if (async->cmd.start_src == TRIG_EXT)
conv_bits |= DTEN;
switch (async->cmd.convert_src) {
case TRIG_TIMER:
conv_bits |= CASC | ITE;
/* set conversion frequency */
if (das800_set_frequency(dev) < 0) {
comedi_error(dev, "Error setting up counters");
return -1;
}
break;
case TRIG_EXT:
break;
default:
break;
}
spin_lock_irqsave(&dev->spinlock, irq_flags);
das800_ind_write(dev, conv_bits, CONV_CONTROL);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
async->events = 0;
das800_enable(dev);
return 0;
}
static unsigned int das800_ai_get_sample(struct comedi_device *dev)
{
	/* Read one sample from the data port: LSB first, then MSB. */
	unsigned int sample;

	sample = inb(dev->iobase + DAS800_LSB);
	sample |= inb(dev->iobase + DAS800_MSB) << 8;
	return sample;
}
/* Interrupt handler: drains up to half a fifo of samples into the
 * comedi buffer, then either re-arms the card interrupt or terminates
 * the acquisition (on overflow or when the requested count is done). */
static irqreturn_t das800_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct das800_private *devpriv = dev->private;
	struct comedi_subdevice *s = dev->read_subdev;
	struct comedi_async *async = s ? s->async : NULL;
	unsigned long irq_flags;
	unsigned int status;
	unsigned int val;
	bool fifo_empty;
	bool fifo_overflow;
	int i;
	status = inb(dev->iobase + DAS800_STATUS);
	if (!(status & IRQ))
		return IRQ_NONE;	/* not our interrupt */
	if (!dev->attached)
		return IRQ_HANDLED;
	spin_lock_irqsave(&dev->spinlock, irq_flags);
	status = das800_ind_read(dev, CONTROL1) & STATUS2_HCEN;
	/*
	 * Don't release spinlock yet since we want to make sure
	 * no one else disables hardware conversions.
	 */
	/* if hardware conversions are not enabled, then quit */
	if (status == 0) {
		spin_unlock_irqrestore(&dev->spinlock, irq_flags);
		return IRQ_HANDLED;
	}
	/* the loop body always assigns both fifo flags before the first
	 * possible break, so they are initialized when tested below */
	for (i = 0; i < DAS802_16_HALF_FIFO_SZ; i++) {
		val = das800_ai_get_sample(dev);
		if (s->maxdata == 0x0fff) {
			/* 12-bit boards report fifo status in the low bits */
			fifo_empty = !!(val & FIFO_EMPTY);
			fifo_overflow = !!(val & FIFO_OVF);
		} else {
			/* cio-das802/16 has no fifo empty status bit */
			fifo_empty = false;
			fifo_overflow = !!(inb(dev->iobase + DAS800_GAIN) &
						CIO_FFOV);
		}
		if (fifo_empty || fifo_overflow)
			break;
		if (s->maxdata == 0x0fff)
			val >>= 4;	/* 12-bit sample */
		/* if there are more data points to collect */
		if (devpriv->count > 0 || devpriv->forever) {
			/* write data point to buffer */
			cfc_write_to_buffer(s, val & s->maxdata);
			devpriv->count--;
		}
	}
	async->events |= COMEDI_CB_BLOCK;
	if (fifo_overflow) {
		/* fatal: cancel the acquisition and report the error */
		spin_unlock_irqrestore(&dev->spinlock, irq_flags);
		das800_cancel(dev, s);
		async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
		comedi_event(dev, s);
		async->events = 0;
		return IRQ_HANDLED;
	}
	if (devpriv->count > 0 || devpriv->forever) {
		/* Re-enable card's interrupt.
		 * We already have spinlock, so indirect addressing is safe */
		das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits,
				 CONTROL1);
		spin_unlock_irqrestore(&dev->spinlock, irq_flags);
	} else {
		/* otherwise, stop taking data */
		spin_unlock_irqrestore(&dev->spinlock, irq_flags);
		das800_disable(dev);
		async->events |= COMEDI_CB_EOA;
	}
	comedi_event(dev, s);
	async->events = 0;
	return IRQ_HANDLED;
}
static int das800_wait_for_conv(struct comedi_device *dev, int timeout)
{
	/*
	 * Busy-poll the status register until the BUSY bit clears.
	 * Returns 0 when the conversion completed, -ETIME after
	 * 'timeout' polls.
	 */
	while (timeout-- > 0) {
		if ((inb(dev->iobase + DAS800_STATUS) & BUSY) == 0)
			return 0;
	}
	return -ETIME;
}
/* Single-shot analog input reads: select channel and gain, then for
 * each requested sample trigger a conversion (by writing the MSB port),
 * wait for completion and read the result. */
static int das800_ai_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn,
			       unsigned int *data)
{
	struct das800_private *devpriv = dev->private;
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int range = CR_RANGE(insn->chanspec);
	unsigned long irq_flags;
	unsigned int val;
	int ret;
	int i;
	das800_disable(dev);
	/* set multiplexer */
	spin_lock_irqsave(&dev->spinlock, irq_flags);
	das800_ind_write(dev, chan | devpriv->do_bits, CONTROL1);
	spin_unlock_irqrestore(&dev->spinlock, irq_flags);
	/* set gain / range (12-bit boards offset nonzero codes by 7) */
	if (s->maxdata == 0x0fff && range)
		range += 0x7;
	range &= 0xf;
	outb(range, dev->iobase + DAS800_GAIN);
	udelay(5);	/* let the mux/gain settle */
	for (i = 0; i < insn->n; i++) {
		/* trigger conversion */
		outb_p(0, dev->iobase + DAS800_MSB);
		ret = das800_wait_for_conv(dev, 1000);
		if (ret)
			return ret;
		val = das800_ai_get_sample(dev);
		if (s->maxdata == 0x0fff)
			val >>= 4;	/* 12-bit sample */
		data[i] = val & s->maxdata;
	}
	return insn->n;
}
/* Digital-input instruction handler: the three DI lines are reported in
 * bits 4..6 of the status register; return them in data[1].
 */
static int das800_di_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn,
			       unsigned int *data)
{
	unsigned int status;

	status = inb(dev->iobase + DAS800_STATUS);
	data[1] = (status >> 4) & 0x7;

	return insn->n;
}
/* Digital-output instruction handler: update the DO lines selected by the
 * mask in data[0] with the bits in data[1], then report the current output
 * state back in data[1].
 */
static int das800_do_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn,
			       unsigned int *data)
{
	struct das800_private *devpriv = dev->private;
	unsigned int mask = data[0];
	unsigned int bits = data[1];
	unsigned long irq_flags;

	if (mask) {
		s->state &= ~mask;
		s->state |= (bits & mask);
		/* DO bits occupy the upper nibble of CONTROL1; cache them so
		 * other CONTROL1 writers preserve the output state */
		devpriv->do_bits = s->state << 4;

		/* indirect register access requires the spinlock */
		spin_lock_irqsave(&dev->spinlock, irq_flags);
		das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits,
				 CONTROL1);
		spin_unlock_irqrestore(&dev->spinlock, irq_flags);
	}

	data[1] = s->state;

	return insn->n;
}
/* Read the hardware ID bits and resolve the actual board model.
 * If the ID agrees with the configured board (including its CIO- variants)
 * the configured choice is kept; otherwise the probed model wins.
 * Returns an index into das800_boards[], or -EINVAL if unidentifiable.
 */
static int das800_probe(struct comedi_device *dev)
{
	const struct das800_board *thisboard = comedi_board(dev);
	int board = thisboard ? thisboard - das800_boards : -EINVAL;
	int id_bits;
	unsigned long irq_flags;

	/* the ID register is indirectly addressed -> take the spinlock */
	spin_lock_irqsave(&dev->spinlock, irq_flags);
	id_bits = das800_ind_read(dev, ID) & 0x3;
	spin_unlock_irqrestore(&dev->spinlock, irq_flags);

	if (id_bits == 0x0) {
		if (board != BOARD_DAS800 && board != BOARD_CIODAS800) {
			dev_dbg(dev->class_dev, "Board model (probed): DAS-800\n");
			board = BOARD_DAS800;
		}
	} else if (id_bits == 0x2) {
		if (board != BOARD_DAS801 && board != BOARD_CIODAS801) {
			dev_dbg(dev->class_dev, "Board model (probed): DAS-801\n");
			board = BOARD_DAS801;
		}
	} else if (id_bits == 0x3) {
		if (board != BOARD_DAS802 && board != BOARD_CIODAS802 &&
		    board != BOARD_CIODAS80216) {
			dev_dbg(dev->class_dev, "Board model (probed): DAS-802\n");
			board = BOARD_DAS802;
		}
	} else {
		dev_dbg(dev->class_dev, "Board model: 0x%x (unknown)\n",
			id_bits);
		board = -EINVAL;
	}

	return board;
}
static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct das800_board *thisboard = comedi_board(dev);
struct das800_private *devpriv;
struct comedi_subdevice *s;
unsigned int irq = it->options[1];
unsigned long irq_flags;
int board;
int ret;
devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
if (!devpriv)
return -ENOMEM;
dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], DAS800_SIZE);
if (ret)
return ret;
board = das800_probe(dev);
if (board < 0) {
dev_dbg(dev->class_dev, "unable to determine board type\n");
return -ENODEV;
}
dev->board_ptr = das800_boards + board;
thisboard = comedi_board(dev);
dev->board_name = thisboard->name;
if (irq > 1 && irq <= 7) {
ret = request_irq(irq, das800_interrupt, 0, dev->board_name,
dev);
if (ret == 0)
dev->irq = irq;
}
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
return ret;
/* Analog Input subdevice */
s = &dev->subdevices[0];
dev->read_subdev = s;
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 8;
s->maxdata = (1 << thisboard->resolution) - 1;
s->range_table = thisboard->ai_range;
s->insn_read = das800_ai_insn_read;
if (dev->irq) {
s->subdev_flags |= SDF_CMD_READ;
s->len_chanlist = 8;
s->do_cmdtest = das800_ai_do_cmdtest;
s->do_cmd = das800_ai_do_cmd;
s->cancel = das800_cancel;
}
/* Digital Input subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 3;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = das800_di_insn_bits;
/* Digital Output subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
s->n_chan = 4;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = das800_do_insn_bits;
das800_disable(dev);
/* initialize digital out channels */
spin_lock_irqsave(&dev->spinlock, irq_flags);
das800_ind_write(dev, CONTROL1_INTE | devpriv->do_bits, CONTROL1);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
return 0;
};
/* Comedi driver registration: a legacy (manually configured) driver that
 * matches by board name against das800_boards[].
 */
static struct comedi_driver driver_das800 = {
	.driver_name = "das800",
	.module = THIS_MODULE,
	.attach = das800_attach,
	.detach = comedi_legacy_detach,
	.num_names = ARRAY_SIZE(das800_boards),
	.board_name = &das800_boards[0].name,
	.offset = sizeof(struct das800_board),
};
module_comedi_driver(driver_das800);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
|
10e238a5e1cde5e2a0b6a848d1d7a9598d0c96b8
|
c0bfd93cd7f26a271268e504959256f1e02c6806
|
/components/freemodbus/common/esp_modbus_slave.c
|
57a54efe3cd8f63f12c305fe0e5cf87ce247e77c
|
[
"Apache-2.0"
] |
permissive
|
espressif/ESP8266_RTOS_SDK
|
606f396e92d2675d9854f0fabd88587fbbbaf267
|
af0cdc36fa2600033d0a09301c754008cf1503c1
|
refs/heads/master
| 2023-08-24T22:40:15.373553
| 2023-05-06T02:04:24
| 2023-05-06T02:04:24
| 27,584,181
| 3,163
| 1,749
|
Apache-2.0
| 2023-08-09T10:48:13
| 2014-12-05T09:27:12
|
C
|
UTF-8
|
C
| false
| false
| 9,186
|
c
|
esp_modbus_slave.c
|
/* Copyright 2018 Espressif Systems (Shanghai) PTE LTD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "esp_err.h" // for esp_err_t
#include "sdkconfig.h" // for KConfig defines
#include "mbc_slave.h" // for slave private type definitions
#include "esp_modbus_common.h" // for common defines
#include "esp_modbus_slave.h" // for public slave defines
#include "esp_modbus_callbacks.h" // for modbus callbacks function pointers declaration
#ifdef CONFIG_FMB_CONTROLLER_SLAVE_ID_SUPPORT

/* Helpers to extract individual bytes from the 32-bit configured slave ID. */
#define MB_ID_BYTE0(id) ((uint8_t)(id))
#define MB_ID_BYTE1(id) ((uint8_t)(((uint16_t)(id) >> 8) & 0xFF))
#define MB_ID_BYTE2(id) ((uint8_t)(((uint32_t)(id) >> 16) & 0xFF))
#define MB_ID_BYTE3(id) ((uint8_t)(((uint32_t)(id) >> 24) & 0xFF))

#define MB_CONTROLLER_SLAVE_ID (CONFIG_FMB_CONTROLLER_SLAVE_ID)
/* The "short" slave ID passed to eMBSetSlaveID() is the top byte. */
#define MB_SLAVE_ID_SHORT (MB_ID_BYTE3(MB_CONTROLLER_SLAVE_ID))

// Slave ID constant
/* The three low bytes of the configured ID, handed to eMBSetSlaveID() as
 * the additional identification data in mbc_slave_start(). */
static uint8_t mb_slave_id[] = { MB_ID_BYTE0(MB_CONTROLLER_SLAVE_ID),
                                MB_ID_BYTE1(MB_CONTROLLER_SLAVE_ID),
                                MB_ID_BYTE2(MB_CONTROLLER_SLAVE_ID) };
#endif

// Common interface pointer for slave port
/* Set once by mbc_slave_init_iface(); every public API below checks it
 * before dereferencing. */
static mb_slave_interface_t* slave_interface_ptr = NULL;
/* Install the port-specific slave interface implementation.
 * 'handler' must point to a fully populated mb_slave_interface_t; no
 * validation is performed here — the per-call checks below catch a bad
 * or missing interface.
 */
void mbc_slave_init_iface(void* handler)
{
    slave_interface_ptr = (mb_slave_interface_t*) handler;
}
/**
* Modbus controller destroy function
*/
esp_err_t mbc_slave_destroy(void)
{
esp_err_t error = ESP_OK;
// Is initialization done?
MB_SLAVE_CHECK((slave_interface_ptr != NULL),
ESP_ERR_INVALID_STATE,
"Slave interface is not correctly initialized.");
// Check if interface has been initialized
MB_SLAVE_CHECK((slave_interface_ptr->destroy != NULL),
ESP_ERR_INVALID_STATE,
"Slave interface is not correctly initialized.");
// Call the slave port destroy function
error = slave_interface_ptr->destroy();
MB_SLAVE_CHECK((error == ESP_OK),
ESP_ERR_INVALID_STATE,
"Slave destroy failure error=(0x%x).",
error);
return error;
}
/**
* Setup Modbus controller parameters
*/
esp_err_t mbc_slave_setup(void* comm_info)
{
esp_err_t error = ESP_OK;
MB_SLAVE_CHECK((slave_interface_ptr != NULL),
ESP_ERR_INVALID_STATE,
"Slave interface is not correctly initialized.");
MB_SLAVE_CHECK((slave_interface_ptr->setup != NULL),
ESP_ERR_INVALID_STATE,
"Slave interface is not correctly initialized.");
error = slave_interface_ptr->setup(comm_info);
MB_SLAVE_CHECK((error == ESP_OK),
ESP_ERR_INVALID_STATE,
"Slave setup failure error=(0x%x).",
error);
return error;
}
/**
 * Start Modbus controller start function.
 * Optionally programs the configured slave ID into the freemodbus stack,
 * then starts the port implementation.
 * Returns ESP_OK on success, ESP_ERR_INVALID_STATE otherwise.
 */
esp_err_t mbc_slave_start(void)
{
    esp_err_t error = ESP_OK;
    MB_SLAVE_CHECK((slave_interface_ptr != NULL),
                    ESP_ERR_INVALID_STATE,
                    "Slave interface is not correctly initialized.");
    MB_SLAVE_CHECK((slave_interface_ptr->start != NULL),
                    ESP_ERR_INVALID_STATE,
                    "Slave interface is not correctly initialized.");
#ifdef CONFIG_FMB_CONTROLLER_SLAVE_ID_SUPPORT
    // Set the slave ID if the KConfig option is selected.
    // MB_SLAVE_ID_SHORT is the top byte of the configured ID; the low three
    // bytes (mb_slave_id[]) are passed as additional identification data.
    eMBErrorCode status = eMBSetSlaveID(MB_SLAVE_ID_SHORT, TRUE, (UCHAR*)mb_slave_id, sizeof(mb_slave_id));
    MB_SLAVE_CHECK((status == MB_ENOERR), ESP_ERR_INVALID_STATE, "mb stack set slave ID failure.");
#endif
    error = slave_interface_ptr->start();
    MB_SLAVE_CHECK((error == ESP_OK),
                    ESP_ERR_INVALID_STATE,
                    "Slave start failure error=(0x%x).",
                    error);
    return error;
}
/**
 * Blocking function to get event on parameter group change for application
 * task. Returns the triggered event bits, or MB_EVENT_NO_EVENTS when the
 * interface is not initialized.
 */
mb_event_group_t mbc_slave_check_event(mb_event_group_t group)
{
    MB_SLAVE_CHECK((slave_interface_ptr != NULL),
                   MB_EVENT_NO_EVENTS,
                   "Slave interface is not correctly initialized.");
    MB_SLAVE_CHECK((slave_interface_ptr->check_event != NULL),
                   MB_EVENT_NO_EVENTS,
                   "Slave interface is not correctly initialized.");

    // Delegate the (blocking) wait to the port implementation.
    return slave_interface_ptr->check_event(group);
}
/**
 * Function to get notification about parameter change from application task.
 * Fills *reg_info with the change description, waiting up to 'timeout'.
 * Returns ESP_OK on success, ESP_ERR_INVALID_STATE otherwise.
 */
esp_err_t mbc_slave_get_param_info(mb_param_info_t* reg_info, uint32_t timeout)
{
    MB_SLAVE_CHECK((slave_interface_ptr != NULL),
                   ESP_ERR_INVALID_STATE,
                   "Slave interface is not correctly initialized.");
    MB_SLAVE_CHECK((slave_interface_ptr->get_param_info != NULL),
                   ESP_ERR_INVALID_STATE,
                   "Slave interface is not correctly initialized.");

    esp_err_t error = slave_interface_ptr->get_param_info(reg_info, timeout);
    MB_SLAVE_CHECK((error == ESP_OK),
                   ESP_ERR_INVALID_STATE,
                   "Slave get parameter info failure error=(0x%x).",
                   error);
    return error;
}
/**
 * Function to set area descriptors for modbus parameters.
 * Registers one register-area descriptor with the port implementation.
 * Returns ESP_OK on success, ESP_ERR_INVALID_STATE otherwise.
 */
esp_err_t mbc_slave_set_descriptor(mb_register_area_descriptor_t descr_data)
{
    MB_SLAVE_CHECK((slave_interface_ptr != NULL),
                   ESP_ERR_INVALID_STATE,
                   "Slave interface is not correctly initialized.");
    MB_SLAVE_CHECK((slave_interface_ptr->set_descriptor != NULL),
                   ESP_ERR_INVALID_STATE,
                   "Slave interface is not correctly initialized.");

    esp_err_t error = slave_interface_ptr->set_descriptor(descr_data);
    MB_SLAVE_CHECK((error == ESP_OK),
                   ESP_ERR_INVALID_STATE,
                   "Slave set descriptor failure error=(0x%x).",
                   (uint16_t)error);
    return error;
}
/**
 * Below are stack callback functions to read/write registers
 */
/* Discrete-input register callback invoked by the freemodbus stack.
 * Fix: the guards previously returned ESP_ERR_INVALID_STATE (an esp_err_t,
 * not a valid eMBErrorCode) and MB_ENOERR (i.e. success) when the interface
 * or callback was missing; both now report MB_EILLSTATE so the stack sends
 * a proper exception instead of acting on garbage.
 */
eMBErrorCode eMBRegDiscreteCB(UCHAR * pucRegBuffer, USHORT usAddress,
                            USHORT usNDiscrete)
{
    eMBErrorCode error = MB_ENOERR;
    MB_SLAVE_CHECK((slave_interface_ptr != NULL),
                    MB_EILLSTATE,
                    "Slave interface is not correctly initialized.");
    MB_SLAVE_CHECK((slave_interface_ptr->slave_reg_cb_discrete != NULL),
                    MB_EILLSTATE,
                    "Slave interface is not correctly initialized.");
    error = slave_interface_ptr->slave_reg_cb_discrete(pucRegBuffer, usAddress, usNDiscrete);
    return error;
}
/* Coil register callback invoked by the freemodbus stack.
 * Fix: previously returned ESP_ERR_INVALID_STATE (an esp_err_t, not a
 * member of eMBErrorCode) from the guards; MB_EILLSTATE is the correct
 * stack error code for an uninitialized interface.
 */
eMBErrorCode eMBRegCoilsCB(UCHAR* pucRegBuffer, USHORT usAddress,
                            USHORT usNCoils, eMBRegisterMode eMode)
{
    eMBErrorCode error = MB_ENOERR;
    MB_SLAVE_CHECK((slave_interface_ptr != NULL),
                    MB_EILLSTATE,
                    "Slave interface is not correctly initialized.");
    MB_SLAVE_CHECK((slave_interface_ptr->slave_reg_cb_coils != NULL),
                    MB_EILLSTATE,
                    "Slave interface is not correctly initialized.");
    error = slave_interface_ptr->slave_reg_cb_coils(pucRegBuffer, usAddress,
                                                    usNCoils, eMode);
    return error;
}
/* Holding register callback invoked by the freemodbus stack.
 * Fix: previously returned ESP_ERR_INVALID_STATE (an esp_err_t, not a
 * member of eMBErrorCode) from the guards; MB_EILLSTATE is the correct
 * stack error code for an uninitialized interface.
 */
eMBErrorCode eMBRegHoldingCB(UCHAR * pucRegBuffer, USHORT usAddress,
                            USHORT usNRegs, eMBRegisterMode eMode)
{
    eMBErrorCode error = MB_ENOERR;
    MB_SLAVE_CHECK((slave_interface_ptr != NULL),
                    MB_EILLSTATE,
                    "Slave interface is not correctly initialized.");
    MB_SLAVE_CHECK((slave_interface_ptr->slave_reg_cb_holding != NULL),
                    MB_EILLSTATE,
                    "Slave interface is not correctly initialized.");
    error = slave_interface_ptr->slave_reg_cb_holding(pucRegBuffer, usAddress,
                                                      usNRegs, eMode);
    return error;
}
/* Input register callback invoked by the freemodbus stack.
 * Fix: previously returned ESP_ERR_INVALID_STATE (an esp_err_t, not a
 * member of eMBErrorCode) from the guards; MB_EILLSTATE is the correct
 * stack error code for an uninitialized interface.
 */
eMBErrorCode eMBRegInputCB(UCHAR * pucRegBuffer, USHORT usAddress,
                            USHORT usNRegs)
{
    eMBErrorCode error = MB_ENOERR;
    MB_SLAVE_CHECK((slave_interface_ptr != NULL),
                    MB_EILLSTATE,
                    "Slave interface is not correctly initialized.");
    MB_SLAVE_CHECK((slave_interface_ptr->slave_reg_cb_input != NULL),
                    MB_EILLSTATE,
                    "Slave interface is not correctly initialized.");
    error = slave_interface_ptr->slave_reg_cb_input(pucRegBuffer, usAddress, usNRegs);
    return error;
}
|
a6fbe12ef7ecdce342803f581ce292b6a74cc200
|
581bdcc078d282e388f1b655d4cfc4e08152d117
|
/KSystemInformer/dynimp.c
|
eb6703aed7e1384f5bdbd7b1f0b937226ff16449
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"Zlib",
"LGPL-2.1-or-later",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain"
] |
permissive
|
winsiderss/systeminformer
|
774928be871f0055263ac5e62ae0a598b098486b
|
5a6b442acd45d681f699a133d476a3211d072871
|
refs/heads/master
| 2023-08-28T15:43:41.074679
| 2023-08-27T20:59:20
| 2023-08-27T20:59:20
| 50,824,485
| 2,137
| 292
|
MIT
| 2023-09-10T22:35:12
| 2016-02-01T08:10:21
|
C
|
UTF-8
|
C
| false
| false
| 3,881
|
c
|
dynimp.c
|
/*
* Copyright (c) 2022 Winsider Seminars & Solutions, Inc. All rights reserved.
*
* This file is part of System Informer.
*
* Authors:
*
* wj32 2010-2016
* jxy-s 2020-2022
*
*/
#include <kph.h>
#include <dyndata.h>
#include <trace.h>
PAGED_FILE();
/**
* \brief Dynamically imports routines.
*/
_IRQL_requires_max_(PASSIVE_LEVEL)
VOID KphDynamicImport(
    VOID
    )
{
    PAGED_CODE_PASSIVE();

    //
    // Resolve optional NTOS exports at runtime. Per KphGetSystemRoutineAddress
    // each pointer is NULL when the export is not present, so callers must
    // NULL-check before use.
    //
    KphDynPsSetLoadImageNotifyRoutineEx = (PPS_SET_LOAD_IMAGE_NOTIFY_ROUTINE_EX)KphGetSystemRoutineAddress(L"PsSetLoadImageNotifyRoutineEx");
    KphDynPsSetCreateProcessNotifyRoutineEx2 = (PPS_SET_CREATE_PROCESS_NOTIFY_ROUTINE_EX2)KphGetSystemRoutineAddress(L"PsSetCreateProcessNotifyRoutineEx2");
    KphDynMmProtectDriverSection = (PMM_PROTECT_DRIVER_SECTION)KphGetSystemRoutineAddress(L"MmProtectDriverSection");
}
/**
* \brief Retrieves the address of a function exported by NTOS or HAL.
*
* \param SystemRoutineName The name of the function.
*
* \return The address of the function, or NULL if the function could
* not be found.
*/
_IRQL_requires_max_(PASSIVE_LEVEL)
PVOID KphGetSystemRoutineAddress(
    _In_z_ PCWSTR SystemRoutineName
    )
{
    UNICODE_STRING routineName;

    PAGED_CODE_PASSIVE();

    //
    // Wrap the NUL-terminated name and let the memory manager resolve the
    // NTOS/HAL export; NULL is returned when the export does not exist.
    //
    RtlInitUnicodeString(&routineName, SystemRoutineName);

    return MmGetSystemRoutineAddress(&routineName);
}
/**
* \brief Retrieves the address of a function using the exported module list.
* Caller must guarantee that PsLoadedModuleList and PsLoadedModuleResource
* is available prior to this call.
*
* \param ModuleName The name of the module to retrieve the function from.
* \param RoutineName The name of the routine to retrieve.
*
* \return The address of the function, or NULL if the function could
* not be found.
*/
_IRQL_requires_max_(PASSIVE_LEVEL)
PVOID KphpGetRoutineAddressByModuleList(
    _In_z_ PCWSTR ModuleName,
    _In_z_ PCSTR RoutineName
    )
{
    PVOID routine;
    UNICODE_STRING moduleName;

    PAGED_CODE_PASSIVE();

    routine = NULL;

    RtlInitUnicodeString(&moduleName, ModuleName);

    //
    // The loaded module list must be walked with PsLoadedModuleResource held
    // (shared is sufficient for reading) and APCs disabled.
    //
    KeEnterCriticalRegion();
    if (!ExAcquireResourceSharedLite(PsLoadedModuleResource, TRUE))
    {
        KeLeaveCriticalRegion();

        KphTracePrint(TRACE_LEVEL_ERROR,
                      GENERAL,
                      "Failed to acquire PsLoadedModuleResource to "
                      "get routine %ls!%hs",
                      ModuleName,
                      RoutineName);

        return routine;
    }

    //
    // Scan the in-load-order list for a module whose base DLL name matches
    // (case-insensitive), then look the routine up in its export table.
    //
    for (PLIST_ENTRY link = PsLoadedModuleList->Flink;
         link != PsLoadedModuleList;
         link = link->Flink)
    {
        PKLDR_DATA_TABLE_ENTRY entry;

        entry = CONTAINING_RECORD(link, KLDR_DATA_TABLE_ENTRY, InLoadOrderLinks);

        if (!RtlEqualUnicodeString(&entry->BaseDllName, &moduleName, TRUE))
        {
            continue;
        }

        routine = RtlFindExportedRoutineByName(entry->DllBase, RoutineName);
        if (routine)
        {
            break;
        }
    }

    ExReleaseResourceLite(PsLoadedModuleResource);
    KeLeaveCriticalRegion();

    if (!routine)
    {
        KphTracePrint(TRACE_LEVEL_WARNING,
                      GENERAL,
                      "Failed to find routine %ls!%hs",
                      ModuleName,
                      RoutineName);
    }

    return routine;
}
/**
* \brief Retrieves the address of a function.
*
* \param ModuleName The name of the module to retrieve the function from.
* \param RoutineName The name of the routine to retrieve.
*
* \return The address of the function, or NULL if the function could
* not be found.
*/
_IRQL_requires_max_(PASSIVE_LEVEL)
PVOID KphGetRoutineAddress(
    _In_z_ PCWSTR ModuleName,
    _In_z_ PCSTR RoutineName
    )
{
    PAGED_CODE_PASSIVE();

    //
    // Currently always resolves through the loaded module list; see
    // KphpGetRoutineAddressByModuleList for its locking requirements.
    //
    return KphpGetRoutineAddressByModuleList(ModuleName, RoutineName);
}
|
1e51482a1cf6afb143a36187ecfdc113e54c2cf6
|
fd8a9edc56239237cd57cf755fc1aae843f35c9d
|
/src/mmh3/_mmh3/murmurhash3.h
|
2b883696ccd1dd9463df232b94a4537c79ccff5e
|
[
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
hajimes/mmh3
|
8786236bba38df332cc5e5d64075e9d66f2cdd16
|
3e62a197d85b483d8b51ad93943bb45ce2b17966
|
refs/heads/master
| 2023-07-20T20:09:50.886965
| 2023-07-14T15:40:38
| 2023-07-14T15:40:38
| 8,124,308
| 279
| 64
|
CC0-1.0
| 2023-03-24T06:04:35
| 2013-02-10T15:48:12
|
C++
|
UTF-8
|
C
| false
| false
| 7,609
|
h
|
murmurhash3.h
|
/***
* This file is under MIT <year> Hajime Senuma, just like other files.
* See LICENSE for details.
*
* It was originally written by Austin Appleby in C++ under the public domain,
* but ported to PEP 7 C for Python 3.6 and later by the mmh3 project.
*
* Any issues should be reported to https://github.com/hajimes/mmh3/issues.
*
* The following is the original public domain notice by Austin Appleby.
*/
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#ifndef _MURMURHASH3_H_
#define _MURMURHASH3_H_
// To handle 64-bit data; see https://docs.python.org/3/c-api/arg.html
#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif
#include <Python.h>
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#include <byteswap.h>
#endif
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER) && (_MSC_VER < 1600)

/* Pre-VS2010 compilers lack <stdint.h>; provide the fixed-width types. */
typedef signed __int8 int8_t;
typedef signed __int32 int32_t;
typedef signed __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;

// Other compilers

#else // defined(_MSC_VER)

#include <stdint.h>

#endif // !defined(_MSC_VER)

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)

#define FORCE_INLINE __forceinline

#include <stdlib.h>

#define ROTL32(x, y) _rotl(x, y)
#define ROTL64(x, y) _rotl64(x, y)

#define BIG_CONSTANT(x) (x)

// Other compilers

#else // defined(_MSC_VER)

/* Bug fix: the original tested the undefined identifier GNUC_MINOR (which
 * the preprocessor evaluates as 0), so forced inlining was never enabled
 * for gcc >= 4.4. The correct predefined macro is __GNUC_MINOR__. */
#if ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4))
/* gcc version >= 4.4 4.1 = RHEL 5, 4.4 = RHEL 6. Don't inline for RHEL 5 gcc
 * which is 4.1*/
#define FORCE_INLINE inline __attribute__((always_inline))
#else
#define FORCE_INLINE
#endif

/* Rotate left; r must satisfy 0 < r < width, as a shift by the full word
 * width would be undefined behavior. */
static FORCE_INLINE uint32_t
rotl32(uint32_t x, int8_t r)
{
    return (x << r) | (x >> (32 - r));
}

static FORCE_INLINE uint64_t
rotl64(uint64_t x, int8_t r)
{
    return (x << r) | (x >> (64 - r));
}

#define ROTL32(x, y) rotl32(x, y)
#define ROTL64(x, y) rotl64(x, y)

#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)
//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
/* Read the i-th 32-bit block, byte-swapping on big-endian hosts so the
 * hash output matches the canonical little-endian reference values. */
static FORCE_INLINE uint32_t
getblock32(const uint32_t *p, Py_ssize_t i)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return bswap_32(p[i]);
#else
    return p[i];
#endif
}
/* 64-bit counterpart of getblock32; see the endianness note there. */
static FORCE_INLINE uint64_t
getblock64(const uint64_t *p, Py_ssize_t i)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return bswap_64(p[i]);
#else
    return p[i];
#endif
}
//-----------------------------------------------------------------------------
// Building blocks for multiply and rotate (MUR) operations.
// Names are taken from Google Guava's implementation
/* Multiply-rotate-multiply (MUR) mix of a 32-bit key block, with the
 * x86_32 variant's constants. All arithmetic wraps mod 2^32. */
static FORCE_INLINE uint32_t
mixK1(uint32_t k1)
{
    k1 *= 0xcc9e2d51; /* c1 */
    k1 = ROTL32(k1, 15);
    k1 *= 0x1b873593; /* c2 */
    return k1;
}
/* Fold a mixed key block h2 into the running hash h1:
 * rotate, add, then apply the 5*h + c scramble. */
static FORCE_INLINE uint32_t
mixH1(uint32_t h1, const uint32_t h2, const uint8_t shift, const uint32_t c1)
{
    return (ROTL32(h1, shift) + h2) * 5 + c1;
}
/* Generic 64-bit key-block mix for the x64_128 variant: multiply by c1,
 * rotate by 'shift', multiply by c2 (all mod 2^64). */
static FORCE_INLINE uint64_t
mixK_x64_128(uint64_t k1, const uint8_t shift, const uint64_t c1,
             const uint64_t c2)
{
    return ROTL64(k1 * c1, shift) * c2;
}
/* Mix the first 64-bit key lane of the x64_128 variant (c1 * k, rot 31,
 * * c2 — constants from the reference implementation). */
static FORCE_INLINE uint64_t
mixK1_x64_128(uint64_t k1)
{
    k1 *= BIG_CONSTANT(0x87c37b91114253d5); /* c1 */
    k1 = ROTL64(k1, 31);
    k1 *= BIG_CONSTANT(0x4cf5ad432745937f); /* c2 */
    return k1;
}
/* Mix the second 64-bit key lane of the x64_128 variant; note the
 * constants are applied in the opposite order to mixK1_x64_128. */
static FORCE_INLINE uint64_t
mixK2_x64_128(uint64_t k2)
{
    k2 *= BIG_CONSTANT(0x4cf5ad432745937f); /* c2 */
    k2 = ROTL64(k2, 33);
    k2 *= BIG_CONSTANT(0x87c37b91114253d5); /* c1 */
    return k2;
}
/* Fold lane h2 into running hash h1 for the x64_128 variant:
 * rotate, add, then the 5*h + c scramble (mod 2^64). */
static FORCE_INLINE uint64_t
mixH_x64_128(uint64_t h1, uint64_t h2, const uint8_t shift, const uint32_t c)
{
    return (ROTL64(h1, shift) + h2) * 5 + c;
}
/* Generic 32-bit key-lane mix for the x86_128 variant. The declared
 * return type is uint64_t (kept for interface compatibility) although
 * the computed value always fits in 32 bits. */
static FORCE_INLINE uint64_t
mixK_x86_128(uint32_t k, const uint8_t shift, const uint32_t c1,
             const uint32_t c2)
{
    return ROTL32(k * c1, shift) * c2;
}
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche
/* 32-bit finalization mix: three xorshift/multiply rounds force every
 * input bit to avalanche across the whole word. */
static FORCE_INLINE uint32_t
fmix32(uint32_t h)
{
    h = (h ^ (h >> 16)) * 0x85ebca6b;
    h = (h ^ (h >> 13)) * 0xc2b2ae35;
    return h ^ (h >> 16);
}
//----------
/* 64-bit finalization mix, analogous to fmix32. */
static FORCE_INLINE uint64_t
fmix64(uint64_t k)
{
    k = (k ^ (k >> 33)) * BIG_CONSTANT(0xff51afd7ed558ccd);
    k = (k ^ (k >> 33)) * BIG_CONSTANT(0xc4ceb9fe1a85ec53);
    return k ^ (k >> 33);
}
//-----------------------------------------------------------------------------
// Finalization function
/* Finalize the x64_128 hash state (h1, h2) with the last partial key
 * lanes (k1, k2) and the total input length, writing the 16-byte digest
 * to 'out' in little-endian order.
 * NOTE(review): 'out' is declared const char * yet is written through via
 * a cast — presumably to match the caller's buffer type; confirm before
 * changing the signature. Requires 'out' to be 8-byte aligned for the
 * uint64_t stores — TODO confirm callers guarantee this. */
static FORCE_INLINE void
digest_x64_128_impl(uint64_t h1, uint64_t h2, const uint64_t k1,
                    const uint64_t k2, const Py_ssize_t len, const char *out)
{
    /* Fold in the tail lanes and the message length. */
    h1 ^= mixK1_x64_128(k1);
    h2 ^= mixK2_x64_128(k2);
    h1 ^= len;
    h2 ^= len;
    /* Cross-mix the two lanes, avalanche each, then cross-mix again —
     * exact order matters for output compatibility. */
    h1 += h2;
    h2 += h1;
    h1 = fmix64(h1);
    h2 = fmix64(h2);
    h1 += h2;
    h2 += h1;
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    /* Emit little-endian bytes regardless of host endianness. */
    ((uint64_t *)out)[0] = bswap_64(h1);
    ((uint64_t *)out)[1] = bswap_64(h2);
#else
    ((uint64_t *)out)[0] = h1;
    ((uint64_t *)out)[1] = h2;
#endif
}
/* Finalize the x86_128 hash state (h1..h4) with the last partial key
 * lanes (k1..k4) and the total input length, writing the 16-byte digest
 * to 'out' in little-endian order.
 * NOTE(review): as in digest_x64_128_impl, 'out' is const-qualified but
 * written through a cast, and the uint32_t stores assume 4-byte
 * alignment — confirm at the call sites. */
static FORCE_INLINE void
digest_x86_128_impl(uint32_t h1, uint32_t h2, uint32_t h3, uint32_t h4,
                    const uint32_t k1, const uint32_t k2, const uint32_t k3,
                    const uint32_t k4, const Py_ssize_t len, const char *out)
{
    /* Per-lane constants of the x86_128 variant. */
    const uint32_t c1 = 0x239b961b;
    const uint32_t c2 = 0xab0e9789;
    const uint32_t c3 = 0x38b34ae5;
    const uint32_t c4 = 0xa1e38b93;

    /* Fold in the tail lanes (each uses the next constant pair and its
     * own rotation amount) and then the message length. */
    h1 ^= mixK_x86_128(k1, 15, c1, c2);
    h2 ^= mixK_x86_128(k2, 16, c2, c3);
    h3 ^= mixK_x86_128(k3, 17, c3, c4);
    h4 ^= mixK_x86_128(k4, 18, c4, c1);
    h1 ^= len;
    h2 ^= len;
    h3 ^= len;
    h4 ^= len;
    /* Cross-mix all four lanes, avalanche, cross-mix again — the exact
     * sequence is required for output compatibility. */
    h1 += h2;
    h1 += h3;
    h1 += h4;
    h2 += h1;
    h3 += h1;
    h4 += h1;
    h1 = fmix32(h1);
    h2 = fmix32(h2);
    h3 = fmix32(h3);
    h4 = fmix32(h4);
    h1 += h2;
    h1 += h3;
    h1 += h4;
    h2 += h1;
    h3 += h1;
    h4 += h1;
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    /* Emit little-endian bytes regardless of host endianness. */
    ((uint32_t *)out)[0] = bswap_32(h1);
    ((uint32_t *)out)[1] = bswap_32(h2);
    ((uint32_t *)out)[2] = bswap_32(h3);
    ((uint32_t *)out)[3] = bswap_32(h4);
#else
    ((uint32_t *)out)[0] = h1;
    ((uint32_t *)out)[1] = h2;
    ((uint32_t *)out)[2] = h3;
    ((uint32_t *)out)[3] = h4;
#endif
}
//-----------------------------------------------------------------------------
void
murmurhash3_x86_32(const void *key, Py_ssize_t len, uint32_t seed, void *out);
void
murmurhash3_x86_128(const void *key, Py_ssize_t len, uint32_t seed, void *out);
void
murmurhash3_x64_128(const void *key, Py_ssize_t len, uint32_t seed, void *out);
//-----------------------------------------------------------------------------
#endif // _MURMURHASH3_H_
|
d2ebb170d1f97e5cee124dccc8e235b91afea486
|
aa3befea459382dc5c01c925653d54f435b3fb0f
|
/fs/spiffs/src/spiffs_check.c
|
dd46e81e15e796d8768e7d6eacdfa31750ce0bb1
|
[
"MIT-open-group",
"BSD-3-Clause",
"HPND-sell-variant",
"BSD-4-Clause-UC",
"LicenseRef-scancode-warranty-disclaimer",
"MIT-0",
"LicenseRef-scancode-bsd-atmel",
"LicenseRef-scancode-gary-s-brown",
"LicenseRef-scancode-proprietary-license",
"SunPro",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-other-permissive",
"HPND",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"GPL-1.0-or-later",
"CC-BY-2.0",
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apache/nuttx
|
14519a7bff4a87935d94fb8fb2b19edb501c7cec
|
606b6d9310fb25c7d92c6f95bf61737e3c79fa0f
|
refs/heads/master
| 2023-08-25T06:55:45.822534
| 2023-08-23T16:03:31
| 2023-08-24T21:25:47
| 228,103,273
| 407
| 241
|
Apache-2.0
| 2023-09-14T18:26:05
| 2019-12-14T23:27:55
|
C
|
UTF-8
|
C
| false
| false
| 82,436
|
c
|
spiffs_check.c
|
/****************************************************************************
* fs/spiffs/src/spiffs_check.c
*
* Copyright (C) 2018 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* This is a port of version 0.3.7 of SPIFFS by Peter Andersion. That
* version was originally released under the MIT license but is here re-
* released under the NuttX BSD license.
*
* Copyright (c) 2013-2017 Peter Andersson (pelleplutt1976@gmail.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/* Contains functionality for checking file system consistency
* and mending problems.
* Three levels of consistency checks are implemented:
*
* Look up consistency
* Checks if indices in lookup pages are coherent with page headers
* Object index consistency
* Checks if there are any orphaned object indices (missing object index
* headers).
* If an object index is found but not its header, the object index is
* deleted.
* This is critical for the following page consistency check.
* Page consistency
* Checks for pages that ought to be indexed, ought not to be indexed, are
* multiple indexed
*/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdio.h>
#include <string.h>
#include <debug.h>
#include "spiffs.h"
#include "spiffs_core.h"
#include "spiffs_cache.h"
#include "spiffs_check.h"
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
* Name: spiffs_check_get_data_pgndx
*
* Description:
* Searches in the object indices and returns the referenced page index
* given the object ID and the data span index destroys fs->lu_work
*
* Input Parameters:
* fs - A reference to the SPIFFS volume object instance
* objid - Object ID
* data_spndx - Data span index
* pgnx - Page index
* objndx_pgndx - Object index page index
*
* Returned Value:
* Zero (OK) is returned on success; A negated errno value is returned on
* any failure.
*
****************************************************************************/
static int spiffs_check_get_data_pgndx(FAR struct spiffs_s *fs,
                                       int16_t objid, int16_t data_spndx,
                                       FAR int16_t *pgndx,
                                       FAR int16_t *objndx_pgndx)
{
  uint32_t addr;
  int16_t objndx_spndx;
  int ret;

  /* Calculate object index span index for given data page span index */

  objndx_spndx = SPIFFS_OBJNDX_ENTRY_SPNDX(fs, data_spndx);

  /* Find the object index for the object ID and span index.
   * NOTE: per the function header above, this lookup clobbers fs->lu_work.
   */

  ret = spiffs_objlu_find_id_and_span(fs, objid | SPIFFS_OBJID_NDXFLAG,
                                      objndx_spndx, 0, objndx_pgndx);
  if (ret < 0)
    {
      ferr("ERROR: spiffs_objlu_find_id_and_span() failed: %d\n", ret);
      return ret;
    }

  /* Load the object index entry: compute the physical address of the
   * int16_t page reference inside the located index page.
   */

  addr = SPIFFS_PAGE_TO_PADDR(fs, *objndx_pgndx);
  if (objndx_spndx == 0)
    {
      /* Get the referenced page from object index header */

      addr += sizeof(struct spiffs_pgobj_ndxheader_s) +
              data_spndx * sizeof(int16_t);
    }
  else
    {
      /* Get the referenced page from object index */

      addr += sizeof(struct spiffs_page_objndx_s) +
              SPIFFS_OBJNDX_ENTRY(fs, data_spndx) *
              sizeof(int16_t);
    }

  /* Read the page from FLASH (or the cache) */

  ret = spiffs_cache_read(fs, SPIFFS_OP_T_OBJ_LU2 | SPIFFS_OP_C_READ, 0,
                          addr, sizeof(int16_t), (FAR uint8_t *)pgndx);
  if (ret < 0)
    {
      ferr("ERROR: spiffs_cache_read() failed: %d\n", ret);
    }

  return ret;
}
/****************************************************************************
* Name: spiffs_check_rewrite_page
*
* Description:
* Copies page contents to a new page
*
* Input Parameters:
* fs - A reference to the SPIFFS volume object instance
* cur_pgndx - Current page index
* pghdr - Reference to page header
* new_pgndx - Location to return the new page index
*
* Returned Value:
* Zero (OK) is returned on success; A negated errno value is returned on
* any failure.
*
****************************************************************************/
static int spiffs_check_rewrite_page(FAR struct spiffs_s *fs,
                                     int16_t cur_pgndx,
                                     FAR struct spiffs_page_header_s *pghdr,
                                     FAR int16_t *new_pgndx)
{
  int ret;

  /* Allocate a fresh data page carrying the same header, then clone the
   * payload (everything after the page header) from the old page.
   */

  ret = spiffs_page_allocate_data(fs, pghdr->objid, pghdr, 0, 0, 0, 0,
                                  new_pgndx);
  if (ret >= 0)
    {
      ret = spiffs_phys_cpy(fs, 0,
                            SPIFFS_PAGE_TO_PADDR(fs, *new_pgndx) +
                            sizeof(struct spiffs_page_header_s),
                            SPIFFS_PAGE_TO_PADDR(fs, cur_pgndx) +
                            sizeof(struct spiffs_page_header_s),
                            SPIFFS_DATA_PAGE_SIZE(fs));
      if (ret < 0)
        {
          ferr("ERROR: spiffs_phys_cpy() failed: %d\n", ret);
        }
    }
  else
    {
      ferr("ERROR: spiffs_page_allocate_data() failed: %d\n", ret);
    }

  return ret;
}
/****************************************************************************
 * Name: spiffs_check_rewrite_index
 *
 * Description:
 *   Rewrites the object index for given object ID, replacing the reference
 *   for one data span index with a new data page index.  The rewritten
 *   index is written to a freshly allocated page and the old index page is
 *   deleted.
 *
 * Input Parameters:
 *   fs             - A reference to the SPIFFS volume object instance
 *   objid          - Object ID (the index flag is OR'ed in internally)
 *   data_spndx     - Data span index whose reference is being replaced
 *   new_data_pgndx - New data page index to install for data_spndx
 *   objndx_pgndx   - Page index of the existing object index page
 *
 * Returned Value:
 *   Zero (OK) is returned on success; A negated errno value is returned on
 *   any failure.  -EFAULT indicates that the loaded index page did not
 *   match the provided object ID / span index and cannot be trusted.
 *
 ****************************************************************************/
static int spiffs_check_rewrite_index(FAR struct spiffs_s *fs,
                                      int16_t objid, int16_t data_spndx,
                                      int16_t new_data_pgndx,
                                      int16_t objndx_pgndx)
{
  FAR struct spiffs_page_header_s *objndx_phdr;
  int16_t blkndx;
  int16_t free_pgndx;
  int entry;
  int ret;
  objid |= SPIFFS_OBJID_NDXFLAG;
  /* Find a free lookup entry to receive the rewritten index page */
  ret = spiffs_objlu_find_free(fs, fs->free_blkndx,
                               fs->free_entry, &blkndx, &entry);
  if (ret < 0)
    {
      fwarn("WARNING: spiffs_objlu_find_free() failed: %d\n", ret);
      return ret;
    }
  free_pgndx = SPIFFS_OBJ_LOOKUP_ENTRY_TO_PGNDX(fs, blkndx, entry);
  /* Calculate object index span index for given data page span index */
  int16_t objndx_spndx = SPIFFS_OBJNDX_ENTRY_SPNDX(fs, data_spndx);
  if (objndx_spndx == 0)
    {
      /* Calculate index in index header.
       * NOTE(review): 'entry' assigned here and in the else branch is never
       * read again; the in-memory rewrite below recomputes the position
       * from data_spndx directly.  Behavior is unaffected (dead store).
       */
      entry = data_spndx;
    }
  else
    {
      /* Calculate entry in index */
      entry = SPIFFS_OBJNDX_ENTRY(fs, data_spndx);
    }
  /* Load the whole index page into the lookup work buffer */
  ret = spiffs_cache_read(fs, SPIFFS_OP_T_OBJ_LU2 | SPIFFS_OP_C_READ,
                          0, SPIFFS_PAGE_TO_PADDR(fs, objndx_pgndx),
                          SPIFFS_GEO_PAGE_SIZE(fs), fs->lu_work);
  if (ret < 0)
    {
      ferr("ERROR: spiffs_cache_read() failed: %d\n", ret);
      return ret;
    }
  objndx_phdr = (FAR struct spiffs_page_header_s *)fs->lu_work;
  /* Be ultra safe, double check header against provided data. Return
   * -EFAULT to indicate this condition.
   */
  if (objndx_phdr->objid != objid)
    {
      spiffs_page_delete(fs, free_pgndx);
      return -EFAULT;
    }
  if (objndx_phdr->spndx != objndx_spndx)
    {
      spiffs_page_delete(fs, free_pgndx);
      return -EFAULT;
    }
  if ((objndx_phdr->flags & (SPIFFS_PH_FLAG_USED | SPIFFS_PH_FLAG_NDXDELE |
                             SPIFFS_PH_FLAG_INDEX | SPIFFS_PH_FLAG_FINAL |
                             SPIFFS_PH_FLAG_DELET)) !=
      (SPIFFS_PH_FLAG_NDXDELE | SPIFFS_PH_FLAG_DELET))
    {
      spiffs_page_delete(fs, free_pgndx);
      return -EFAULT;
    }
  /* Rewrite the reference in the in-memory copy of the index page.
   * Span index 0 is the object index header (different layout/offset)
   * than subsequent object index pages.
   */
  if (objndx_spndx == 0)
    {
      ((FAR int16_t *)((FAR uint8_t *)fs->lu_work +
        sizeof(struct spiffs_pgobj_ndxheader_s)))[data_spndx] =
          new_data_pgndx;
    }
  else
    {
      ((FAR int16_t *)((FAR uint8_t *)fs->lu_work +
        sizeof(struct spiffs_page_objndx_s)))
          [SPIFFS_OBJNDX_ENTRY(fs, data_spndx)] = new_data_pgndx;
    }
  /* Write the modified index page to its new location */
  ret = spiffs_cache_write(fs, SPIFFS_OP_T_OBJ_DA | SPIFFS_OP_C_UPDT, 0,
                           SPIFFS_PAGE_TO_PADDR(fs, free_pgndx),
                           SPIFFS_GEO_PAGE_SIZE(fs), fs->lu_work);
  if (ret < 0)
    {
      ferr("ERROR: spiffs_cache_write() failed: %d\n", ret);
      return ret;
    }
  /* Record the object ID in the new page's lookup entry */
  ret = spiffs_cache_write(fs, SPIFFS_OP_T_OBJ_LU | SPIFFS_OP_C_UPDT, 0,
      SPIFFS_BLOCK_TO_PADDR(fs, SPIFFS_BLOCK_FOR_PAGE(fs, free_pgndx)) +
      SPIFFS_OBJ_LOOKUP_ENTRY_FOR_PAGE(fs, free_pgndx) * sizeof(int16_t),
      sizeof(int16_t), (FAR uint8_t *)&objid);
  if (ret < 0)
    {
      ferr("ERROR: spiffs_cache_write() failed: %d\n", ret);
      return ret;
    }
  /* Finally, delete the superseded index page */
  ret = spiffs_page_delete(fs, objndx_pgndx);
  if (ret < 0)
    {
      ferr("ERROR: spiffs_page_delete() failed: %d\n", ret);
    }
  return ret;
}
/****************************************************************************
 * Name: spiffs_check_delobj_lazy
 *
 * Description:
 *   Deletes an object just by marking its object index header page as
 *   deleted (clearing the NDXDELE flag bit).  No data pages are touched.
 *
 * Input Parameters:
 *   fs    - A reference to the SPIFFS volume object instance
 *   objid - Object ID to be deleted
 *
 * Returned Value:
 *   Zero (OK) is returned on success (including the case that the object
 *   does not exist); A negated errno value is returned on any failure.
 *
 ****************************************************************************/
static int spiffs_check_delobj_lazy(FAR struct spiffs_s *fs, int16_t objid)
{
  uint8_t phflags = 0xff;
  int16_t hdr_pgndx;
  int status;

  /* Locate the object index header page for this object ID */

  status = spiffs_objlu_find_id_and_span(fs, objid, 0, 0, &hdr_pgndx);
  if (status < 0)
    {
      /* An object that no longer exists needs no deletion */

      if (status == -ENOENT)
        {
          return OK;
        }

      ferr("ERROR: spiffs_objlu_find_id_and_span() failed: %d\n", status);
      return status;
    }

#ifdef CONFIG_SPIFFS_NO_BLIND_WRITES
  /* Perform a read-modify-write */

  status = spiffs_cache_read(fs, SPIFFS_OP_T_OBJ_LU | SPIFFS_OP_C_READ, 0,
                             SPIFFS_PAGE_TO_PADDR(fs, hdr_pgndx) +
                             offsetof(struct spiffs_page_header_s, flags),
                             sizeof(phflags), &phflags);
  if (status < 0)
    {
      ferr("ERROR: spiffs_cache_read() failed: %d\n", status);
      return status;
    }
#endif

  /* Clear the deleted flag in FLASH to mark the page deleted */

  phflags &= ~SPIFFS_PH_FLAG_NDXDELE;

  status = spiffs_cache_write(fs, SPIFFS_OP_T_OBJ_LU | SPIFFS_OP_C_UPDT, 0,
                              SPIFFS_PAGE_TO_PADDR(fs, hdr_pgndx) +
                              offsetof(struct spiffs_page_header_s, flags),
                              sizeof(phflags), &phflags);
  if (status < 0)
    {
      ferr("ERROR: spiffs_cache_write() failed: %d\n", status);
    }

  return status;
}
/****************************************************************************
 * Name: spiffs_check_luentry_validate
 *
 * Description:
 *   Validates one object lookup entry against the page header of the page
 *   it refers to, and repairs any inconsistency found:  pages may be
 *   rewritten to new locations, indices may be rewritten, or the page may
 *   be deleted.
 *
 * Input Parameters:
 *   fs        - A reference to the SPIFFS volume object instance
 *   lu_objid  - Object ID as recorded in the object lookup
 *   pghdr     - Page header read from the referenced page
 *   cur_pgndx - Current page index
 *   cur_block - Current block
 *   cur_entry - Current entry
 *   reload_lu - Set to true if the caller must reload the lookup (returned)
 *
 * Returned Value:
 *   Zero (OK) is returned on success; A negated errno value is returned on
 *   any failure.
 *
 ****************************************************************************/
static int
spiffs_check_luentry_validate(FAR struct spiffs_s *fs,
                              int16_t lu_objid,
                              FAR struct spiffs_page_header_s *pghdr,
                              int16_t cur_pgndx,
                              int16_t cur_block,
                              int cur_entry,
                              FAR bool *reload_lu)
{
  int16_t objndx_pgndx;
  int16_t ref_pgndx;
  bool delete_page = false;
  int ret = OK;
  /* Check validity, take actions.
   * NOTE: page header flags are active-low:  a cleared bit means the
   * attribute is set (e.g. FLAG_DELET cleared == page deleted).
   */
  if (((lu_objid == SPIFFS_OBJID_DELETED) &&
       (pghdr->flags & SPIFFS_PH_FLAG_DELET)) ||
      ((lu_objid == SPIFFS_OBJID_FREE) &&
       (pghdr->flags & SPIFFS_PH_FLAG_USED) == 0))
    {
      /* Look up entry deleted / free but used in page header */
      spiffs_checkinfo("pgndx=%04x deleted/free in lu but not on page\n",
                       cur_pgndx);
      *reload_lu = true;
      delete_page = true;
      if (pghdr->flags & SPIFFS_PH_FLAG_INDEX)
        {
          /* Header says data page.  The data page can be removed if it is
           * not referenced by some object index.
           */
          ret = spiffs_check_get_data_pgndx(fs, pghdr->objid,
                                            pghdr->spndx,
                                            &ref_pgndx, &objndx_pgndx);
          if (ret == -ENOENT)
            {
              /* No object with this objid, so remove page safely */
              ret = OK;
            }
          else if (ret < 0)
            {
              ferr("ERROR: spiffs_check_get_data_pgndx() failed: %d\n", ret);
              return ret;
            }
          else if (ref_pgndx == cur_pgndx)
            {
              int16_t new_pgndx;
              /* Data page referenced by object index but deleted in lu copy
               * page to new place and re-write the object index to new place
               */
              ret = spiffs_check_rewrite_page(fs, cur_pgndx, pghdr,
                                              &new_pgndx);
              spiffs_checkinfo("Data page not found elsewhere, rewriting "
                               "%04x to new page %04x\n",
                               cur_pgndx, new_pgndx);
              if (ret < 0)
                {
                  ferr("ERROR: spiffs_check_rewrite_page() failed: %d\n",
                       ret);
                  return ret;
                }
              *reload_lu = true;
              spiffs_checkinfo("Page %04x rewritten to %04x, "
                               "affected objndx_pgndx %04x\n",
                               cur_pgndx, new_pgndx, objndx_pgndx);
              ret = spiffs_check_rewrite_index(fs,
                                               pghdr->objid, pghdr->spndx,
                                               new_pgndx, objndx_pgndx);
              if (ret == -EFAULT)
                {
                  int ret2;
                  /* Index bad also, cannot mend this file */
                  spiffs_checkinfo("Index bad %d, cannot mend!\n", ret);
                  ret2 = spiffs_page_delete(fs, new_pgndx);
                  if (ret2 < 0)
                    {
                      ferr("ERROR: spiffs_page_delete() failed: %d\n", ret2);
                      return ret2;
                    }
                  ret2 = spiffs_check_delobj_lazy(fs, pghdr->objid);
                  if (ret2 < 0)
                    {
                      ferr("ERROR: spiffs_check_delobj_lazy() failed: %d\n",
                           ret2);
                      return ret2;
                    }
                }
              if (ret < 0)
                {
                  ferr("ERROR: spiffs_check_rewrite_index() failed: %d\n",
                       ret);
                  return ret;
                }
            }
        }
      else
        {
          /* Header says index page.  The index page can be removed if
           * another index with the same objid and span index is found.
           */
          ret = spiffs_objlu_find_id_and_span(fs,
                                         pghdr->objid | SPIFFS_OBJID_NDXFLAG,
                                         pghdr->spndx, cur_pgndx, 0);
          if (ret == -ENOENT)
            {
              /* No such index page found, check for a data page amongst page
               * headers. lu cannot be trusted
               */
              ret =
                spiffs_objlu_find_id_and_span_byphdr(fs,
                                                     pghdr->objid |
                                                     SPIFFS_OBJID_NDXFLAG,
                                                     0, 0, 0);
              if (ret >= 0)
                {
                  int16_t new_pgndx;
                  /* Ignore other errors. Got a data page also, assume lu
                   * corruption only, rewrite to new page
                   */
                  ret = spiffs_check_rewrite_page(fs, cur_pgndx, pghdr,
                                                  &new_pgndx);
                  spiffs_checkinfo("Index page with data not found, "
                                   "rewriting %04x to new page %04x\n",
                                   cur_pgndx, new_pgndx);
                  if (ret < 0)
                    {
                      ferr("ERROR: spiffs_check_rewrite_page() failed: %d\n",
                           ret);
                      return ret;
                    }
                  *reload_lu = true;
                }
            }
          else if (ret < 0)
            {
              ferr("ERROR: spiffs_objlu_find_id_and_span_byphdr(): %d\n",
                   ret);
              return ret;
            }
        }
    }
  if (lu_objid != SPIFFS_OBJID_FREE && lu_objid != SPIFFS_OBJID_DELETED)
    {
      /* look up entry used */
      if ((pghdr->objid | SPIFFS_OBJID_NDXFLAG) !=
          (lu_objid | SPIFFS_OBJID_NDXFLAG))
        {
          /* Lookup entry and page header disagree on the object ID */
          spiffs_checkinfo("pgndx %04x differ in objid lu="
                           "%04x ph:%04x\n", cur_pgndx, lu_objid,
                           pghdr->objid);
          delete_page = true;
          if ((pghdr->flags & SPIFFS_PH_FLAG_DELET) == 0 ||
              (pghdr->flags & SPIFFS_PH_FLAG_FINAL) ||
              (pghdr->flags & (SPIFFS_PH_FLAG_INDEX |
                               SPIFFS_PH_FLAG_NDXDELE)) == 0)
            {
              /* Page deleted or not finalized, just remove it */
            }
          else if (pghdr->flags & SPIFFS_PH_FLAG_INDEX)
            {
              /* if data page, check for reference to this page */
              ret = spiffs_check_get_data_pgndx(fs,
                                                pghdr->objid,
                                                pghdr->spndx,
                                                &ref_pgndx,
                                                &objndx_pgndx);
              if (ret == -ENOENT)
                {
                  /* no object with this objid, so remove page safely */
                  ret = OK;
                }
              else if (ret < 0)
                {
                  ferr("ERROR: spiffs_check_get_data_pgndx() failed: %d\n",
                       ret);
                  return ret;
                }
              /* if found, rewrite page with object ID, update index, and
               * delete current
               */
              else if (ref_pgndx == cur_pgndx)
                {
                  int16_t new_pgndx;
                  ret = spiffs_check_rewrite_page(fs, cur_pgndx, pghdr,
                                                  &new_pgndx);
                  if (ret < 0)
                    {
                      ferr("ERROR: spiffs_check_rewrite_page() failed: %d\n",
                           ret);
                      return ret;
                    }
                  ret = spiffs_check_rewrite_index(fs, pghdr->objid,
                                                   pghdr->spndx, new_pgndx,
                                                   objndx_pgndx);
                  if (ret == -EFAULT)
                    {
                      int ret2;
                      /* Index bad also, cannot mend this file */
                      spiffs_checkinfo("Index bad %d, cannot mend!\n", ret);
                      ret2 = spiffs_page_delete(fs, new_pgndx);
                      if (ret2 < 0)
                        {
                          ferr("ERROR: spiffs_page_delete() failed: %d\n",
                               ret2);
                          return ret2;
                        }
                      ret2 = spiffs_check_delobj_lazy(fs, pghdr->objid);
                      if (ret2 < 0)
                        {
                          ferr("ERROR: spiffs_check_delobj_lazy(): %d\n",
                               ret2);
                          return ret2;
                        }
                      *reload_lu = true;
                    }
                  if (ret < 0)
                    {
                      ferr("ERROR: spiffs_check_rewrite_index(): %d\n",
                           ret);
                      return ret;
                    }
                }
            }
          else
            {
              int16_t objndx_pgndx_lu;
              int16_t objndx_pgndx_ph;
              /* Else if index, check for other pages with both ID's and
               * span index
               *
               * See if other object index page exists for lookup objid
               * and span index
               */
              ret = spiffs_objlu_find_id_and_span(fs,
                                           lu_objid | SPIFFS_OBJID_NDXFLAG,
                                           pghdr->spndx, 0,
                                           &objndx_pgndx_lu);
              if (ret == -ENOENT)
                {
                  ret = OK;
                  objndx_pgndx_lu = 0;
                }
              else if (ret < 0)
                {
                  ferr("ERROR: spiffs_objlu_find_id_and_span() failed: %d\n",
                       ret);
                  return ret;
                }
              /* See if other object index exists for page header objid and
               * span index
               */
              ret = spiffs_objlu_find_id_and_span(fs,
                                          pghdr->objid | SPIFFS_OBJID_NDXFLAG,
                                          pghdr->spndx, 0, &objndx_pgndx_ph);
              if (ret == -ENOENT)
                {
                  ret = OK;
                  objndx_pgndx_ph = 0;
                }
              else if (ret < 0)
                {
                  ferr("ERROR: spiffs_objlu_find_id_and_span() failed: %d\n",
                       ret);
                  return ret;
                }
              /* If both ID's found, just delete current */
              if (objndx_pgndx_ph == 0 || objndx_pgndx_lu == 0)
                {
                  struct spiffs_page_header_s new_ph;
                  int16_t data_pgndx_lu;
                  int16_t data_pgndx_ph;
                  int16_t new_pgndx;
                  /* Otherwise try finding first corresponding data pages.
                   *
                   * See if other data page exists for look up objid and
                   * span index
                   */
                  ret =
                    spiffs_objlu_find_id_and_span(fs,
                                            lu_objid & ~SPIFFS_OBJID_NDXFLAG,
                                            0, 0, &data_pgndx_lu);
                  if (ret == -ENOENT)
                    {
                      ret = OK;
                      objndx_pgndx_lu = 0;
                    }
                  else if (ret < 0)
                    {
                      ferr("ERROR: spiffs_objlu_find_id_and_span(): %d\n",
                           ret);
                      return ret;
                    }
                  /* See if other data page exists for page header objid
                   * and span index
                   */
                  ret =
                    spiffs_objlu_find_id_and_span(fs,
                                                  pghdr->objid &
                                                  ~SPIFFS_OBJID_NDXFLAG,
                                                  0, 0, &data_pgndx_ph);
                  if (ret == -ENOENT)
                    {
                      ret = OK;
                      objndx_pgndx_ph = 0;
                    }
                  else if (ret < 0)
                    {
                      ferr("ERROR: spiffs_objlu_find_id_and_span(): %d\n",
                           ret);
                      return ret;
                    }
                  new_ph.flags = 0xff & ~(SPIFFS_PH_FLAG_USED |
                                          SPIFFS_PH_FLAG_INDEX |
                                          SPIFFS_PH_FLAG_FINAL);
                  new_ph.spndx = pghdr->spndx;
                  if ((objndx_pgndx_lu != 0 && data_pgndx_lu != 0 &&
                       data_pgndx_ph != 0 && objndx_pgndx_ph == 0) ||
                      (objndx_pgndx_lu == 0 && data_pgndx_ph &&
                       objndx_pgndx_ph == 0))
                    {
                      /* Got a data page for page header objid rewrite as
                       * objid_ph
                       */
                      new_ph.objid = pghdr->objid | SPIFFS_OBJID_NDXFLAG;
                      ret = spiffs_check_rewrite_page(fs, cur_pgndx, &new_ph,
                                                      &new_pgndx);
                      spiffs_checkinfo(
                        "Rewrite page %04x as %04x to pgndx %04x\n",
                        cur_pgndx, new_ph.objid, new_pgndx);
                      if (ret < 0)
                        {
                          ferr("ERROR: spiffs_check_rewrite_page(): %d\n",
                               ret);
                          return ret;
                        }
                      *reload_lu = true;
                    }
                  else if ((objndx_pgndx_ph != 0 && data_pgndx_ph != 0 &&
                            data_pgndx_lu != 0 && objndx_pgndx_lu == 0) ||
                           (objndx_pgndx_ph == 0 && data_pgndx_lu &&
                            objndx_pgndx_lu == 0))
                    {
                      /* Got a data page for look up objid rewrite as
                       * objid_lu
                       */
                      new_ph.objid = lu_objid | SPIFFS_OBJID_NDXFLAG;
                      spiffs_checkinfo("Rewrite page %04x as %04x\n",
                                       cur_pgndx, new_ph.objid);
                      ret = spiffs_check_rewrite_page(fs, cur_pgndx, &new_ph,
                                                      &new_pgndx);
                      if (ret < 0)
                        {
                          ferr("ERROR: spiffs_check_rewrite_page(): %d\n",
                               ret);
                          return ret;
                        }
                      *reload_lu = true;
                    }
                  else
                    {
                      /* Cannot safely do anything */
                      spiffs_checkinfo("Nothing to do, just delete\n");
                    }
                }
            }
        }
      else if (((lu_objid & SPIFFS_OBJID_NDXFLAG) != 0 &&
                (pghdr->flags & SPIFFS_PH_FLAG_INDEX) != 0) ||
               ((lu_objid & SPIFFS_OBJID_NDXFLAG) == 0 &&
                (pghdr->flags & SPIFFS_PH_FLAG_INDEX) == 0))
        {
          int16_t data_pgndx;
          int16_t objndx_pgndx_d;
          /* The lookup entry and the page header disagree about whether
           * this page is an index page or a data page.
           */
          spiffs_checkinfo("%04x lu/page index marking differ\n", cur_pgndx);
          /* see if other data page exists for given objid and span index */
          ret = spiffs_objlu_find_id_and_span(fs,
                                          lu_objid & ~SPIFFS_OBJID_NDXFLAG,
                                          pghdr->spndx, cur_pgndx, &data_pgndx);
          if (ret == -ENOENT)
            {
              ret = OK;
              data_pgndx = 0;
            }
          else if (ret < 0)
            {
              ferr("ERROR: spiffs_objlu_find_id_and_span() failed: %d\n",
                   ret);
              return ret;
            }
          /* See if other object index exists for given objid and span
           * index
           */
          ret = spiffs_objlu_find_id_and_span(fs,
                                          lu_objid | SPIFFS_OBJID_NDXFLAG,
                                          pghdr->spndx, cur_pgndx,
                                          &objndx_pgndx_d);
          if (ret == -ENOENT)
            {
              ret = OK;
              objndx_pgndx_d = 0;
            }
          else if (ret < 0)
            {
              ferr("ERROR: spiffs_objlu_find_id_and_span() failed: %d\n",
                   ret);
              return ret;
            }
          delete_page = true;
          /* If other data page exists and object index exists, just delete
           * page
           */
          if (data_pgndx != 0 && objndx_pgndx_d != 0)
            {
              spiffs_checkinfo(
                "Other index and data page exists, simply remove\n");
            }
          /* If only data page exists, make this page index */
          else if (data_pgndx && objndx_pgndx_d == 0)
            {
              struct spiffs_page_header_s new_ph;
              int16_t new_pgndx;
              spiffs_checkinfo("Other data page exists, make this index\n");
              new_ph.flags = 0xff & ~(SPIFFS_PH_FLAG_USED |
                                      SPIFFS_PH_FLAG_FINAL |
                                      SPIFFS_PH_FLAG_INDEX);
              new_ph.objid = lu_objid | SPIFFS_OBJID_NDXFLAG;
              new_ph.spndx = pghdr->spndx;
              ret = spiffs_page_allocate_data(fs, new_ph.objid, &new_ph,
                                              0, 0, 0, 1, &new_pgndx);
              if (ret < 0)
                {
                  ferr("ERROR: spiffs_page_allocate_data() failed: %d\n",
                       ret);
                  return ret;
                }
              ret = spiffs_phys_cpy(fs, 0,
                                    SPIFFS_PAGE_TO_PADDR(fs, new_pgndx) +
                                    sizeof(struct spiffs_page_header_s),
                                    SPIFFS_PAGE_TO_PADDR(fs, cur_pgndx) +
                                    sizeof(struct spiffs_page_header_s),
                                    SPIFFS_GEO_PAGE_SIZE(fs) -
                                    sizeof(struct spiffs_page_header_s));
              if (ret < 0)
                {
                  ferr("ERROR: spiffs_phys_cpy() failed: %d\n", ret);
                  return ret;
                }
            }
          /* If only index exists, make data page */
          else if (data_pgndx == 0 && objndx_pgndx_d)
            {
              struct spiffs_page_header_s new_ph;
              int16_t new_pgndx;
              spiffs_checkinfo("Other index page exists, make this data\n");
              new_ph.flags = 0xff & ~(SPIFFS_PH_FLAG_USED |
                                      SPIFFS_PH_FLAG_FINAL);
              new_ph.objid = lu_objid & ~SPIFFS_OBJID_NDXFLAG;
              new_ph.spndx = pghdr->spndx;
              ret = spiffs_page_allocate_data(fs, new_ph.objid, &new_ph,
                                              0, 0, 0, 1, &new_pgndx);
              if (ret < 0)
                {
                  ferr("ERROR: spiffs_page_allocate_data() failed: %d\n",
                       ret);
                  return ret;
                }
              ret = spiffs_phys_cpy(fs, 0,
                                    SPIFFS_PAGE_TO_PADDR(fs, new_pgndx) +
                                    sizeof(struct spiffs_page_header_s),
                                    SPIFFS_PAGE_TO_PADDR(fs, cur_pgndx) +
                                    sizeof(struct spiffs_page_header_s),
                                    SPIFFS_GEO_PAGE_SIZE(fs) -
                                    sizeof(struct spiffs_page_header_s));
              if (ret < 0)
                {
                  ferr("ERROR: spiffs_phys_cpy() failed: %d\n", ret);
                  return ret;
                }
            }
          else
            {
              /* If nothing exists, we cannot safely make a decision -
               * delete
               */
            }
        }
      else if ((pghdr->flags & SPIFFS_PH_FLAG_DELET) == 0)
        {
          spiffs_checkinfo("pgndx=%04x busy in lu but deleted on page\n",
                           cur_pgndx);
          /* NOTE(review): uses 1 where the rest of the function uses true;
           * equivalent, but inconsistent style.
           */
          delete_page = 1;
        }
      else if ((pghdr->flags & SPIFFS_PH_FLAG_FINAL))
        {
          spiffs_checkinfo("pgndx=%04x busy but not final\n",
                           cur_pgndx);
          /* Page can be removed if not referenced by object index */
          *reload_lu = true;
          ret = spiffs_check_get_data_pgndx(fs, lu_objid, pghdr->spndx,
                                            &ref_pgndx, &objndx_pgndx);
          if (ret == -ENOENT)
            {
              /* No object with this ID, so remove page safely */
              ret = OK;
              delete_page = true;
            }
          else if (ret < 0)
            {
              /* NOTE(review): message names the wrong function; the failing
               * call is spiffs_check_get_data_pgndx(), not
               * spiffs_phys_cpy().
               */
              ferr("ERROR: spiffs_phys_cpy() failed: %d\n", ret);
              return ret;
            }
          else if (ref_pgndx != cur_pgndx)
            {
              spiffs_checkinfo(
                "Other finalized page is referred, just delete\n");
              delete_page = true;
            }
          else
            {
              uint8_t flags = 0xff;
              /* page referenced by object index but not final
               * just finalize
               */
              spiffs_checkinfo("Unfinalized page is referred, finalizing\n");
#ifdef CONFIG_SPIFFS_NO_BLIND_WRITES
              ret = spiffs_cache_read(fs,
                                      SPIFFS_OP_T_OBJ_DA | SPIFFS_OP_C_READ,
                                      0, SPIFFS_PAGE_TO_PADDR(fs, cur_pgndx) +
                                      offsetof(struct spiffs_page_header_s, flags),
                                      sizeof(flags), &flags);
              if (ret < 0)
                {
                  ferr("ERROR: spiffs_cache_read() failed: %d\n", ret);
                  return ret;
                }
#endif
              flags &= ~SPIFFS_PH_FLAG_FINAL;
              ret = spiffs_cache_write(fs,
                                       SPIFFS_OP_T_OBJ_DA | SPIFFS_OP_C_UPDT,
                                       0, SPIFFS_PAGE_TO_PADDR(fs, cur_pgndx) +
                                       offsetof(struct spiffs_page_header_s, flags),
                                       sizeof(flags), &flags);
              if (ret < 0)
                {
                  ferr("ERROR: spiffs_cache_write() failed: %d\n", ret);
                  return ret;
                }
            }
        }
    }
  if (delete_page)
    {
      spiffs_checkinfo("Deleting page %04x\n", cur_pgndx);
      ret = spiffs_page_delete(fs, cur_pgndx);
      if (ret < 0)
        {
          ferr("ERROR: spiffs_page_delete() failed: %d\n", ret);
          return ret;
        }
    }
  return ret;
}
/****************************************************************************
 * Name: spiffs_lucheck_callback
 *
 * Description:
 *   This is a callback from spiffs_foreach_objlu().  It is part of the
 *   logic of spiffs_check_luconsistency().  For each lookup entry it reads
 *   the corresponding page header and validates the entry against it.
 *
 * Input Parameters:
 *   fs         - A reference to the SPIFFS volume object instance
 *   objid      - Object ID
 *   cur_block  - Current block
 *   cur_entry  - Current entry
 *   user_const - User provided constant data (unused)
 *   user_var   - User provided variable data (unused)
 *
 * Returned Value:
 *   Returns SPIFFS_VIS_COUNTINUE_RELOAD, SPIFFS_VIS_COUNTINUE, or a
 *   negated errno value in the event of a failure.
 *
 ****************************************************************************/
static int spiffs_lucheck_callback(FAR struct spiffs_s *fs, int16_t objid,
                                   int16_t cur_block, int cur_entry,
                                   FAR const void *user_const,
                                   FAR void *user_var)
{
  struct spiffs_page_header_s pghdr;
  bool reload = false;
  int16_t pgndx;
  int status;

  pgndx = SPIFFS_OBJ_LOOKUP_ENTRY_TO_PGNDX(fs, cur_block, cur_entry);

  /* Fetch the page header for this lookup entry */

  status = spiffs_cache_read(fs, SPIFFS_OP_T_OBJ_LU2 | SPIFFS_OP_C_READ,
                             0, SPIFFS_PAGE_TO_PADDR(fs, pgndx),
                             sizeof(struct spiffs_page_header_s),
                             (FAR uint8_t *)&pghdr);
  if (status < 0)
    {
      ferr("ERROR: spiffs_cache_read() failed: %d\n", status);
      return status;
    }

  /* Validate the lookup entry against the header, repairing as needed */

  status = spiffs_check_luentry_validate(fs, objid, &pghdr, pgndx,
                                         cur_block, cur_entry, &reload);
  if (status < 0)
    {
      ferr("ERROR: spiffs_check_luentry_validate() failed: %d\n", status);
      return status;
    }

  /* Tell the visitor whether the lookup must be re-read after repairs */

  if (reload)
    {
      return SPIFFS_VIS_COUNTINUE_RELOAD;
    }

  return SPIFFS_VIS_COUNTINUE;
}
/****************************************************************************
 * Name: spifss_check_objndx_search
 *
 * Description:
 *   Searches the temporary object ID table (kept in fs->work) for the
 *   given object ID, ignoring the index flag bit on both sides.
 *
 * Input Parameters:
 *   fs    - A reference to the SPIFFS volume object instance
 *   objid - The Object ID to look for
 *
 * Returned Value:
 *   The table index associated with the objid is returned on success.
 *   -ENOENT is returned if the objid was not found.
 *
 ****************************************************************************/
static int spifss_check_objndx_search(FAR struct spiffs_s *fs, int16_t objid)
{
  FAR int16_t *table = (FAR int16_t *)fs->work;
  int ndx;

  /* Compare without the index flag bit */

  objid &= ~SPIFFS_OBJID_NDXFLAG;

  for (ndx = 0; ndx < SPIFFS_GEO_PAGE_SIZE(fs) / sizeof(int16_t); ndx++)
    {
      if ((table[ndx] & ~SPIFFS_OBJID_NDXFLAG) == objid)
        {
          return ndx;
        }
    }

  return -ENOENT;
}
/****************************************************************************
 * Name: spiffs_check_objidconsistency_callback
 *
 * Description:
 *   Check object index consistency. This is callback from
 *   spiffs_foreach_objlu() and logically a part of
 *   spiffs_check_objidconsistency().  Object IDs of reachable index
 *   headers are cached in a temporary table in fs->work; index pages whose
 *   header cannot be found are deleted as orphans.
 *
 * Input Parameters:
 *   fs         - A reference to the SPIFFS volume object instance
 *   objid      - Object ID
 *   cur_block  - Current block
 *   cur_entry  - Current entry
 *   user_const - User provided constant data (unused)
 *   user_var   - User provided variable data (current table write index)
 *
 * Returned Value:
 *   Returns SPIFFS_VIS_COUNTINUE_RELOAD, SPIFFS_VIS_COUNTINUE, or a
 *   negated errno value in the event of a failure.
 *
 ****************************************************************************/
static int spiffs_check_objidconsistency_callback(FAR struct spiffs_s *fs,
                                                  int16_t objid,
                                                  int16_t cur_block,
                                                  int cur_entry,
                                                  FAR const void *user_const,
                                                  FAR void *user_var)
{
  FAR uint32_t *log_ndx = (FAR uint32_t *)user_var;
  FAR int16_t *obj_table = (FAR int16_t *)fs->work;
  int retc = SPIFFS_VIS_COUNTINUE;
  int ret = OK;
  /* Only index pages (objid has the index flag set) are of interest */
  if (objid != SPIFFS_OBJID_FREE && objid != SPIFFS_OBJID_DELETED &&
      (objid & SPIFFS_OBJID_NDXFLAG) != 0)
    {
      struct spiffs_page_header_s pghdr;
      int16_t cur_pgndx;
      cur_pgndx = SPIFFS_OBJ_LOOKUP_ENTRY_TO_PGNDX(fs, cur_block, cur_entry);
      /* Load header */
      ret = spiffs_cache_read(fs, SPIFFS_OP_T_OBJ_LU2 | SPIFFS_OP_C_READ,
                              0, SPIFFS_PAGE_TO_PADDR(fs, cur_pgndx),
                              sizeof(struct spiffs_page_header_s), (uint8_t *)&pghdr);
      if (ret < 0)
        {
          ferr("ERROR: spiffs_cache_read() failed: %d\n", ret);
          return ret;
        }
      /* An index header that was only partially deleted is removed here */
      if (pghdr.spndx == 0 &&
          (pghdr.flags & (SPIFFS_PH_FLAG_INDEX | SPIFFS_PH_FLAG_FINAL |
                          SPIFFS_PH_FLAG_DELET | SPIFFS_PH_FLAG_NDXDELE)) ==
          (SPIFFS_PH_FLAG_DELET))
        {
          spiffs_checkinfo("pgndx=%04x, objid=%04x spndx=%04x "
                           "header not fully deleted - deleting\n",
                           cur_pgndx, objid, pghdr.spndx);
          ret = spiffs_page_delete(fs, cur_pgndx);
          if (ret < 0)
            {
              ferr("ERROR: spiffs_page_delete() failed: %d\n", ret);
              return ret;
            }
          return retc;
        }
      /* A fully deleted index page needs no further inspection */
      if ((pghdr.flags & (SPIFFS_PH_FLAG_INDEX | SPIFFS_PH_FLAG_FINAL |
                          SPIFFS_PH_FLAG_DELET | SPIFFS_PH_FLAG_NDXDELE)) ==
          (SPIFFS_PH_FLAG_DELET | SPIFFS_PH_FLAG_NDXDELE))
        {
          return retc;
        }
      if (pghdr.spndx == 0)
        {
          int ret2;
          /* objndx header page, register objid as reachable */
          ret2 = spifss_check_objndx_search(fs, objid);
          if (ret2 < 0)
            {
              /* Not registered, do it.
               * NOTE(review): the table write index wraps around when full,
               * so older registrations may be overwritten.
               */
              obj_table[*log_ndx] = objid & ~SPIFFS_OBJID_NDXFLAG;
              (*log_ndx)++;
              if (*log_ndx >= SPIFFS_GEO_PAGE_SIZE(fs) / sizeof(int16_t))
                {
                  *log_ndx = 0;
                }
            }
        }
      else
        {
          bool delete = false;
          int ret2;
          /* Span index
           * objndx page, see if header can be found
           */
          ret2 = spifss_check_objndx_search(fs, objid);
          if (ret2 < 0)
            {
              int16_t objhdr_pgndx;
              /* Not in temporary index, try finding it */
              ret = spiffs_objlu_find_id_and_span(fs,
                                            objid | SPIFFS_OBJID_NDXFLAG,
                                            0, 0, &objhdr_pgndx);
              retc = SPIFFS_VIS_COUNTINUE_RELOAD;
              if (ret >= 0)
                {
                  /* Found, register as reachable */
                  obj_table[*log_ndx] = objid & ~SPIFFS_OBJID_NDXFLAG;
                }
              else if (ret == -ENOENT)
                {
                  /* Not found, register as unreachable (index flag set
                   * marks the entry unreachable in the table)
                   */
                  delete = true;
                  obj_table[*log_ndx] = objid | SPIFFS_OBJID_NDXFLAG;
                }
              else if (ret < 0)
                {
                  ferr("ERROR: spiffs_objlu_find_id_and_span() failed: %d\n",
                       ret);
                  return ret;
                }
              (*log_ndx)++;
              if (*log_ndx >= SPIFFS_GEO_PAGE_SIZE(fs) / sizeof(int16_t))
                {
                  *log_ndx = 0;
                }
            }
          else
            {
              /* In temporary index, check reachable flag */
              if ((obj_table[ret2] & SPIFFS_OBJID_NDXFLAG))
                {
                  /* Registered as unreachable */
                  delete = true;
                }
            }
          if (delete)
            {
              spiffs_checkinfo("pgndx=%04x objid=%04x spndx:%04x"
                               " is orphan index - deleting\n",
                               cur_pgndx, objid, pghdr.spndx);
              ret = spiffs_page_delete(fs, cur_pgndx);
              if (ret < 0)
                {
                  ferr("ERROR: spiffs_page_delete() failed: %d\n", ret);
                  return ret;
                }
            }
        }
    }
  return retc;
}
/****************************************************************************
 * Public Functions
 ****************************************************************************/
/****************************************************************************
 * Name: spiffs_check_luconsistency
 *
 * Description:
 *   Scans all object look up. For each entry, corresponding page header is
 *   checked for validity. If an object index header page is found, this is
 *   also checked
 *
 * Input Parameters:
 *   fs - A reference to the SPIFFS volume object instance
 *
 * Returned Value:
 *   Zero (OK) is returned on success; A negated errno value is returned on
 *   any failure.
 *
 ****************************************************************************/
int spiffs_check_luconsistency(FAR struct spiffs_s *fs)
{
  int status;

  /* Visit every lookup entry; the callback performs the actual checks.
   * A full traversal ends with SPIFFS_VIS_END which maps to success.
   */

  status = spiffs_foreach_objlu(fs, 0, 0, 0, 0, spiffs_lucheck_callback,
                                0, 0, 0, 0);
  return (status == SPIFFS_VIS_END) ? OK : status;
}
/****************************************************************************
* Name: spiffs_check_pgconsistency
*
* Description:
* Checks consistency amongst all pages and fixes irregularities
* Scans all pages (except lu pages), reserves 4 bits in working memory
* for each page
*
* bit 0: 0 == FREE|DELETED, 1 == USED
* bit 1: 0 == UNREFERENCED, 1 == REFERENCED
* bit 2: 0 == NOT_INDEX, 1 == INDEX
* bit 3: unused
*
* A consistent file system will have only pages being
*
* - x000 free, unreferenced, not index
* - x011 used, referenced only once, not index
* - x101 used, unreferenced, index
*
* The working memory might not fit all pages so several scans might be
* needed
*
* Input Parameters:
* fs - A reference to the SPIFFS volume object instance
*
* Returned Value:
* Zero (OK) is returned on success; A negated errno value is returned on
* any failure.
*
****************************************************************************/
int spiffs_check_pgconsistency(FAR struct spiffs_s *fs)
{
const uint32_t bits = 4;
const int16_t pages_per_scan = SPIFFS_GEO_PAGE_SIZE(fs) * 8 / bits;
int16_t pgndx_offset = 0;
int ret = OK;
/* For each range of pages fitting into work memory */
while (pgndx_offset < SPIFFS_GEO_PAGES_PER_BLOCK(fs) *
SPIFFS_GEO_BLOCK_COUNT(fs))
{
int16_t cur_block = 0;
bool restart = false;
memset(fs->work, 0, SPIFFS_GEO_PAGE_SIZE(fs));
/* Build consistency bitmap for ID range traversing all blocks */
while (!restart && cur_block < SPIFFS_GEO_BLOCK_COUNT(fs))
{
/* Traverse each page except for lookup pages */
int16_t cur_pgndx = SPIFFS_OBJ_LOOKUP_PAGES(fs) +
SPIFFS_GEO_PAGES_PER_BLOCK(fs) * cur_block;
while (!restart && cur_pgndx <
SPIFFS_GEO_PAGES_PER_BLOCK(fs) * (cur_block + 1))
{
struct spiffs_page_header_s pghdr;
uint32_t pgndx_bytendx;
uint8_t pgndx_bitndx;
bool within_range;
/* read header */
ret =
spiffs_cache_read(fs, SPIFFS_OP_T_OBJ_LU2 | SPIFFS_OP_C_READ,
0, SPIFFS_PAGE_TO_PADDR(fs, cur_pgndx),
sizeof(struct spiffs_page_header_s),
(FAR uint8_t *)&pghdr);
if (ret < 0)
{
ferr("ERROR: spiffs_cache_read() failed: %d\n", ret);
return ret;
}
within_range = (cur_pgndx >= pgndx_offset &&
cur_pgndx < pgndx_offset + pages_per_scan);
pgndx_bytendx = (cur_pgndx - pgndx_offset) / (8 / bits);
pgndx_bitndx = (cur_pgndx & ((8 / bits) - 1)) * bits;
if (within_range &&
(pghdr.flags & SPIFFS_PH_FLAG_DELET) &&
(pghdr.flags & SPIFFS_PH_FLAG_USED) == 0)
{
/* Used */
fs->work[pgndx_bytendx] |= (1 << (pgndx_bitndx + 0));
}
if ((pghdr.flags & SPIFFS_PH_FLAG_DELET) &&
(pghdr.flags & SPIFFS_PH_FLAG_NDXDELE) &&
(pghdr.flags & (SPIFFS_PH_FLAG_INDEX |
SPIFFS_PH_FLAG_USED)) == 0)
{
FAR struct spiffs_page_header_s *objndx_phdr;
FAR int16_t *object_page_index;
int16_t data_spndx_offset;
int entries;
int i;
/* Found non-deleted index */
if (within_range)
{
fs->work[pgndx_bytendx] |= (1 << (pgndx_bitndx + 2));
}
/* Load non-deleted index */
ret = spiffs_cache_read(fs,
SPIFFS_OP_T_OBJ_LU2 | SPIFFS_OP_C_READ,
0, SPIFFS_PAGE_TO_PADDR(fs, cur_pgndx),
SPIFFS_GEO_PAGE_SIZE(fs), fs->lu_work);
if (ret < 0)
{
ferr("ERROR: spiffs_cache_read() failed: %d\n", ret);
return ret;
}
/* traverse index for referenced pages */
objndx_phdr =
(FAR struct spiffs_page_header_s *)fs->lu_work;
if (pghdr.spndx == 0)
{
/* object header page index */
entries = SPIFFS_OBJHDR_NDXLEN(fs);
data_spndx_offset = 0;
object_page_index =
(FAR int16_t *)((FAR uint8_t *)fs->lu_work +
sizeof(struct spiffs_pgobj_ndxheader_s));
}
else
{
/* Object page index */
entries = SPIFFS_OBJNDX_LEN(fs);
data_spndx_offset = SPIFFS_OBJHDR_NDXLEN(fs) +
SPIFFS_OBJNDX_LEN(fs) *
(pghdr.spndx - 1);
object_page_index =
(FAR int16_t *)((FAR uint8_t *) fs->lu_work +
sizeof(struct spiffs_page_objndx_s));
}
/* For all entries in index */
for (i = 0; !restart && i < entries; i++)
{
int16_t rpgndx = object_page_index[i];
bool rpgndx_within_range;
rpgndx_within_range = (rpgndx >= pgndx_offset &&
rpgndx < pgndx_offset +
pages_per_scan);
if ((rpgndx != (int16_t) - 1 &&
rpgndx > SPIFFS_GEO_PAGE_COUNT(fs)) ||
(rpgndx_within_range &&
SPIFFS_IS_LOOKUP_PAGE(fs, rpgndx)))
{
int16_t data_pgndx;
/* Bad reference */
spiffs_checkinfo("pgndx=%04x bad pgndx / LU "
"referenced from page %04x\n",
rpgndx, cur_pgndx);
/* Check for data page elsewhere */
ret = spiffs_objlu_find_id_and_span(fs,
objndx_phdr->objid &
~SPIFFS_OBJID_NDXFLAG,
data_spndx_offset + i,
0, &data_pgndx);
if (ret == -ENOENT)
{
ret = OK;
data_pgndx = 0;
}
else if (ret < 0)
{
ferr("ERR: spiffs_objlu_find_id_and_span %d\n",
ret);
return ret;
}
if (data_pgndx == 0)
{
struct spiffs_page_header_s new_ph;
/* If not, allocate free page */
new_ph.flags = 0xff & ~(SPIFFS_PH_FLAG_USED |
SPIFFS_PH_FLAG_FINAL);
new_ph.objid = objndx_phdr->objid &
~SPIFFS_OBJID_NDXFLAG;
new_ph.spndx = data_spndx_offset + i;
ret = spiffs_page_allocate_data(fs,
new_ph.objid,
&new_ph, 0, 0, 0, 1,
&data_pgndx);
if (ret < 0)
{
ferr("ERR: spiffs_page_allocate_data %d\n",
ret);
return ret;
}
spiffs_checkinfo("Found no existing data page,"
" created new @ %04x\n",
data_pgndx);
}
/* Remap index */
spiffs_checkinfo("Rewriting index pgndx=%04x\n",
cur_pgndx);
ret =
spiffs_check_rewrite_index(fs,
objndx_phdr->objid |
SPIFFS_OBJID_NDXFLAG,
data_spndx_offset + i,
data_pgndx, cur_pgndx);
if (ret == -EFAULT)
{
/* Index bad also, cannot mend this file */
spiffs_checkinfo("Index bad %d, cannot mend - "
"delete object\n",
ret);
/* Delete file */
ret = spiffs_page_delete(fs, cur_pgndx);
if (ret < 0)
{
ferr("ERROR: spiffs_page_delete(): %d\n",
ret);
return ret;
}
}
else if (ret < 0)
{
ferr("ERR: spiffs_check_rewrite_index(): %d\n",
ret);
return ret;
}
restart = true;
}
else if (rpgndx_within_range)
{
/* Valid reference. read referenced page header */
struct spiffs_page_header_s rphdr;
ret =
spiffs_cache_read(fs,
SPIFFS_OP_T_OBJ_LU2 | SPIFFS_OP_C_READ,
0, SPIFFS_PAGE_TO_PADDR(fs, rpgndx),
sizeof(struct spiffs_page_header_s),
(FAR uint8_t *)&rphdr);
if (ret < 0)
{
ferr("ERROR: spiffs_cache_read() failed: %d\n",
ret);
return ret;
}
/* Cross reference page header check */
if (rphdr.objid != (pghdr.objid &
~SPIFFS_OBJID_NDXFLAG) ||
rphdr.spndx != data_spndx_offset + i ||
(rphdr.flags & (SPIFFS_PH_FLAG_DELET |
SPIFFS_PH_FLAG_INDEX |
SPIFFS_PH_FLAG_USED)) !=
(SPIFFS_PH_FLAG_DELET | SPIFFS_PH_FLAG_INDEX))
{
int16_t data_pgndx;
spiffs_checkinfo(
"pgndx=%04x has inconsistent page header "
"index objid/span:%04x/%04x, "
"ref objid/span:%04x/%04x flags=%02x\n",
rpgndx, pghdr.objid & ~SPIFFS_OBJID_NDXFLAG,
data_spndx_offset + i, rphdr.objid,
rphdr.spndx, rphdr.flags);
/* Try finding correct page */
ret =
spiffs_objlu_find_id_and_span(fs,
pghdr.objid &
~SPIFFS_OBJID_NDXFLAG,
data_spndx_offset + i,
rpgndx, &data_pgndx);
if (ret == -ENOENT)
{
ret = OK;
data_pgndx = 0;
}
else if (ret < 0)
{
ferr("spiffs_objlu_find_id_and_span: %d\n",
ret);
return ret;
}
if (data_pgndx == 0)
{
/* Not found, this index is badly borked */
spiffs_checkinfo(
"Index bad, delete object objid %04x\n",
pghdr.objid);
ret = spiffs_check_delobj_lazy(fs,
pghdr.objid);
if (ret < 0)
{
ferr("spiffs_check_delobj_lazy: %d\n",
ret);
return ret;
}
break;
}
else
{
/* Found it, so rewrite index */
spiffs_checkinfo(
"Found correct data pgndx=%04x, "
"rewrite index pgndx=%04x objid=%04x\n",
data_pgndx, cur_pgndx, pghdr.objid);
ret =
spiffs_check_rewrite_index(
fs, pghdr.objid, data_spndx_offset + i,
data_pgndx, cur_pgndx);
if (ret == -EFAULT)
{
/* Index bad, cannot mend this file */
spiffs_checkinfo(
"Index bad %d, cannot mend!\n", ret);
ret = spiffs_check_delobj_lazy(
fs, pghdr.objid);
}
else if (ret < 0)
{
ferr("spiffs_check_rewrite_index %d\n",
ret);
return ret;
}
restart = true;
}
}
else
{
/* Mark rpgndx as referenced */
const uint32_t rpgndx_byte_ix =
(rpgndx - pgndx_offset) / (8 / bits);
const uint8_t rpgndx_bit_ix =
(rpgndx & ((8 / bits) - 1)) * bits;
if ((fs->work[rpgndx_byte_ix] &
(1 << (rpgndx_bit_ix + 1))) != 0)
{
spiffs_checkinfo(
"pgndx=%04x multiple referenced "
"from page %04x\n",
rpgndx, cur_pgndx);
/* Here, we should have fixed all broken
* references - getting this means there
* must be multiple files with same object
* ID. Only solution is to delete
* the object which is referring to this
* page
*/
spiffs_checkinfo("Removing objid=%04x and"
"page=%04x\n",
pghdr.objid, cur_pgndx);
ret = spiffs_check_delobj_lazy(
fs, pghdr.objid);
if (ret < 0)
{
ferr("spiffs_check_delobj_lazy: %d\n",
ret);
return ret;
}
/* Precaution, delete this page also */
ret = spiffs_page_delete(fs, cur_pgndx);
if (ret < 0)
{
ferr("ERR: spiffs_page_delete(): %d\n",
ret);
return ret;
}
restart = true;
}
fs->work[rpgndx_byte_ix] |=
(1 << (rpgndx_bit_ix + 1));
}
}
}
}
/* Next page */
cur_pgndx++;
}
/* Next block */
cur_block++;
}
/* Check consistency bitmap */
if (!restart)
{
uint32_t byte_ndx;
int16_t objndx_pgndx;
int16_t rpgndx;
uint8_t bit_ndx;
for (byte_ndx = 0;
!restart && byte_ndx < SPIFFS_GEO_PAGE_SIZE(fs);
byte_ndx++)
{
for (bit_ndx = 0; !restart && bit_ndx < 8 / bits; bit_ndx++)
{
uint8_t bitmask;
int16_t cur_pgndx;
bitmask = (fs->work[byte_ndx] >> (bit_ndx * bits)) & 0x7;
cur_pgndx = pgndx_offset + byte_ndx * (8 / bits) + bit_ndx;
/* 000 ok - free, unreferenced, not index */
if (bitmask == 0x1)
{
struct spiffs_page_header_s pghdr;
bool rewrite_ndx_to_this = false;
bool delete_page = false;
/* 001 */
spiffs_checkinfo(
"pgndx=%04x USED, UNREFERENCED, not index\n",
cur_pgndx);
/* Check corresponding object index entry */
ret = spiffs_cache_read(fs,
SPIFFS_OP_T_OBJ_LU2 | SPIFFS_OP_C_READ,
0,
SPIFFS_PAGE_TO_PADDR(fs, cur_pgndx),
sizeof(struct spiffs_page_header_s),
(FAR uint8_t *)&pghdr);
if (ret < 0)
{
ferr("ERROR: spiffs_cache_read() failed: %d\n",
ret);
return ret;
}
ret = spiffs_check_get_data_pgndx(fs, pghdr.objid,
pghdr.spndx, &rpgndx,
&objndx_pgndx);
if (ret >= 0)
{
if (((rpgndx == (int16_t) - 1 ||
rpgndx > SPIFFS_GEO_PAGE_COUNT(fs)) ||
(SPIFFS_IS_LOOKUP_PAGE(fs, rpgndx))))
{
/* Pointing to a bad page altogether, rewrite
* index to this
*/
rewrite_ndx_to_this = true;
spiffs_checkinfo(
"Corresponding ref is bad: "
"%04x, rewrite to this %04x\n",
rpgndx, cur_pgndx);
}
else
{
struct spiffs_page_header_s rphdr;
/* Pointing to something else, check what */
ret =
spiffs_cache_read(fs,
SPIFFS_OP_T_OBJ_LU2 |
SPIFFS_OP_C_READ,
0,
SPIFFS_PAGE_TO_PADDR(fs, rpgndx),
sizeof(struct spiffs_page_header_s),
(FAR uint8_t *)&rphdr);
if (ret < 0)
{
ferr("ERROR: spiffs_cache_read(): %d\n",
ret);
return ret;
}
if (((pghdr.objid & ~SPIFFS_OBJID_NDXFLAG) ==
rphdr.objid) &&
((rphdr.flags & (SPIFFS_PH_FLAG_INDEX |
SPIFFS_PH_FLAG_DELET |
SPIFFS_PH_FLAG_USED |
SPIFFS_PH_FLAG_FINAL)) ==
(SPIFFS_PH_FLAG_INDEX |
SPIFFS_PH_FLAG_DELET)))
{
/* Pointing to something else valid, just
* delete this page then
*/
spiffs_checkinfo(
"Corresponding ref is good but "
"different: %04x, delete this %04x\n",
rpgndx, cur_pgndx);
delete_page = true;
}
/* Pointing to something weird, update index
* to point to this page instead
*/
else if (rpgndx != cur_pgndx)
{
spiffs_checkinfo
("PA: corresponding ref is weird: "
"%04x %s%s%s%s, rewrite this "
"%04x\n", rpgndx,
(rphdr.flags & SPIFFS_PH_FLAG_INDEX) ?
"" : "INDEX ",
(rphdr.flags & SPIFFS_PH_FLAG_DELET) ?
"" : "DELETED ",
(rphdr.flags & SPIFFS_PH_FLAG_USED) ?
"NOTUSED " : "",
(rphdr.flags & SPIFFS_PH_FLAG_FINAL) ?
"NOTFINAL " : "", cur_pgndx);
rewrite_ndx_to_this = true;
}
else
{
/* Should not happen, destined for fubar */
}
}
}
else if (ret == -ENOENT)
{
spiffs_checkinfo("Corresponding ref not found, "
"delete %04x\n",
cur_pgndx);
delete_page = true;
ret = OK;
}
if (rewrite_ndx_to_this)
{
/* If pointing to invalid page, redirect index to
* this page
*/
spiffs_checkinfo(
"Rewrite index objid=%04x data spndx=%04x"
" to point to this pgndx: %04x\n",
pghdr.objid, pghdr.spndx, cur_pgndx);
ret = spiffs_check_rewrite_index(fs, pghdr.objid,
pghdr.spndx, cur_pgndx, objndx_pgndx);
if (ret == -EFAULT)
{
int ret2;
/* Index bad also, cannot mend this file */
spiffs_checkinfo("PA: FIXUP: index bad %d"
", cannot mend!\n", ret);
ret2 = spiffs_page_delete(fs, cur_pgndx);
if (ret2 < 0)
{
ferr("ERROR: spiffs_page_delete(): %d\n",
ret2);
return ret2;
}
ret2 = spiffs_check_delobj_lazy(fs,
pghdr.objid);
if (ret2 < 0)
{
ferr(
"ERR: spiffs_check_delobj_lazy(): %d\n",
ret2);
return ret2;
}
}
else if (ret < 0)
{
ferr("ERR: spiffs_check_rewrite_index(): %d\n",
ret);
return ret;
}
restart = true;
continue;
}
else if (delete_page)
{
spiffs_checkinfo("Deleting page %04x\n",
cur_pgndx);
ret = spiffs_page_delete(fs, cur_pgndx);
if (ret < 0)
{
ferr("ERROR: spiffs_page_delete(): %d\n", ret);
return ret;
}
}
}
if (bitmask == 0x2)
{
/* 010 */
spiffs_checkinfo(
"pgndx=%04x FREE, REFERENCED, not index\n",
cur_pgndx);
/* No op, this should be taken care of when checking
* valid references
*/
}
/* 011 OK - busy, referenced, not index */
if (bitmask == 0x4)
{
/* 100 */
spiffs_checkinfo(
"pgndx=%04x FREE, unreferenced, INDEX\n", cur_pgndx);
/* This should never happen, major fubar */
}
/* 101 OK - busy, unreferenced, index */
if (bitmask == 0x6)
{
/* 110 */
spiffs_checkinfo(
"pgndx=%04x FREE, REFERENCED, INDEX\n", cur_pgndx);
/* No op, this should be taken care of when checking
* valid references
*/
}
if (bitmask == 0x7)
{
/* 111 */
spiffs_checkinfo(
"pgndx=%04x USED, REFERENCED, INDEX\n", cur_pgndx);
/* No op, this should be taken care of when checking
* valid references
*/
}
}
}
}
spiffs_checkinfo("Processed %04x, restart %d\n",
pgndx_offset, restart);
/* next page range */
if (!restart)
{
pgndx_offset += pages_per_scan;
}
}
return ret;
}
/****************************************************************************
* Name: spiffs_check_pgconsistency
*
* Description:
* Removes orphaned and partially deleted index pages.
* Scans for index pages. When an index page is found, corresponding index
* header is searched for. If no such page exists, the index page cannot
* be reached as no index header exists and must be deleted.
*
* Input Parameters:
* fs - A reference to the SPIFFS volume object instance
*
* Returned Value:
* Zero (OK) is returned on success; A negated errno value is returned on
* any failure.
*
****************************************************************************/
int spiffs_check_objidconsistency(FAR struct spiffs_s *fs)
{
  uint32_t log_ndx = 0;
  int status;

  /* fs->work serves as scratch memory listing the object IDs found while
   * walking the lookup tables (acting as a FIFO if they do not all fit).
   * The SPIFFS_OBJID_NDXFLAG bit of each stored ID marks whether that
   * object is reachable.  Start from a clean slate.
   */

  memset(fs->work, 0, SPIFFS_GEO_PAGE_SIZE(fs));

  status = spiffs_foreach_objlu(fs, 0, 0, 0, 0,
                                spiffs_check_objidconsistency_callback, 0,
                                &log_ndx, 0, 0);

  /* A complete traversal terminates with SPIFFS_VIS_END, which counts as
   * success for this check.
   */

  return (status == SPIFFS_VIS_END) ? OK : status;
}
/****************************************************************************
* Name: spiffs_dump
*
* Description:
* Dump logical flash content
*
* Input Parameters:
* fs - A reference to the volume structure
*
* Returned Value:
* Zero (OK) is returned on success; A negated errno value is returned on
* any failure.
*
****************************************************************************/
#ifdef CONFIG_SPIFFS_DUMP

/* Append formatted text to the local dump-line buffer and clamp the
 * running length.  snprintf() returns the number of characters that WOULD
 * have been written, so on truncation the unclamped sum could exceed the
 * buffer size, making the "size - len" argument of the next call wrap
 * around to a huge size_t and &buffer[len] point past the buffer end.
 */

#define DUMP_APPEND(fmt, ...) \
  do \
    { \
      len += snprintf(&buffer[len], sizeof(buffer) - len, \
                      fmt, ##__VA_ARGS__); \
      if (len > (int)sizeof(buffer) - 1) \
        { \
          len = (int)sizeof(buffer) - 1; \
        } \
    } \
  while (0)

int spiffs_dump(FAR struct spiffs_s *fs)
{
  FAR int16_t *objlu_buf = (FAR int16_t *)fs->lu_work;
  uint32_t pages_per_block;
  uint32_t blocks;
  uint32_t obj_lupages;
  uint32_t data_pgsize;
  uint32_t ndata_pages;
  int16_t pgndx = 0;
  int16_t erase_count;
  char buffer[80];
  int entries_per_page;
  int len = 0;
  int ret = OK;

  entries_per_page = (SPIFFS_GEO_PAGE_SIZE(fs) / sizeof(int16_t));

  /* NOTE(review): pgndx is bounded by the page count but is also passed
   * as the block argument of SPIFFS_ERASE_COUNT_PADDR() below -- confirm
   * against upstream spiffs whether this loop should iterate blocks.
   */

  while (pgndx < SPIFFS_GEO_PAGE_COUNT(fs))
    {
      /* Walk each object lookup page of this unit */

      int obj_lookup_page = 0;
      int cur_entry = 0;

      while (ret >= 0 && obj_lookup_page < (int)SPIFFS_OBJ_LOOKUP_PAGES(fs))
        {
          int entry_offset = obj_lookup_page * entries_per_page;

          ret = spiffs_cache_read(fs, SPIFFS_OP_T_OBJ_LU | SPIFFS_OP_C_READ,
                                  0, pgndx * SPIFFS_GEO_PAGE_SIZE(fs) +
                                  SPIFFS_PAGE_TO_PADDR(fs, obj_lookup_page),
                                  SPIFFS_GEO_PAGE_SIZE(fs), fs->lu_work);

          /* Emit one character per lookup entry:
           *   '.' free, 'x' deleted, 'I' index page, 'D' data page
           */

          while (ret >= 0 &&
                 cur_entry - entry_offset < entries_per_page &&
                 cur_entry < (int)(SPIFFS_GEO_PAGES_PER_BLOCK(fs) -
                                   SPIFFS_OBJ_LOOKUP_PAGES(fs)))
            {
              int16_t objid = objlu_buf[cur_entry - entry_offset];

              if (cur_entry == 0)
                {
                  DUMP_APPEND("%04x ", pgndx);
                }
              else if ((cur_entry & 0x3f) == 0)
                {
                  DUMP_APPEND(" ");
                }

              if (objid == SPIFFS_OBJID_FREE)
                {
                  DUMP_APPEND(".");
                }
              else if (objid == SPIFFS_OBJID_DELETED)
                {
                  DUMP_APPEND("x");
                }
              else if ((objid & SPIFFS_OBJID_NDXFLAG) != 0)
                {
                  DUMP_APPEND("I");
                }
              else
                {
                  DUMP_APPEND("D");
                }

              cur_entry++;

              /* Flush a complete row of 64 entry characters */

              if ((cur_entry & 0x3f) == 0)
                {
                  DUMP_APPEND("\n");
                  spiffs_checkinfo("%s", buffer);
                  len = 0;
                }
            }

          obj_lookup_page++;
        }

      /* Read and report the erase count stored for this unit */

      ret = spiffs_cache_read(fs, SPIFFS_OP_C_READ | SPIFFS_OP_T_OBJ_LU2, 0,
                              SPIFFS_ERASE_COUNT_PADDR(fs, pgndx),
                              sizeof(int16_t), (FAR uint8_t *)&erase_count);
      if (ret < 0)
        {
          ferr("ERROR: spiffs_cache_read() failed: %d\n", ret);
          return ret;
        }

      if (erase_count != (int16_t)-1)
        {
          DUMP_APPEND(" era_cnt=%d\n", erase_count);
        }
      else
        {
          DUMP_APPEND(" era_cnt (N/A)\n");
        }

      spiffs_checkinfo("%s", buffer);
      len = 0;
      pgndx++;
    }

  spiffs_checkinfo("era_cnt_max: %d\n", fs->max_erase_count);
  spiffs_checkinfo("blocks: %d\n", SPIFFS_GEO_PAGE_COUNT(fs));
  spiffs_checkinfo("free_blocks: %d\n", fs->free_blocks);
  spiffs_checkinfo("page_alloc: %d\n", fs->alloc_pages);
  spiffs_checkinfo("page_delet: %d\n", fs->deleted_pages);

  /* The following duplicates some logic from spiffs_statfs().
   * -2 for spare blocks, +1 for emergency page.
   */

  pages_per_block = SPIFFS_GEO_PAGES_PER_BLOCK(fs);
  blocks = SPIFFS_GEO_BLOCK_COUNT(fs);
  obj_lupages = SPIFFS_OBJ_LOOKUP_PAGES(fs);
  data_pgsize = SPIFFS_DATA_PAGE_SIZE(fs);
  ndata_pages = (blocks - 2) * (pages_per_block - obj_lupages) + 1;

  spiffs_checkinfo("used: %ld of %ld\n",
                   (long)(fs->alloc_pages * data_pgsize),
                   (long)(ndata_pages * data_pgsize));
  return OK;
}

#undef DUMP_APPEND
#endif
|
9c75484369ee227ef0d358ad773af825b6df6014
|
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
|
/SOFTWARE/A64-TERES/linux-a64/include/uapi/linux/stddef.h
|
aa9f104287438aa32c622d006d4d30794b9b220b
|
[
"Linux-syscall-note",
"GPL-2.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
OLIMEX/DIY-LAPTOP
|
ae82f4ee79c641d9aee444db9a75f3f6709afa92
|
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
|
refs/heads/rel3
| 2023-08-04T01:54:19.483792
| 2023-04-03T07:18:12
| 2023-04-03T07:18:12
| 80,094,055
| 507
| 92
|
Apache-2.0
| 2023-04-03T07:05:59
| 2017-01-26T07:25:50
|
C
|
UTF-8
|
C
| false
| false
| 28
|
h
|
stddef.h
|
#include <linux/compiler.h>
|
e03344aa42a9680fedb4a358ed93f84425d452c2
|
7664f318ed04bd0680f3d82321c18896e3ef6ad5
|
/src/overlays/actors/ovl_Oceff_Spot/z_oceff_spot.h
|
a96f1878af83c53cc56295f7e0fb1f0d5b681678
|
[] |
no_license
|
zeldaret/oot
|
9c80ce17f2d8fd61514b375f92ee4739b5ce9d4e
|
2875ab4fcf5c5f81d76353d1ee0024c9ea8d0b23
|
refs/heads/master
| 2023-08-29T05:29:31.356427
| 2023-08-28T22:48:52
| 2023-08-28T22:48:52
| 247,875,738
| 4,401
| 802
| null | 2023-09-14T13:34:38
| 2020-03-17T04:02:19
|
C
|
UTF-8
|
C
| false
| false
| 537
|
h
|
z_oceff_spot.h
|
#ifndef Z_OCEFF_SPOT_H
#define Z_OCEFF_SPOT_H

#include "ultra64.h"
#include "global.h"

struct OceffSpot;

/* Per-instance action callback.  NOTE(review): only the typedef is visible
 * here; the dispatching code lives in z_oceff_spot.c -- confirm call sites.
 */
typedef void (*OceffSpotActionFunc)(struct OceffSpot*, PlayState*);

/* Instance state for the OceffSpot actor: the base Actor, two light
 * bindings (node + info pairs), a timer, and the current action callback.
 * The hex comments give byte offsets; do not reorder fields.
 */
typedef struct OceffSpot {
    /* 0x0000 */ Actor actor;
    /* 0x014C */ LightNode* lightNode1;
    /* 0x0150 */ LightInfo lightInfo1;
    /* 0x0160 */ LightNode* lightNode2;
    /* 0x0164 */ LightInfo lightInfo2;
    /* 0x0174 */ f32 unk_174;
    /* 0x0178 */ u16 timer;
    /* 0x017C */ OceffSpotActionFunc actionFunc;
} OceffSpot; // size = 0x0180

#endif
|
80645c876cb9715c6d3841ae7bc7354c8dd5180d
|
efe37ebcbadc6dda488f321f59b34b3a8f918960
|
/snes-examples/logo/snes-logo-pvsneslib/src/main.c
|
34d30d9fcc7ca6e3473d4e887a15a6144f056ef4
|
[
"MIT"
] |
permissive
|
alekmaul/pvsneslib
|
83fd163e1fdca6a4ce7bc49b5562cb7e49a021df
|
69203c23f3e0c0f8a61e9452819f8690aefa476b
|
refs/heads/master
| 2023-08-31T21:09:17.036094
| 2023-08-26T06:21:28
| 2023-08-26T06:21:28
| 32,628,824
| 673
| 83
| null | 2023-09-06T04:23:42
| 2015-03-21T10:49:52
|
C
|
UTF-8
|
C
| false
| false
| 771
|
c
|
main.c
|
/*---------------------------------------------------------------------------------
    "Made with PVSnesLib" Logo for SNES Projects
---------------------------------------------------------------------------------*/
#include <snes.h>

#include "logo.h"

int main(void) {
    // Bring up the SPC700 sound engine first (this call takes a while)
    spcBoot();

    // Basic console initialization, then wipe VRAM
    consoleInit();
    dmaClearVram();

    // Prepare the animated PVSnesLib logo and fade it in over 8 steps
    initPVSnesLibLogo();
    setFadeEffectEx(FADE_IN, 8);
    WaitForVBlank();

    // Frame loop: advance the logo animation once per vblank
    for (;;) {
        if (updatePVSnesLibLogo() == 1) {
            // The logo animation is complete
            // Paste your game code here
            // consoleNocashMessage("Start your game!");
        }

        WaitForVBlank();
    }

    return 0;
}
|
547a8f6a2d31ec52c7fb380fb302a8705a79f185
|
6d088ec295b33db11e378212d42d40d5a190c54c
|
/core/vil1/vil1_open.h
|
ad8dbfae844af7a23144968131a4cf2e7c076f7c
|
[] |
no_license
|
vxl/vxl
|
29dffd5011f21a67e14c1bcbd5388fdbbc101b29
|
594ebed3d5fb6d0930d5758630113e044fee00bc
|
refs/heads/master
| 2023-08-31T03:56:24.286486
| 2023-08-29T17:53:12
| 2023-08-29T17:53:12
| 9,819,799
| 224
| 126
| null | 2023-09-14T15:52:32
| 2013-05-02T18:32:27
|
C++
|
UTF-8
|
C
| false
| false
| 330
|
h
|
vil1_open.h
|
// This is core/vil1/vil1_open.h
#ifndef vil1_open_h_
#define vil1_open_h_
//:
// \file
// \brief make a vil1_stream from a filename, an URL, etc.
//
// \author fsm
//
// \verbatim
//  Modifications
// \endverbatim
#include "vil1_stream.h"
vil1_stream *vil1_open(char const* what, char const* how = "r");
#endif // vil1_open_h_
|
439953a5d860673cf621aa20043b328ae76e1627
|
6832a9c81b16ead373325f70fb97753d6a211539
|
/src/wrapped/wrappedlibxpm_private.h
|
fc71b02ed9efa781aad7bb96f15f84d1f81de52e
|
[
"MIT"
] |
permissive
|
ptitSeb/box86
|
2d3a0c4574905ff8d9dc7b5afb5444a273832bd6
|
ad934455c1b40feeebe5e6bd09b9063519102859
|
refs/heads/master
| 2023-08-22T05:30:54.331500
| 2023-08-20T13:01:58
| 2023-08-20T13:01:58
| 164,844,546
| 2,877
| 294
|
MIT
| 2023-07-27T11:19:10
| 2019-01-09T10:49:32
|
C
|
UTF-8
|
C
| false
| false
| 2,003
|
h
|
wrappedlibxpm_private.h
|
/* X-macro table of libXpm entry points wrapped by box86.  Each GO(name,
 * signature) line declares one wrapped symbol; the second field encodes the
 * signature (return type, 'F', then one letter per argument -- presumably
 * i=int, v=void, p=pointer; confirm against the box86 wrapper convention).
 * Commented-out lines are libXpm symbols that are deliberately not wrapped.
 * This header must only be included with the GO/GOM/GO2/DATA macros defined
 * by the including translation unit.
 */
#if !(defined(GO) && defined(GOM) && defined(GO2) && defined(DATA))
#error Meh....
#endif
//GO(xpmatoui,
GO(XpmAttributesSize, iFv)
GO(XpmCreateBufferFromImage, iFppppp) // need unwrapping of Image?
GO(XpmCreateBufferFromPixmap, iFppppp)
GO(XpmCreateBufferFromXpmImage, iFppp)
GO(XpmCreateDataFromImage, iFppppp)
GO(XpmCreateDataFromPixmap, iFppppp)
GO(XpmCreateDataFromXpmImage, iFppp)
GO(XpmCreateImageFromBuffer, iFppppp) // XImage...
GO(XpmCreateImageFromData, iFppppp) // XImage...
//GO(xpmCreateImageFromPixmap,
GO(XpmCreateImageFromXpmImage, iFppppp)
GO(XpmCreatePixmapFromBuffer, iFpppppp)
GO(XpmCreatePixmapFromData, iFpppppp)
//GO(xpmCreatePixmapFromImage,
GO(XpmCreatePixmapFromXpmImage, iFpppppp)
GO(XpmCreateXpmImageFromBuffer, iFppp)
GO(XpmCreateXpmImageFromData, iFppp)
GO(XpmCreateXpmImageFromImage, iFppppp)
GO(XpmCreateXpmImageFromPixmap, iFppppp)
GO(XpmFree, vFp)
GO(XpmFreeAttributes, vFp)
//GO(xpmFreeColorTable,
GO(XpmFreeExtensions, vFpi)
//GO(xpmFreeRgbNames,
GO(XpmFreeXpmImage, vFp)
GO(XpmFreeXpmInfo, vFp)
//GO(xpmGetCmt,
GO(XpmGetErrorString, pFi)
//GO(xpmGetRgbName,
//GO(xpmGetString,
//GO(xpmHashIntern,
//GO(xpmHashSlot,
//GO(xpmHashTableFree,
//GO(xpmHashTableInit,
//GO(xpmInitAttributes,
//GO(xpmInitXpmImage,
//GO(xpmInitXpmInfo,
GO(XpmLibraryVersion, iFv)
//GO(xpmNextString,
//GO(xpmNextUI,
//GO(xpmNextWord,
//GO(xpmParseColors,
//GO(xpmParseData,
//GO(xpmParseDataAndCreate,
//GO(xpmParseExtensions,
//GO(xpmParseHeader,
//GO(xpmParseValues,
//GO(xpmPipeThrough,
GO(XpmReadFileToBuffer, iFpp)
GO(XpmReadFileToData, iFpp)
//GO(XpmReadFileToImage,
GO(XpmReadFileToPixmap, iFpppppp)
GO(XpmReadFileToXpmImage, iFppp)
//GO(xpmReadRgbNames,
//GO(xpmSetAttributes,
//GO(xpmSetInfo,
//GO(xpmSetInfoMask,
GO(XpmWriteFileFromBuffer, iFpp)
GO(XpmWriteFileFromData, iFpp)
//GO(XpmWriteFileFromImage,
GO(XpmWriteFileFromPixmap, iFppppp)
GO(XpmWriteFileFromXpmImage, iFppp)
//GO(xpm_xynormalizeimagebits,
//GO(xpm_znormalizeimagebits,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.