source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
blast_kappa.c | /* $Id: blast_kappa.c 615057 2020-08-26 15:29:10Z fongah2 $
* ==========================================================================
*
* PUBLIC DOMAIN NOTICE
* National Center for Biotechnology Information
*
* This software/database is a "United States Government Work" under the
* terms of the United States Copyright Act. It was written as part of
* the author's official duties as a United States Government employee and
* thus cannot be copyrighted. This software/database is freely available
* to the public for use. The National Library of Medicine and the U.S.
* Government have not placed any restriction on its use or reproduction.
*
* Although all reasonable efforts have been taken to ensure the accuracy
* and reliability of the software and data, the NLM and the U.S.
* Government do not and cannot warrant the performance or results that
* may be obtained by using this software or data. The NLM and the U.S.
* Government disclaim all warranties, express or implied, including
* warranties of performance, merchantability or fitness for any particular
* purpose.
*
* Please cite the author in any work or product based on this material.
*
* ===========================================================================
*
* Authors: Alejandro Schaffer, Mike Gertz (ported to algo/blast by Tom Madden)
*
*/
/** @file blast_kappa.c
* Utilities for doing Smith-Waterman alignments and adjusting the scoring
* system for each match in blastpgp
*/
#include <float.h>
#include <algo/blast/core/ncbi_math.h>
#include <algo/blast/core/blast_hits.h>
#include <algo/blast/core/blast_kappa.h>
#include <algo/blast/core/blast_util.h>
#include <algo/blast/core/blast_gapalign.h>
#include <algo/blast/core/blast_filter.h>
#include <algo/blast/core/blast_traceback.h>
#include <algo/blast/core/link_hsps.h>
#include <algo/blast/core/gencode_singleton.h>
#include "blast_psi_priv.h"
#include "blast_gapalign_priv.h"
#include "blast_hits_priv.h"
#include "blast_posit.h"
#include "blast_hspstream_mt_utils.h"
#include "blast_traceback_mt_priv.h"
#ifdef _OPENMP
#include <omp.h>
# ifdef _WIN32
/* stderr expands to (__acrt_iob_func(2)), which won't work in an OpenMP
* shared(...) list. */
# define STDERR_COMMA
# else
# define STDERR_COMMA stderr,
# endif
#endif
#include <algo/blast/composition_adjustment/nlm_linear_algebra.h>
#include <algo/blast/composition_adjustment/compo_heap.h>
#include <algo/blast/composition_adjustment/redo_alignment.h>
#include <algo/blast/composition_adjustment/matrix_frequency_data.h>
#include <algo/blast/composition_adjustment/unified_pvalues.h>
/* Define KAPPA_PRINT_DIAGNOSTICS to turn on printing of
* diagnostic information from some routines. */
/** Compile-time option; if set to a true value, then blastp runs
that use Blast_RedoAlignmentCore to compute the traceback will not
SEG the subject sequence */
#ifndef KAPPA_BLASTP_NO_SEG_SEQUENCE
#define KAPPA_BLASTP_NO_SEG_SEQUENCE 0
#endif
/** Compile-time option; if set to a true value, then tblastn runs
that use Blast_RedoAlignmentCore to compute the traceback will not
SEG the subject sequence */
#ifndef KAPPA_TBLASTN_NO_SEG_SEQUENCE
#define KAPPA_TBLASTN_NO_SEG_SEQUENCE 0
#endif
/**
* Given a list of HSPs with (possibly) high-precision scores, rescale
* the scores to have standard precision and set the scale-independent
* bit scores. This routine does *not* resort the list; it is assumed
* that the list is already sorted according to e-values that have been
* computed using the initial, higher-precision scores.
*
* @param hsp_list the HSP list
 * @param lambda Karlin-Altschul statistical parameter [in]
 * @param logK natural logarithm of the Karlin-Altschul parameter K [in]
 * @param scoreDivisor the value by which reported scores are to be
 *                     divided [in]
*/
static void
s_HSPListNormalizeScores(BlastHSPList * hsp_list,
                         double lambda,
                         double logK,
                         double scoreDivisor)
{
    int i;      /* index over the HSPs in the list */

    for (i = 0; i < hsp_list->hspcnt; i++) {
        BlastHSP * hsp = hsp_list->hsp_array[i];
        /* Round the high-precision score down to standard precision. */
        hsp->score = BLAST_Nint((double) hsp->score / scoreDivisor);
        /* Bit score from the rescaled integer score; multiplying by
         * scoreDivisor undoes the rescaling, since lambda here is the
         * scaled Lambda. */
        hsp->bit_score =
            (hsp->score * lambda * scoreDivisor - logK) / NCBIMATH_LN2;
    }
}
/**
* Adjusts the E-values in a BLAST_HitList to be composites of
* a composition-based P-value and a score/alignment-based P-value
*
* @param hsp_list the hitlist whose E-values need to be adjusted
* @param comp_p_value P-value from sequence composition
* @param seqSrc a source of sequence data
* @param subject_length length of database sequence
* @param query_context info about this query context; needed when
* multiple queries are being used
* @param LambdaRatio the ratio between the observed value of Lambda
* and the predicted value of lambda (used to print
* diagnostics)
* @param subject_id the subject id of this sequence (used to print
* diagnostics)
**/
static void
s_AdjustEvaluesForComposition(
    BlastHSPList *hsp_list,
    double comp_p_value,
    const BlastSeqSrc* seqSrc,
    Int4 subject_length,
    const BlastContextInfo * query_context,
    double LambdaRatio,
    int subject_id)
{
    /* Smallest observed evalue after adjustment */
    double best_evalue = DBL_MAX;
    /* True length of the query */
    int query_length = query_context->query_length;
    /* Length adjustment to compensate for edge effects */
    int length_adjustment = query_context->length_adjustment;
    /* Effective lengths of the query, subject, and database */
    double query_eff = MAX((query_length - length_adjustment), 1);
    double subject_eff = MAX((subject_length - length_adjustment), 1.0);
    double dblen_eff = (double) query_context->eff_searchsp / query_eff;
    /* Scale factor to convert the database E-value to the sequence E-value */
    double db_to_sequence_scale = subject_eff / dblen_eff;

    int hsp_index;
    for (hsp_index = 0; hsp_index < hsp_list->hspcnt; hsp_index++) {
        /* for all HSPs */
        double align_p_value;     /* P-value for the alignment score */
        double combined_p_value;  /* combination of two P-values */

        /* HSP for this iteration */
        BlastHSP * hsp = hsp_list->hsp_array[hsp_index];
#ifdef KAPPA_PRINT_DIAGNOSTICS
        /* Original E-value, saved if diagnostics are printed. */
        double old_e_value = hsp->evalue;
#endif
        /* Convert the database-wide E-value to a per-sequence E-value,
         * turn it into a P-value, combine that with the composition
         * P-value, and convert back. */
        hsp->evalue *= db_to_sequence_scale;

        align_p_value = BLAST_KarlinEtoP(hsp->evalue);
        combined_p_value = Blast_Overall_P_Value(comp_p_value,align_p_value);
        hsp->evalue = BLAST_KarlinPtoE(combined_p_value);

        /* Undo the per-sequence scaling so the stored E-value is again
         * relative to the whole database. */
        hsp->evalue /= db_to_sequence_scale;

        if (hsp->evalue < best_evalue) {
            best_evalue = hsp->evalue;
        }

#ifdef KAPPA_PRINT_DIAGNOSTICS
        if (seqSrc){
            int sequence_gi; /*GI of a sequence*/
            Blast_GiList* gi_list; /*list of GI's for a sequence*/
            gi_list = BlastSeqSrcGetGis(seqSrc, (void *) (&subject_id));
            if ((gi_list) && (gi_list->num_used > 0)) {
                sequence_gi = gi_list->data[0];
            } else {
                sequence_gi = (-1);
            }
            printf("GI %d Lambda ratio %e comp. p-value %e; "
                   "adjust E-value of query length %d match length "
                   "%d from %e to %e\n",
                   sequence_gi, LambdaRatio, comp_p_value,
                   query_length, subject_length, old_e_value, hsp->evalue);
            Blast_GiListFree(gi_list);
        }
#endif
    } /* end for all HSPs */

    hsp_list->best_evalue = best_evalue;

    /* suppress unused parameter warnings if diagnostics are not printed */
    (void) seqSrc;
    (void) query_length;
    (void) LambdaRatio;
    (void) subject_id;
}
/**
* Remove from a hitlist all HSPs that are completely contained in an
* HSP that occurs earlier in the list and that:
* - is on the same strand; and
 * - has equal or greater score.
* The hitlist should be sorted by some measure of significance before
* this routine is called.
* @param hsp_array array to be reaped
* @param hspcnt length of hsp_array
*/
static void
s_HitlistReapContained(BlastHSP * hsp_array[], Int4 * hspcnt)
{
    Int4 count = *hspcnt;   /* number of HSPs in the hitlist on entry */
    Int4 i, j;              /* loop indices over the array */
    Int4 kept;              /* number of HSPs retained after reaping */

    /* Delete any HSP that is fully contained in an earlier, same-frame
     * HSP of equal or greater score. */
    for (i = 1; i < count; i++) {
        BlastHSP *candidate = hsp_array[i];   /* deletion candidate */
        for (j = 0; j < i && candidate != NULL; j++) {
            BlastHSP *earlier = hsp_array[j]; /* HSP preceding candidate */
            if (earlier == NULL) {
                /* earlier was deleted in a prior iteration */
                continue;
            }
            if (earlier->query.frame != candidate->query.frame ||
                earlier->subject.frame != candidate->subject.frame) {
                /* different query/subject frames; cannot contain */
                continue;
            }
            /* Same frames: candidate is contained if both of its
             * endpoints lie inside earlier's span. */
            if (CONTAINED_IN_HSP
                (earlier->query.offset, earlier->query.end,
                 candidate->query.offset,
                 earlier->subject.offset, earlier->subject.end,
                 candidate->subject.offset) &&
                CONTAINED_IN_HSP
                (earlier->query.offset, earlier->query.end,
                 candidate->query.end,
                 earlier->subject.offset, earlier->subject.end,
                 candidate->subject.end) &&
                candidate->score <= earlier->score) {
                candidate = hsp_array[i] = Blast_HSPFree(hsp_array[i]);
            }
        }
    }
    /* Compact the array, dropping the NULL (deleted) entries. */
    kept = 0;
    for (i = 0; i < count; i++) {
        if (hsp_array[i] != NULL) {
            hsp_array[kept++] = hsp_array[i];
        }
    }
    *hspcnt = kept;
    /* Clear the now-unused tail of the array. */
    for (i = kept; i < count; i++) {
        hsp_array[i] = NULL;
    }
}
/** Callback for BlastCompo_AlignmentsFree: dispose of the
 * GapEditScript stored in a BlastCompo_Alignment's context field.
 * @param edit_script the edit script to free; may be NULL */
static void s_FreeEditScript(void * edit_script)
{
    if (edit_script) {
        GapEditScriptDelete(edit_script);
    }
}
/**
 * Converts a list of objects of type BlastCompo_Alignment to HSPs
 * saved in an existing BlastHSPList. Conversion in this direction
 * is lossless. On success the list passed to this routine is
 * freed to ensure that there is no aliasing of fields between the
 * list of BlastCompo_Alignments and the new hitlist; on failure the
 * hsp_list itself is freed instead (the caller's pointer becomes
 * dangling and must not be reused).
 *
 * @param hsp_list The hsp_list to populate [in|out]
 * @param alignments A list of distinct alignments; freed before a
 *                   successful return [in]
 * @param oid Ordinal id of a database sequence [in]
 * @param queryInfo information about all queries in this search [in]
 * @param frame query frame
 * @return 0 on success; -1 on failure
 */
static int
s_HSPListFromDistinctAlignments(BlastHSPList *hsp_list,
                                BlastCompo_Alignment ** alignments,
                                int oid,
                                const BlastQueryInfo* queryInfo,
                                int frame)
{
    int status = 0;    /* return code for any routine called */
    static const int unknown_value = 0;   /* dummy constant to use when a
                                             parameter value is not known */
    BlastCompo_Alignment * align;   /* an alignment in the list */

    if (hsp_list == NULL) {
        return -1;
    }
    hsp_list->oid = oid;

    for (align = *alignments; NULL != align; align = align->next) {
        BlastHSP * new_hsp = NULL;
        GapEditScript * editScript = align->context;
        align->context = NULL;
        status = Blast_HSPInit(align->queryStart, align->queryEnd,
                               align->matchStart, align->matchEnd,
                               unknown_value, unknown_value,
                               align->queryIndex,
                               frame, (Int2) align->frame, align->score,
                               &editScript, &new_hsp);
        /* BUG FIX: check the result of Blast_HSPInit before using
         * new_hsp; the original code dereferenced new_hsp in the
         * switch below even when initialization failed and the
         * pointer could be NULL. */
        if (status != 0 || new_hsp == NULL) {
            if (status == 0) {
                status = -1;   /* treat a NULL HSP as an error */
            }
            break;
        }
        /* Record which composition adjustment produced this HSP. */
        switch (align->matrix_adjust_rule) {
        case eDontAdjustMatrix:
            new_hsp->comp_adjustment_method = eNoCompositionBasedStats;
            break;
        case eCompoScaleOldMatrix:
            new_hsp->comp_adjustment_method = eCompositionBasedStats;
            break;
        default:
            new_hsp->comp_adjustment_method = eCompositionMatrixAdjust;
            break;
        }
        /* At this point, the subject and possibly the query sequence have
         * been filtered; since it is not clear that num_ident of the
         * filtered sequences, rather than the original, is desired,
         * explicitly leave num_ident blank. */
        new_hsp->num_ident = 0;

        status = Blast_HSPListSaveHSP(hsp_list, new_hsp);
        if (status != 0)
            break;
    }
    if (status == 0) {
        BlastCompo_AlignmentsFree(alignments, s_FreeEditScript);
        Blast_HSPListSortByScore(hsp_list);
    } else {
        hsp_list = Blast_HSPListFree(hsp_list);
    }
    /* BUG FIX: propagate failure to the caller; the original returned 0
     * unconditionally, even after freeing hsp_list on error. */
    return status == 0 ? 0 : -1;
}
/** Return the subject length to use for e-value computations.
 * For RPS-tblastn the stored subject length is a packed nucleotide
 * length, which is converted to the length of one translated
 * (protein) frame; for all other programs the length is returned
 * unchanged.
 * @param total_subj_length total subject sequence length [in]
 * @param program_number type of BLAST search being performed [in]
 * @return the effective subject length */
Int4 s_GetSubjectLength(Int4 total_subj_length, EBlastProgramType program_number)
{
    if (program_number == eBlastTypeRpsTblastn) {
        return (GET_NUCL_LENGTH(total_subj_length) - 1) / 3;
    }
    return total_subj_length;
}
/**
 * Add e-values to a list of HSPs and remove those that do not have
 * sufficiently good (low) e-values.
*
* @param *pbestScore best (highest) score in the list
* @param *pbestEvalue best (lowest) evalue in the list
* @param hsp_list the list
* @param seqSrc a source of sequence data
* @param subject_length length of the subject sequence
* @param program_number the type of BLAST search being performed
* @param queryInfo information about the queries
* @param context_index the index of the query corresponding to
* the HSPs in hsp_list
* @param sbp the score block for this search
* @param hitParams parameters used to assign evalues and
* decide whether to save hits.
* @param pvalueForThisPair composition p-value
* @param LambdaRatio lambda ratio, if available
* @param subject_id index of subject
*
* @return 0 on success; -1 on failure (can fail because some methods
* of generating evalues use auxiliary structures)
*/
static int
s_HitlistEvaluateAndPurge(int * pbestScore, double *pbestEvalue,
                          BlastHSPList * hsp_list,
                          const BlastSeqSrc* seqSrc,
                          int subject_length,
                          EBlastProgramType program_number,
                          const BlastQueryInfo* queryInfo,
                          int context_index,
                          BlastScoreBlk* sbp,
                          const BlastHitSavingParameters* hitParams,
                          double pvalueForThisPair,
                          double LambdaRatio,
                          int subject_id)
{
    int status;

    *pbestEvalue = DBL_MAX;
    *pbestScore = 0;

    /* Assign e-values: either via sum statistics (HSP linking) or
     * directly from the individual scores. */
    if (hitParams->do_sum_stats) {
        status = BLAST_LinkHsps(program_number, hsp_list, queryInfo,
                                subject_length, sbp,
                                hitParams->link_hsp_params, TRUE);
    } else {
        /* Gap decay 0.0: a non-zero decay is used only when linking
         * HSPs.  Scaling factor 1.0: both the scores and Lambda are
         * scaled, so the factors cancel each other. */
        status = Blast_HSPListGetEvalues(
            program_number, queryInfo,
            s_GetSubjectLength(subject_length, program_number),
            hsp_list, TRUE, FALSE, sbp, 0.0, 1.0);
    }

    /* For protein queries, fold the composition p-value into the
     * e-values when a valid p-value is available. */
    if (eBlastTypeBlastp == program_number ||
        eBlastTypeBlastx == program_number) {
        if (pvalueForThisPair >= 0 && pvalueForThisPair <= 1) {
            s_AdjustEvaluesForComposition(
                hsp_list, pvalueForThisPair, seqSrc, subject_length,
                &queryInfo->contexts[context_index],
                LambdaRatio, subject_id);
        }
    }

    if (status != 0) {
        return -1;
    }
    /* Drop HSPs whose adjusted e-values are no longer good enough,
     * then report the best remaining score and e-value. */
    Blast_HSPListReapByEvalue(hsp_list, hitParams->options);
    if (hsp_list->hspcnt > 0) {
        *pbestEvalue = hsp_list->best_evalue;
        *pbestScore = hsp_list->hsp_array[0]->score;
    }
    return 0;
}
/** Compute the number of identities for the HSPs in the hsp_list
 * @note Should work for blastp and tblastn now.
 *
 * @param query_blk the query sequence data [in]
 * @param query_info structure describing the query_blk structure [in]
 * @param subject_blk subject sequence data; used directly when
 *                    seq_src is NULL [in]
 * @param seq_src source of subject sequence data [in]
 * @param hsp_list list of HSPs to be processed [in|out]
 * @param scoring_options scoring options [in]
 * @param gen_code_string genetic code for tblastn [in]
 * @param sbp the score block for this search [in]
 * @param ranges subject ranges to fetch from the sequence source [in]
 */
static void
s_ComputeNumIdentities(const BLAST_SequenceBlk* query_blk,
                       const BlastQueryInfo* query_info,
                       BLAST_SequenceBlk* subject_blk,
                       const BlastSeqSrc* seq_src,
                       BlastHSPList* hsp_list,
                       const BlastScoringOptions* scoring_options,
                       const Uint1* gen_code_string,
                       const BlastScoreBlk* sbp,
                       BlastSeqSrcSetRangesArg * ranges)
{
    Uint1* query = NULL;          /* query residues for the current HSP */
    Uint1* query_nomask = NULL;   /* unmasked query residues */
    Uint1* subject = NULL;        /* subject residues (non-tblastn case) */
    const EBlastProgramType program_number = scoring_options->program_number;
    const Boolean kIsOutOfFrame = scoring_options->is_ooframe;
    const EBlastEncoding encoding = Blast_TracebackGetEncoding(program_number);
    BlastSeqSrcGetSeqArg seq_arg;  /* request used to fetch the subject;
                                      only initialized when seq_src != NULL */
    Int2 status = 0;
    int i;
    SBlastTargetTranslation* target_t = NULL;

    if ( !hsp_list) return;

    /* Initialize the subject */
    if (seq_src){
        /* Fetch the subject sequence from the sequence source. */
        memset((void*) &seq_arg, 0, sizeof(seq_arg));
        seq_arg.oid = hsp_list->oid;
        seq_arg.encoding = encoding;
        seq_arg.check_oid_exclusion = TRUE;
        seq_arg.ranges = ranges;
        status = BlastSeqSrcGetSequence(seq_src, (void*) &seq_arg);
        /* NOTE(review): failure is only checked via ASSERT; in release
         * builds a fetch error is silently ignored. */
        ASSERT(status == 0);
        (void)status; /* to pacify compiler warning */
        if (program_number == eBlastTypeTblastn) {
            /* tblastn: set up lazy translation of the nucleotide
             * subject; translated frames are obtained per-HSP below. */
            subject_blk = seq_arg.seq;
            BlastTargetTranslationNew(
                    subject_blk,
                    gen_code_string,
                    eBlastTypeTblastn,
                    kIsOutOfFrame,
                    &target_t
            );
        } else {
            subject = seq_arg.seq->sequence;
        }
    } else {
        /* No sequence source: use the caller-supplied subject block. */
        subject = subject_blk->sequence;
    }

    for (i = 0; i < hsp_list->hspcnt; i++) {
        BlastHSP* hsp = hsp_list->hsp_array[i];

        /* Initialize the query */
        if (program_number == eBlastTypeBlastx && kIsOutOfFrame) {
            /* Out-of-frame blastx: use the mixed-frame (oof) sequence;
             * the context is rounded down to the first context of its
             * codon triple. */
            Int4 context = hsp->context - hsp->context % CODON_LENGTH;
            Int4 context_offset = query_info->contexts[context].query_offset;
            query = query_blk->oof_sequence + CODON_LENGTH + context_offset;
            query_nomask = query_blk->oof_sequence + CODON_LENGTH + context_offset;
        } else {
            query = query_blk->sequence +
                query_info->contexts[hsp->context].query_offset;
            query_nomask = query_blk->sequence_nomask +
                query_info->contexts[hsp->context].query_offset;
        }

        /* Translate subject if needed. */
        if (program_number == eBlastTypeTblastn) {
            const Uint1* target_sequence = Blast_HSPGetTargetTranslation(target_t, hsp, NULL);
            status = Blast_HSPGetNumIdentitiesAndPositives(query, target_sequence, hsp, scoring_options, 0, sbp);
        }
        else
            status = Blast_HSPGetNumIdentitiesAndPositives(query_nomask, subject, hsp, scoring_options, 0, sbp);

        ASSERT(status == 0);
    }
    /* Release translation state and, if we fetched the subject,
     * return it to the sequence source. */
    target_t = BlastTargetTranslationFree(target_t);
    if (seq_src) {
        BlastSeqSrcReleaseSequence(seq_src, (void*) &seq_arg);
        BlastSequenceBlkFree(seq_arg.seq);
    }
}
/**
* A callback routine: compute lambda for the given score
* probabilities.
* (@sa calc_lambda_type).
*/
static double
s_CalcLambda(double probs[], int min_score, int max_score, double lambda0)
{
    Blast_ScoreFreq freq;   /* score frequency data passed to the solver */
    double expected = 0.0;  /* expected score of aligning two characters */
    int s;                  /* a score value */

    /* Accumulate the expectation sum(score * prob(score)). */
    for (s = min_score; s <= max_score; s++) {
        expected += s * probs[s - min_score];
    }

    freq.score_min = min_score;
    freq.score_max = max_score;
    freq.obs_min = min_score;
    freq.obs_max = max_score;
    freq.sprob0 = probs;
    /* sprob is indexed directly by score, so shift the base pointer
     * back by min_score. */
    freq.sprob = &probs[-min_score];
    freq.score_avg = expected;

    return Blast_KarlinLambdaNR(&freq, lambda0);
}
/** Fill a two-dimensional array with the frequency ratios that
* underlie a position specific score matrix (PSSM).
*
* @param returnRatios a two-dimensional array with BLASTAA_SIZE
* columns
* @param numPositions the number of rows in returnRatios
* @param query query sequence data, of length numPositions
* @param matrixName the name of the position independent matrix
* corresponding to this PSSM
* @param startNumerator position-specific data used to generate the
* PSSM
* @return 0 on success; -1 if the named matrix isn't known, or if
* there was a memory error
* @todo find out what start numerator is.
*/
static int
s_GetPosBasedStartFreqRatios(double ** returnRatios,
                             Int4 numPositions,
                             Uint1 * query,
                             const char *matrixName,
                             double **startNumerator)
{
    const double kPosEpsilon = 0.0001;  /* values below this cutoff are
                                           treated specially */
    SFreqRatios * stdFreqRatios;  /* frequency ratios for the named matrix */
    double *standardProb;         /* background probability of each letter */
    Int4 pos, res;                /* position / residue loop indices */

    stdFreqRatios = _PSIMatrixFrequencyRatiosNew(matrixName);
    if (stdFreqRatios == NULL) {
        return -1;
    }
    /* Seed every position with the position-independent ratios of the
     * residue that occurs there in the query. */
    for (pos = 0; pos < numPositions; pos++) {
        for (res = 0; res < BLASTAA_SIZE; res++) {
            returnRatios[pos][res] = stdFreqRatios->data[query[pos]][res];
        }
    }
    stdFreqRatios = _PSIMatrixFrequencyRatiosFree(stdFreqRatios);

    standardProb = BLAST_GetStandardAaProbabilities();
    if (standardProb == NULL) {
        return -1;
    }
    /* reverse multiplication done in posit.c: where position-specific
     * data is usable, replace the seed with numerator/background. */
    for (pos = 0; pos < numPositions; pos++) {
        for (res = 0; res < BLASTAA_SIZE; res++) {
            if (standardProb[query[pos]] > kPosEpsilon &&
                standardProb[res] > kPosEpsilon &&
                res != eStopChar && res != eXchar &&
                startNumerator[pos][res] > kPosEpsilon) {
                returnRatios[pos][res] =
                    startNumerator[pos][res] / standardProb[res];
            }
        }
    }
    sfree(standardProb);
    return 0;
}
/**
* Fill a two-dimensional array with the frequency ratios that underlie the
* named score matrix.
*
* @param returnRatios a two-dimensional array of size
* BLASTAA_SIZE x BLASTAA_SIZE
* @param matrixName the name of a matrix
* @return 0 on success; -1 if the named matrix isn't known, or if
* there was a memory error
*/
static int
s_GetStartFreqRatios(double ** returnRatios,
                     const char *matrixName)
{
    SFreqRatios * stdFreqRatios;   /* frequency ratios for the matrix */
    int row, col;                  /* loop indices */

    stdFreqRatios = _PSIMatrixFrequencyRatiosNew(matrixName);
    if (stdFreqRatios == NULL) {
        return -1;
    }
    /* Copy the full BLASTAA_SIZE x BLASTAA_SIZE ratio table. */
    for (row = 0; row < BLASTAA_SIZE; row++) {
        for (col = 0; col < BLASTAA_SIZE; col++) {
            returnRatios[row][col] = stdFreqRatios->data[row][col];
        }
    }
    stdFreqRatios = _PSIMatrixFrequencyRatiosFree(stdFreqRatios);
    return 0;
}
/** SCALING_FACTOR is a multiplicative factor used to get more bits of
* precision in the integer matrix scores. It cannot be arbitrarily
* large because we do not want total alignment scores to exceed
* -(BLAST_SCORE_MIN) */
#define SCALING_FACTOR 32
/**
* Produce a scaled-up version of the position-specific matrix
* with a given set of position-specific residue frequencies.
*
* @param fillPosMatrix is the matrix to be filled
* @param matrixName name of the standard substitution matrix [in]
* @param posFreqs PSSM's frequency ratios [in]
* @param query Query sequence data [in]
* @param queryLength Length of the query sequence above [in]
* @param sbp stores various parameters of the search
* @param scale_factor amount by which ungapped parameters should be
* scaled.
* @return 0 on success; -1 on failure
*/
static int
s_ScalePosMatrix(int ** fillPosMatrix,
                 const char * matrixName,
                 double ** posFreqs,
                 Uint1 * query,
                 int queryLength,
                 BlastScoreBlk* sbp,
                 double scale_factor)
{
    /* Data used by scaling routines */
    Kappa_posSearchItems *posSearch = NULL;
    /* A reduced collection of search parameters used by PSI-blast */
    Kappa_compactSearchItems *compactSearch = NULL;
    /* Representation of a PSSM internal to PSI-blast */
    _PSIInternalPssmData* internal_pssm = NULL;
    /* return code */
    int status = 0;

    posSearch = Kappa_posSearchItemsNew(queryLength, matrixName,
                                        fillPosMatrix, posFreqs);
    compactSearch = Kappa_compactSearchItemsNew(query, queryLength, sbp);
    /* Copy data into new structures */
    internal_pssm = _PSIInternalPssmDataNew(queryLength, BLASTAA_SIZE);
    /* Bail out (freeing whatever was allocated) if any allocation failed. */
    if (posSearch == NULL || compactSearch == NULL || internal_pssm == NULL) {
        status = -1;
        goto cleanup;
    }
    /* Copy the PSSM, the scaled PSSM, and the frequency ratios into the
     * internal PSSM representation... */
    _PSICopyMatrix_int(internal_pssm->pssm, posSearch->posMatrix,
                       internal_pssm->ncols, internal_pssm->nrows);
    _PSICopyMatrix_int(internal_pssm->scaled_pssm,
                       posSearch->posPrivateMatrix,
                       internal_pssm->ncols, internal_pssm->nrows);
    _PSICopyMatrix_double(internal_pssm->freq_ratios,
                          posSearch->posFreqs, internal_pssm->ncols,
                          internal_pssm->nrows);
    /* ...then recompute the PSSM from the frequency ratios. */
    status = _PSIConvertFreqRatiosToPSSM(internal_pssm, query, sbp,
                                         compactSearch->standardProb);
    if (status != 0) {
        goto cleanup;
    }
    /* Copy data from new structures to posSearchItems */
    _PSICopyMatrix_int(posSearch->posMatrix, internal_pssm->pssm,
                       internal_pssm->ncols, internal_pssm->nrows);
    _PSICopyMatrix_int(posSearch->posPrivateMatrix,
                       internal_pssm->scaled_pssm,
                       internal_pssm->ncols, internal_pssm->nrows);
    _PSICopyMatrix_double(posSearch->posFreqs,
                          internal_pssm->freq_ratios,
                          internal_pssm->ncols, internal_pssm->nrows);
    /* Finally apply IMPALA-style scaling by scale_factor. */
    status = Kappa_impalaScaling(posSearch, compactSearch, (double)
                                 scale_factor, FALSE, sbp);
cleanup:
    /* All three temporaries are freed on every path (goto cleanup). */
    internal_pssm = _PSIInternalPssmDataFree(internal_pssm);
    posSearch = Kappa_posSearchItemsFree(posSearch);
    compactSearch = Kappa_compactSearchItemsFree(compactSearch);
    return status;
}
/**
* Convert an array of HSPs to a list of BlastCompo_Alignment objects.
* The context field of each BlastCompo_Alignment is set to point to the
* corresponding HSP.
*
* @param self the array of alignment to be filled
* @param numAligns number of alignments
* @param hsp_array an array of HSPs
* @param hspcnt the length of hsp_array
* @param init_context the initial context to process
* @param queryInfo information about the concatenated query
* @param localScalingFactor the amount by which this search is scaled
*
 * @return 0 on success, or -1 if there is an out-of-memory error
*/
static int
s_ResultHspToDistinctAlign(BlastCompo_Alignment **self,
                           int *numAligns,
                           BlastHSP * hsp_array[], Int4 hspcnt,
                           int init_context,
                           const BlastQueryInfo* queryInfo,
                           double localScalingFactor)
{
    BlastCompo_Alignment * tail[6];   /* last element of each frame's list */
    int i;                            /* index over hsp_array */
    int frame_index;                  /* frame (context) of the current HSP */

    /* Start with six empty lists, one per frame. */
    for (frame_index = 0; frame_index < 6; frame_index++) {
        tail[frame_index] = NULL;
        numAligns[frame_index] = 0;
    }

    for (i = 0; i < hspcnt; i++) {
        BlastHSP * hsp = hsp_array[i];        /* current HSP */
        BlastCompo_Alignment * new_align;     /* its converted form */

        frame_index = hsp->context - init_context;
        ASSERT(frame_index >= 0 && frame_index < 6);
        /* Incoming alignments will have coordinates of the query
           portion relative to a particular query context; they must
           be shifted for use in the composition_adjustment library. */
        new_align = BlastCompo_AlignmentNew(
            (int) (hsp->score * localScalingFactor),
            eDontAdjustMatrix,
            hsp->query.offset, hsp->query.end, hsp->context,
            hsp->subject.offset, hsp->subject.end,
            hsp->subject.frame, hsp);
        if (new_align == NULL) {   /* out of memory */
            return -1;
        }
        /* Append new_align to this frame's list. */
        if (tail[frame_index] == NULL) {
            self[frame_index] = new_align;
        } else {
            tail[frame_index]->next = new_align;
        }
        tail[frame_index] = new_align;
        numAligns[frame_index]++;
    }
    return 0;
}
/**
* Redo a S-W alignment using an x-drop alignment. The result will
* usually be the same as the S-W alignment. The call to ALIGN_EX
* attempts to force the endpoints of the alignment to match the
* optimal endpoints determined by the Smith-Waterman algorithm.
* ALIGN_EX is used, so that if the data structures for storing BLAST
* alignments are changed, the code will not break
*
* @param query the query data
* @param queryStart start of the alignment in the query sequence
* @param queryEnd end of the alignment in the query sequence,
* as computed by the Smith-Waterman algorithm
* @param subject the subject (database) sequence
* @param matchStart start of the alignment in the subject sequence
* @param matchEnd end of the alignment in the query sequence,
* as computed by the Smith-Waterman algorithm
* @param gap_align parameters for a gapped alignment
* @param scoringParams Settings for gapped alignment.[in]
* @param score score computed by the Smith-Waterman algorithm
* @param queryAlignmentExtent length of the alignment in the query sequence,
* as computed by the x-drop algorithm
* @param matchAlignmentExtent length of the alignment in the subject
* sequence, as computed by the x-drop algorithm
* @param newScore alignment score computed by the x-drop
* algorithm
*/
static void
s_SWFindFinalEndsUsingXdrop(BlastCompo_SequenceData * query,
                            Int4 queryStart,
                            Int4 queryEnd,
                            BlastCompo_SequenceData * subject,
                            Int4 matchStart,
                            Int4 matchEnd,
                            BlastGapAlignStruct* gap_align,
                            const BlastScoringParameters* scoringParams,
                            Int4 score,
                            Int4 * queryAlignmentExtent,
                            Int4 * matchAlignmentExtent,
                            Int4 * newScore)
{
    Int4 XdropAlignScore;         /* alignment score obtained using X-dropoff
                                   * method rather than Smith-Waterman */
    Int4 doublingCount = 0;       /* number of times X-dropoff had to be
                                   * doubled */
    /* Saved so gap_align->gap_x_dropoff can be restored on exit. */
    Int4 gap_x_dropoff_orig = gap_align->gap_x_dropoff;
    GapPrelimEditBlockReset(gap_align->rev_prelim_tback);
    GapPrelimEditBlockReset(gap_align->fwd_prelim_tback);
    /* Repeat the x-drop alignment with a doubled dropoff (at most 3
     * attempts) until its score reaches the Smith-Waterman score. */
    do {
        XdropAlignScore =
            ALIGN_EX(&(query->data[queryStart]) - 1,
                     &(subject->data[matchStart]) - 1,
                     queryEnd - queryStart + 1, matchEnd - matchStart + 1,
                     queryAlignmentExtent,
                     matchAlignmentExtent, gap_align->fwd_prelim_tback,
                     gap_align, scoringParams, queryStart - 1, FALSE, FALSE,
                     NULL);
        gap_align->gap_x_dropoff *= 2;
        doublingCount++;
        if((XdropAlignScore < score) && (doublingCount < 3)) {
            /* Discard the traceback of the failed attempt before retrying. */
            GapPrelimEditBlockReset(gap_align->fwd_prelim_tback);
        }
    } while((XdropAlignScore < score) && (doublingCount < 3));
    /* Restore the caller's X-dropoff value. */
    gap_align->gap_x_dropoff = gap_x_dropoff_orig;
    *newScore = XdropAlignScore;
}
/**
* BLAST-specific information that is associated with a
* BlastCompo_MatchingSequence.
*/
typedef struct
BlastKappa_SequenceInfo {
EBlastProgramType prog_number; /**< identifies the type of blast
search being performed. The type
of search determines how sequence
data should be obtained. */
const BlastSeqSrc* seq_src; /**< BLAST sequence data source */
BlastSeqSrcGetSeqArg seq_arg; /**< argument to GetSequence method
of the BlastSeqSrc (@todo this
structure was designed to be
allocated on the stack, i.e.: in
Kappa_MatchingSequenceInitialize) */
} BlastKappa_SequenceInfo;
/** Release the resources associated with a matching sequence. */
/** Release the resources associated with a matching sequence. */
static void
s_MatchingSequenceRelease(BlastCompo_MatchingSequence * self)
{
    BlastKappa_SequenceInfo * info;   /* BLAST-specific data for self */

    if (self == NULL) {
        return;
    }
    if (self->index >= 0) {
        info = self->local_data;
        if (self->length > 0) {
            /* A sequence was actually fetched; hand it back to the
             * sequence source and free the sequence block. */
            BlastSeqSrcReleaseSequence(info->seq_src, &info->seq_arg);
            BlastSequenceBlkFree(info->seq_arg.seq);
        }
        free(self->local_data);
    }
    self->local_data = NULL;
}
/**
* Do a simple gapped extension to the right from the beginning of query and
* subject ranges examining only matches and mismatches. The extension stops
* when there are more than max_shift mismatches or mismatches or gaps are not
* followed by two identical matches. This is a simplified version of the
* Danielle and Jean Thierry-Miegs' jumper
* alignment implemented in NCBI Magic
* http://www.ncbi.nlm.nih.gov/IEB/Research/Acembly/Download/Downloads.html
*
* @param query_seq Query sequence [in]
* @param query_len Query length [in]
* @param subject_seq Subject sequence [in]
* @param subject_len Subject length [in]
* @param max_shift Maximum number of mismatches or gaps, extension stops if
* this number is reached [in]
* @param query_ext_len Extension length on the query [out]
* @param subject_ext_len Extension length on the subject [out]
* @param align_len Alignment length [out]
* @return Number of identical residues
*/
static int s_ExtendRight(Uint1* query_seq, int query_len,
                         Uint1* subject_seq, int subject_len,
                         int max_shift,
                         int* query_ext_len, int* subject_ext_len,
                         int* align_len)
{
    int num_identical = 0;    /* number of identical residues found */
    int q_pos, s_pos;         /* current positions in query and subject */
    int gaps_in_query = 0;    /* total gap length opened in the query */
    int gaps_in_subject = 0;  /* total gap length opened in the subject */
    q_pos = 0;
    s_pos = 0;
    while (q_pos < query_len && s_pos < subject_len) {
        int n;          /* trial shift (number of skipped positions) */
        int match = 0;  /* set when a shift re-established matching */
        /* Advance over a run of identical residues. */
        while (q_pos < query_len && s_pos < subject_len
               && query_seq[q_pos] == subject_seq[s_pos]) {
            num_identical++;
            q_pos++;
            s_pos++;
        }
        /* try to skip mismatches or gaps */
        for (n=1; n < max_shift && q_pos + n + 1 < query_len
                 && s_pos + n + 1 < subject_len && !match; n++) {
            /* mismatches: accept n mismatched positions if the two
               positions just past them match */
            if (query_seq[q_pos + n] == subject_seq[s_pos + n]
                && query_seq[q_pos + n + 1] == subject_seq[s_pos + n + 1]) {
                /* we have already checked that two positions behind mismatches
                   match so we can advance further */
                q_pos += n + 2;
                s_pos += n + 2;
                num_identical += 2;
                match = 1;
            }
            /* gap of length n in subject, confirmed by two matches */
            if (!match && query_seq[q_pos + n] == subject_seq[s_pos]
                && query_seq[q_pos + n + 1] == subject_seq[s_pos + 1]) {
                q_pos += n + 2;
                s_pos += 2;
                num_identical += 2;
                gaps_in_subject += n;
                match = 1;
            }
            /* gap of length n in query, confirmed by two matches */
            if (!match && query_seq[q_pos] == subject_seq[s_pos + n]
                && query_seq[q_pos + 1] == subject_seq[s_pos + n + 1]) {
                q_pos += 2;
                s_pos += n + 2;
                num_identical += 2;
                gaps_in_query += n;
                match = 1;
            }
        }
        if (match) {
            /* the extension continues past the skipped region */
            continue;
        }
        /* exit the loop: no shift within max_shift re-established a match */
        break;
    }
    *query_ext_len = q_pos;
    *subject_ext_len = s_pos;
    /* Alignment length includes gap characters on the longer side. */
    *align_len = q_pos > s_pos ? q_pos + gaps_in_query : s_pos + gaps_in_subject;
    return num_identical;
}
/**
* Extend left from the end of the sequence and subject ranges and count
* identities. The extension stops when there are more than max_shift
* mismatches or mismatches or gaps are not followed by two identical matches.
* See description for s_ExtendRight for more details.
*
* @param query_seq Query sequence [in]
* @param query_len Query length [in]
* @param subject_seq Subject sequence [in]
* @param subject_len Subject length [in]
* @param max_shift Maximum number of mismatches or gaps, extension stops if
* this number is reached [in]
* @param query_ext_len Extension length on the query [out]
* @param subject_ext_len Extension length on the subject [out]
* @param align_len Alignment length [out]
* @return Number of identical residues
*/
static int s_ExtendLeft(Uint1* query_seq, int query_len,
                        Uint1* subject_seq, int subject_len,
                        int max_shift,
                        int* query_ext_len, int* subject_ext_len,
                        int* align_len)
{
    int q_pos = query_len - 1;   /* current query position; scans leftward */
    int s_pos = subject_len - 1; /* current subject position; scans leftward */
    int num_identical = 0;       /* number of identical residues found */
    int gaps_in_query = 0;       /* total length of gaps opened in the query */
    int gaps_in_subject = 0;     /* total length of gaps opened in the subject */

    while (q_pos >= 0 && s_pos >= 0) {
        int n;
        int match = 0;

        /* process identies */
        /* NOTE(review): the condition uses > 0, so a matching pair at
           index 0 is never counted; s_ExtendRight's inner loop does
           include its final position -- confirm whether this asymmetry
           is intentional for the near-identity heuristic */
        while (q_pos > 0 && s_pos > 0 && query_seq[q_pos] == subject_seq[s_pos]) {
            num_identical++;
            q_pos--;
            s_pos--;
        }

        /* try to skip mismatches or gaps */
        for (n=1;n < max_shift && q_pos - n - 1 > 0 && s_pos - n - 1 > 0
                 && !match; n++) {

            /* mismatch: skip n mismatched positions only if the two
               residues immediately to their left are identical */
            if (query_seq[q_pos - n] == subject_seq[s_pos - n]
                && query_seq[q_pos - n - 1] == subject_seq[s_pos - n - 1]) {
                q_pos -= n + 2;
                s_pos -= n + 2;
                num_identical += 2;
                match = 1;
            }

            /* gap in subject: n query residues are skipped */
            if (!match && query_seq[q_pos - n] == subject_seq[s_pos]
                && query_seq[q_pos - n - 1] == subject_seq[s_pos - 1]) {
                q_pos -= n + 2;
                s_pos -= 2;
                num_identical += 2;
                gaps_in_subject += n;
                match = 1;
            }

            /* gap in query: n subject residues are skipped */
            if (!match && query_seq[q_pos] == subject_seq[s_pos - n]
                && query_seq[q_pos - 1] == subject_seq[s_pos - n - 1]) {
                q_pos -= 2;
                s_pos -= n + 2;
                num_identical += 2;
                gaps_in_query += n;
                match = 1;
            }
        }

        if (match) {
            continue;
        }
        /* neither identities nor a skippable mismatch/gap: stop extending */
        break;
    }

    *query_ext_len = query_len - q_pos - 1;
    *subject_ext_len = subject_len - s_pos - 1;
    /* NOTE: align_len is accumulated with +=, not assigned; the caller
       must initialize *align_len before calling */
    *align_len += *query_ext_len > *subject_ext_len ?
        *query_ext_len + gaps_in_query : *subject_ext_len + gaps_in_subject;

    return num_identical;
}
/**
* Get hash for a word of word_size residues assuming 28-letter alphabet
*
* @param data Sequence [in]
* @param word_size Word size [in]
* @return Hash value
*/
static Uint8 s_GetHash(const Uint1* data, int word_size)
{
Uint8 hash = 0;
int k;
for (k=0;k < word_size;k++) {
hash <<= 5;
hash += (Int8)data[k];
}
return hash;
}
/**
* Find a local number of identical residues in two aligned sequences by
* finding word matches and doing a simple gapped extensions from the word hits
*
* @param query_seq Query sequence [in]
* @param query_hashes Array of query words with index of each word
* corresponding to word position in the query [in]
* @param query_len Query length [in]
* @param subject_seq Subject sequence [in]
* @param subject_len Subject length [in]
* @param max_shift Maximum number of local mismatches or gaps for extensions
* [in]
* @return Number of identical residues
*/
static int s_FindNumIdentical(Uint1* query_seq,
                              const Uint8* query_hashes,
                              int query_len,
                              Uint1* subject_seq,
                              int subject_len,
                              int max_shift)
{
    int word_size = 8;          /* word size for k-mer matching */
    Uint8 hash = 0;
    Uint8 mask = NCBI_CONST_UINT8(0xFFFFFFFFFF); /* mask for computing hash
                                                    values: 40 bits =
                                                    8 residues x 5 bits */
    int query_from = 0;         /* first query position not yet consumed */
    int subject_from = 0;       /* first subject position not yet consumed */
    int s_pos;                  /* position in the subject sequence */
    int num_identical = 0;      /* number of identical residues found */
    Boolean match = FALSE;      /* was a word hit found at the previous
                                   subject position? */

    /* if query or subject length is smaller than word size, exit */
    if (!query_seq || !query_hashes || !subject_seq
        || query_len < word_size || subject_len < word_size) {
        return 0;
    }

    /* for each subject position */
    /* NOTE(review): the loop bound uses '<' so the word ending exactly at
       the sequence end is never examined -- presumably acceptable for
       this heuristic; confirm */
    for (s_pos = 0; s_pos < subject_len - word_size; s_pos++) {
        int q_pos;

        /* find word hash: recompute from scratch at the start or right
           after a hit (s_pos jumped); otherwise roll the hash by shifting
           out the leftmost residue (the 40-bit mask drops it) and adding
           the new rightmost residue */
        if (s_pos == 0 || match) {
            hash = s_GetHash(&subject_seq[s_pos], word_size);
        }
        else {
            hash <<= 5;
            hash &= mask;
            hash += subject_seq[s_pos + word_size - 1];
        }

        /* find matching query word; index of hash is position of the word
           the query (linear scan of the precomputed query hashes) */
        for (q_pos = query_from;q_pos < query_len - word_size; q_pos++) {
            if (query_hashes[q_pos] == hash) {
                break;
            }
        }

        /* if match */
        if (q_pos < query_len - word_size) {
            int query_start = q_pos;
            int subject_start = s_pos;

            int query_left_len, query_right_len;
            int subject_left_len, subject_right_len;
            int align_len_left=0, align_len_right=0;

            match = TRUE;
            num_identical += word_size;

            /* extend left from word match; the extension is confined to
               the not-yet-consumed region starting at query_from /
               subject_from */
            num_identical += s_ExtendLeft(query_seq + query_from,
                                          query_start - query_from,
                                          subject_seq + subject_from,
                                          subject_start - subject_from,
                                          max_shift,
                                          &query_left_len, &subject_left_len,
                                          &align_len_left);

            /* extend right from word match */
            num_identical += s_ExtendRight(query_seq + query_start + word_size,
                                           query_len - query_start - word_size,
                                           subject_seq + subject_start + word_size,
                                           subject_len - subject_start - word_size,
                                           max_shift,
                                           &query_right_len, &subject_right_len,
                                           &align_len_right);

            /* disregard already matched and extended words when matching
               further positions */
            query_from = query_start + word_size + query_right_len;
            subject_from = subject_start + word_size + subject_right_len;
            /* s_pos will be incremented in the loop */
            s_pos = subject_from - 1;
        }
        else {
            match = FALSE;
        }
    }

    return num_identical;
}
/**
* Test whether the aligned parts of two sequences that
* have a high-scoring gapless alignment are nearly identical.
*
 * First extend from the left end of the query and subject ranges and stop if
 * there are too many mismatches. Then extend from the right end. Then for the
 * remaining portion of the sequences find matching words and extend left and
 * right from the word hit. Repeat the last step until the whole alignment
 * ranges are processed.
 *
 * @param seqData Subject sequence [in]
 * @param seqOffset Starting offset of the subject sequence in alignment data
 *                  [in]
 * @param queryData Query sequence [in]
 * @param queryOffset Starting offset of the query sequence in alignment data
 *                    [in]
* @param query_words Array of query words with word index corresponding to
* word's position in the query [in]
* @param align Alignment data [in]
* @return True if sequence parts are nearly identical, false otherwise
*/
static Boolean
s_TestNearIdentical(const BlastCompo_SequenceData* seqData,
                    const int seqOffset,
                    const BlastCompo_SequenceData* queryData,
                    const int queryOffset,
                    const Uint8* query_words,
                    const BlastCompo_Alignment* align)
{
    /* alignment endpoints shifted into sequence-local coordinates;
       align->queryEnd / matchEnd point one position past the end */
    const int qStart = align->queryStart - queryOffset;
    const int qEnd = align->queryEnd - queryOffset - 1;
    const int sStart = align->matchStart - seqOffset;
    const int sEnd = align->matchEnd - seqOffset - 1;

    const double kMinFractionNearIdentical = 0.96;
    const int kMaxShift = 8;

    const int qLen = qEnd - qStart + 1;
    const int sLen = sEnd - sStart + 1;
    const int alignLen = MIN(qLen, sLen);

    int qLeft = 0, sLeft = 0;     /* extension lengths from the right end */
    int qRight = 0, sRight = 0;   /* extension lengths from the left end */
    int alnLeft = 0, alnRight = 0;
    int identities;
    double frac;

    /* count identities extending rightward from the start of both ranges */
    identities = s_ExtendRight(queryData->data + qStart, qLen,
                               seqData->data + sStart, sLen,
                               kMaxShift,
                               &qRight, &sRight, &alnRight);

    /* the whole query or subject range was covered: decide now */
    if (qRight >= qLen || sRight >= sLen) {
        frac = (double)identities / (double)alignLen;
        ASSERT(frac - 1.0 < 1e-10);
        return frac > kMinFractionNearIdentical;
    }

    /* count identities extending leftward from the end of the remaining
       (not yet covered) portion of both ranges */
    identities += s_ExtendLeft(queryData->data + qStart + qRight,
                               qLen - qRight,
                               seqData->data + sStart + sRight,
                               sLen - sRight,
                               kMaxShift,
                               &qLeft, &sLeft, &alnLeft);

    /* both extensions together covered a whole range: decide now */
    if (qLeft + qRight >= qLen || sLeft + sRight >= sLen) {
        frac = (double)identities / (double)alignLen;
        ASSERT(frac - 1.0 < 1e-10);
        return frac > kMinFractionNearIdentical;
    }

    /* count identities in the middle portion via word hits */
    identities += s_FindNumIdentical(queryData->data + qStart + qRight,
                                     query_words + qStart + qRight,
                                     qLen - qLeft - qRight,
                                     seqData->data + sStart + sRight,
                                     sLen - sLeft - sRight,
                                     kMaxShift);

    frac = (double)identities / (double)alignLen;
    ASSERT(frac - 1.0 < 1e-10);
    return (frac > kMinFractionNearIdentical) ? TRUE : FALSE;
}
/**
* Initialize a new matching sequence, obtaining information about the
* sequence from the search.
*
 * @param self object to be initialized
 * @param program_number identifies the type of blast search being
 *                       performed
 * @param seqSrc A pointer to a source from which sequence data
 *               may be obtained
 * @param default_db_genetic_code default genetic code to use when
 *                                subject sequences are translated and there
 *                                is no other guidance on what code to use
 * @param subject_index index of the matching sequence in the database
 * @param ranges subject range restrictions passed to the sequence source
 * @return 0 on success; -1 if the sequence data could not be obtained
*/
static int
s_MatchingSequenceInitialize(BlastCompo_MatchingSequence * self,
                             EBlastProgramType program_number,
                             const BlastSeqSrc* seqSrc,
                             Int4 default_db_genetic_code,
                             Int4 subject_index,
                             BlastSeqSrcSetRangesArg * ranges)
{
    BlastKappa_SequenceInfo * info;  /* BLAST-specific sequence
                                        information */
    self->length = 0;
    self->local_data = NULL;

    info = malloc(sizeof(BlastKappa_SequenceInfo));
    if (info != NULL) {
        self->local_data = info;
        info->seq_src = seqSrc;
        info->prog_number = program_number;

        memset((void*) &info->seq_arg, 0, sizeof(info->seq_arg));
        self->index = subject_index;
        info->seq_arg.oid = subject_index;
        info->seq_arg.check_oid_exclusion = TRUE;
        info->seq_arg.ranges = ranges;
        info->seq_arg.encoding = (program_number == eBlastTypeTblastn)
            ? eBlastEncodingNcbi4na : eBlastEncodingProtein;

        if (BlastSeqSrcGetSequence(seqSrc, &info->seq_arg) >= 0) {
            self->length =
                BlastSeqSrcGetSeqLen(seqSrc, (void*) &info->seq_arg);

            /* If the subject is translated and the BlastSeqSrc
             * implementation doesn't provide a genetic code string, use
             * the default genetic code for all subjects (as in the C
             * toolkit) */
            if (Blast_SubjectIsTranslated(program_number) &&
                info->seq_arg.seq->gen_code_string == NULL) {
                info->seq_arg.seq->gen_code_string =
                    GenCodeSingletonFind(default_db_genetic_code);
                ASSERT(info->seq_arg.seq->gen_code_string);
            }
        }
    }

    if (self->length == 0) {
        /* Could not obtain the required data */
        s_MatchingSequenceRelease(self);
        return -1;
    }
    return 0;
}
/** NCBIstdaa encoding for 'X' character */
#define BLASTP_MASK_RESIDUE 21
/** Default instructions and mask residue for SEG filtering */
#define BLASTP_MASK_INSTRUCTIONS "S 10 1.8 2.1"
/**
* Filter low complexity regions from the sequence data; uses the SEG
* algorithm.
*
* @param seqData data to be filtered
* @param program_name type of search being performed
* @return 0 for success; -1 for out-of-memory
*/
static int
s_DoSegSequenceData(BlastCompo_SequenceData * seqData,
                    EBlastProgramType program_name,
                    Boolean* is_seq_biased)
{
    BlastSeqLoc* mask = NULL;             /* low-complexity locations found */
    SBlastFilterOptions* options = NULL;  /* SEG filtering options */
    int status;

    status = BlastFilteringOptionsFromString(program_name,
                                             BLASTP_MASK_INSTRUCTIONS,
                                             &options, NULL);
    if (status == 0) {
        status = BlastSetUp_Filter(program_name, seqData->data,
                                   seqData->length, 0, options,
                                   &mask, NULL);
        options = SBlastFilterOptionsFree(options);
    }

    /* report whether SEG found anything to mask */
    if (is_seq_biased != NULL) {
        *is_seq_biased = (mask != NULL);
    }

    if (status == 0) {
        Blast_MaskTheResidues(seqData->data, seqData->length,
                              FALSE, mask, FALSE, 0);
    }
    if (mask != NULL) {
        mask = BlastSeqLocFree(mask);
    }
    return status;
}
/**
* Obtain a string of translated data
*
* @param self the sequence from which to obtain the data [in]
* @param range the range and translation frame to get [in]
* @param seqData the resulting data [out]
 * @param q_range the range of the query used in this alignment [in]
 * @param queryData the query sequence [in]
 * @param query_words array of precomputed query word hashes [in]
 * @param align information about the alignment between query and subject
* @param shouldTestIdentical did alignment pass a preliminary test in
* redo_alignment.c that indicates the sequence
* pieces may be near identical
*
* @return 0 on success; -1 on failure
*/
static int
s_SequenceGetTranslatedRange(const BlastCompo_MatchingSequence * self,
                             const BlastCompo_SequenceRange * range,
                             BlastCompo_SequenceData * seqData,
                             const BlastCompo_SequenceRange * q_range,
                             BlastCompo_SequenceData * queryData,
                             const Uint8* query_words,
                             const BlastCompo_Alignment *align,
                             const Boolean shouldTestIdentical,
                             const ECompoAdjustModes compo_adjust_mode,
                             const Boolean isSmithWaterman,
                             Boolean* subject_maybe_biased)
{
    int status = 0;
    BlastKappa_SequenceInfo * local_data;  /* BLAST-specific
                                              information associated
                                              with the sequence */
    Uint1 * translation_buffer;  /* a buffer for the translated,
                                    amino-acid sequence */
    Int4 translated_length;      /* length of the translated sequence */
    int translation_frame;       /* frame in which to translate */
    Uint1 * na_sequence;         /* the nucleotide sequence */
    int translation_start;       /* location in na_sequence to start
                                    translating */
    int num_nucleotides;         /* the number of nucleotides to be
                                    translated */

    local_data = self->local_data;
    na_sequence = local_data->seq_arg.seq->sequence_start;

    /* Initialize seqData to nil, in case this routine fails */
    seqData->buffer = NULL;
    seqData->data = NULL;
    seqData->length = 0;

    /* the range context carries the translation frame; a negative frame
       means the reverse strand */
    translation_frame = range->context;
    if (translation_frame > 0) {
        translation_start = 3 * range->begin;
    } else {
        /* reverse strand: the start offset is measured from the end of
           the nucleotide sequence (translation_frame is negative here) */
        translation_start =
            self->length - 3 * range->end + translation_frame + 1;
    }
    num_nucleotides =
        3 * (range->end - range->begin) + ABS(translation_frame) - 1;

    status = Blast_GetPartialTranslation(na_sequence + translation_start,
                                         num_nucleotides,
                                         (Int2) translation_frame,
                                         local_data->seq_arg.seq->gen_code_string,
                                         &translation_buffer,
                                         &translated_length,
                                         NULL);
    if (status == 0) {
        /* seqData takes ownership of translation_buffer; data skips the
           buffer's leading sentinel byte */
        seqData->buffer = translation_buffer;
        seqData->data = translation_buffer + 1;
        seqData->length = translated_length;

        if ( !(KAPPA_TBLASTN_NO_SEG_SEQUENCE) ) {
            /* SEG-filter the translated subject, unless a previous call
               already found it unbiased, or it is nearly identical to
               the query */
            if (compo_adjust_mode
                && (!subject_maybe_biased || *subject_maybe_biased)) {
                if ( (!shouldTestIdentical)
                     || (shouldTestIdentical
                         && (!s_TestNearIdentical(seqData, range->begin,
                                                  queryData, q_range->begin,
                                                  query_words, align)))) {
                    status = s_DoSegSequenceData(seqData, eBlastTypeTblastn,
                                                 subject_maybe_biased);
                    /* on failure, release the translation so seqData is
                       left in its initialized (empty) state */
                    if (status != 0) {
                        free(seqData->buffer);
                        seqData->buffer = NULL;
                        seqData->data = NULL;
                        seqData->length = 0;
                    }
                }
            }
        }
    }
    return status;
}
/**
* Get a string of protein data from a protein sequence.
*
* @param self a protein sequence [in]
* @param range the range to get [in]
* @param seqData the resulting data [out]
* @param queryData the query sequence [in]
* @param queryOffset offset for align if there are multiple queries
* @param align information about the alignment
* between query and subject [in]
* @param shouldTestIdentical did alignment pass a preliminary test in
* redo_alignment.c that indicates the sequence
* pieces may be near identical [in]
*
* @return 0 on success; -1 on failure
*/
static int
s_SequenceGetProteinRange(const BlastCompo_MatchingSequence * self,
                          const BlastCompo_SequenceRange * range,
                          BlastCompo_SequenceData * seqData,
                          const BlastCompo_SequenceRange * q_range,
                          BlastCompo_SequenceData * queryData,
                          const Uint8* query_words,
                          const BlastCompo_Alignment *align,
                          const Boolean shouldTestIdentical,
                          const ECompoAdjustModes compo_adjust_mode,
                          const Boolean isSmithWaterman,
                          Boolean* subject_maybe_biased)
{
    int status = 0;      /* return status */
    Int4 idx;            /* loop index */
    Uint1 *origData;     /* the unfiltered data for the sequence */
    /* BLAST-specific sequence information */
    BlastKappa_SequenceInfo * local_data = self->local_data;
    /* NOTE(review): local_data and seq alias the same pointer under two
       different types; which interpretation is valid depends on
       self->index (see origData below) -- confirm against the code that
       populates local_data */
    BLAST_SequenceBlk * seq = self->local_data;

    if (self->local_data == NULL)
        return -1;

    seqData->data = NULL;
    seqData->length = 0;
    /* Copy the entire sequence (necessary for SEG filtering.) */
    seqData->buffer = calloc((self->length + 2), sizeof(Uint1));
    if (seqData->buffer == NULL) {
        return -1;
    }

    /* First and last characters of the buffer MUST be '\0', which is
     * true here because the buffer was allocated using calloc. */
    seqData->data = seqData->buffer + 1;
    seqData->length = self->length;

    /* presumably a negative index marks a subject supplied directly
       rather than fetched from the database by OID -- confirm with
       callers */
    origData = (self->index >= 0) ? local_data->seq_arg.seq->sequence
        : seq->sequence;
    if((self->index < 0) && (align->frame != 0)) {
        /* directly-supplied translated subject: skip over the earlier
           frames, which appear to be stored consecutively with one
           separator byte each */
        int i=0, offsets =0;
        int f = GET_SEQ_FRAME(align->frame);
        int nucl_length = GET_NUCL_LENGTH(self->length);
        seqData->length = GET_TRANSLATED_LENGTH(nucl_length, f);
        for(; i < f; i++) {
            offsets = GET_TRANSLATED_LENGTH(nucl_length, i) +1;
            origData += offsets;
        }
    }
    /* Copy the sequence data */
    for (idx = 0; idx < seqData->length; idx++) {
        seqData->data[idx] = origData[idx];
    }

    if ( !(KAPPA_BLASTP_NO_SEG_SEQUENCE) ) {
        /* SEG-filter the subject, unless a previous call already found
           it unbiased, or it is nearly identical to the query */
        if (compo_adjust_mode
            && (!subject_maybe_biased || *subject_maybe_biased)) {
            if ( (!shouldTestIdentical)
                 || (shouldTestIdentical
                     && (!s_TestNearIdentical(seqData, 0, queryData,
                                              q_range->begin, query_words,
                                              align)))) {
                status = s_DoSegSequenceData(seqData, eBlastTypeBlastp,
                                             subject_maybe_biased);
            }
        }
    }

    /* Fit the data to the range: write a '\0' sentinel immediately
       before the range and point data at the range itself */
    seqData->data = &seqData->data[range->begin - 1];
    *seqData->data++ = '\0';
    seqData->length = range->end - range->begin;

    if (status != 0) {
        free(seqData->buffer);
        seqData->buffer = NULL;
        seqData->data = NULL;
    }
    return status;
}
/**
* Obtain the sequence data that lies within the given range.
*
 * @param self sequence information [in]
 * @param s_range range specifying the range of subject data [in]
 * @param seqData the subject sequence data obtained [out]
 * @param query the query sequence [in]
 * @param q_range the range of the query used in this alignment [in]
 * @param queryData the copied (and possibly filtered) query data [out]
 * @param query_words array of precomputed query word hashes [in]
 * @param align information about the alignment between query and subject
* @param shouldTestIdentical did alignment pass a preliminary test in
* redo_alignment.c that indicates the sequence
* pieces may be near identical
*
* @return 0 on success; -1 on failure
*/
static int
s_SequenceGetRange(const BlastCompo_MatchingSequence * self,
                   const BlastCompo_SequenceRange * s_range,
                   BlastCompo_SequenceData * seqData,
                   const BlastCompo_SequenceData * query,
                   const BlastCompo_SequenceRange * q_range,
                   BlastCompo_SequenceData * queryData,
                   const Uint8* query_words,
                   const BlastCompo_Alignment *align,
                   const Boolean shouldTestIdentical,
                   const ECompoAdjustModes compo_adjust_mode,
                   const Boolean isSmithWaterman,
                   Boolean* subject_maybe_biased)
{
    Int4 idx;
    BlastKappa_SequenceInfo * seq_info = self->local_data;
    Uint1 *origData = query->data + q_range->begin;

    /* Copy the query sequence (necessary for SEG filtering.) */
    queryData->length = q_range->end - q_range->begin;
    queryData->buffer = calloc((queryData->length + 2), sizeof(Uint1));
    if (queryData->buffer == NULL) {
        /* out of memory; check added for consistency with the calloc
           check in s_SequenceGetProteinRange */
        queryData->data = NULL;
        queryData->length = 0;
        return -1;
    }
    /* first and last bytes of the buffer remain '\0' (calloc) */
    queryData->data = queryData->buffer + 1;

    for (idx = 0; idx < queryData->length; idx++) {
        /* Copy the sequence data, replacing occurrences of amino acid
         * number 24 (Selenocysteine) with number 3 (Cysteine). */
        queryData->data[idx] = (origData[idx] != 24) ? origData[idx] : 3;
    }

    if (seq_info && seq_info->prog_number == eBlastTypeTblastn) {
        /* The sequence must be translated. */
        return s_SequenceGetTranslatedRange(self, s_range, seqData,
                                            q_range, queryData, query_words,
                                            align, shouldTestIdentical,
                                            compo_adjust_mode, isSmithWaterman,
                                            subject_maybe_biased);
    } else {
        return s_SequenceGetProteinRange(self, s_range, seqData,
                                         q_range, queryData, query_words,
                                         align, shouldTestIdentical,
                                         compo_adjust_mode, isSmithWaterman,
                                         subject_maybe_biased);
    }
}
/** Data and data-structures needed to perform a gapped alignment;
 *  passed to the gapping callbacks via BlastCompo_GappingParams. */
typedef struct BlastKappa_GappingParamsContext {
    const BlastScoringParameters*
        scoringParams;                 /**< scoring parameters for a
                                            gapped alignment */
    BlastGapAlignStruct * gap_align;   /**< additional parameters for a
                                            gapped alignment */
    BlastScoreBlk* sbp;                /**< the score block for this search */
    double localScalingFactor;         /**< the amount by which this
                                            search has been scaled */
    EBlastProgramType prog_number;     /**< the type of search being
                                            performed */
} BlastKappa_GappingParamsContext;
/**
* Reads a BlastGapAlignStruct that has been used to compute a
* traceback, and return a BlastCompo_Alignment representing the
* alignment. The BlastGapAlignStruct is in coordinates local to the
* ranges being aligned; the resulting alignment is in coordinates w.r.t.
* the whole query and subject.
*
* @param gap_align the BlastGapAlignStruct
* @param *edit_script the edit script from the alignment; on exit
* NULL. The edit_script is usually
* gap_align->edit_script, but we don't want
* an implicit side effect on the gap_align.
* @param query_range the range of the query used in this alignment
* @param subject_range the range of the subject used in this alignment
* @param matrix_adjust_rule the rule used to compute the scoring matrix
*
* @return the new alignment on success or NULL on error
*/
static BlastCompo_Alignment *
s_NewAlignmentFromGapAlign(BlastGapAlignStruct * gap_align,
                           GapEditScript ** edit_script,
                           BlastCompo_SequenceRange * query_range,
                           BlastCompo_SequenceRange * subject_range,
                           EMatrixAdjustRule matrix_adjust_rule)
{
    BlastCompo_Alignment * result;
    /* In the composition_adjustment library, the query start/end are
       indices into the concatenated query, and so must be shifted. */
    int qStart = gap_align->query_start + query_range->begin;
    int qEnd = gap_align->query_stop + query_range->begin;
    int sStart = gap_align->subject_start + subject_range->begin;
    int sEnd = gap_align->subject_stop + subject_range->begin;

    result = BlastCompo_AlignmentNew(gap_align->score, matrix_adjust_rule,
                                     qStart, qEnd, query_range->context,
                                     sStart, sEnd, subject_range->context,
                                     *edit_script);
    if (result != NULL) {
        /* the new alignment now owns the edit script */
        *edit_script = NULL;
    }
    return result;
}
/** A callback used when performing SmithWaterman alignments:
* Calculate the traceback for one alignment by performing an x-drop
* alignment in the forward direction, possibly increasing the x-drop
* parameter until the desired score is attained.
*
* The start, end and score of the alignment should be obtained
* using the Smith-Waterman algorithm before this routine is called.
*
* @param *pnewAlign the new alignment
* @param *pqueryEnd on entry, the end of the alignment in the
* query, as computed by the Smith-Waterman
* algorithm. On exit, the end as computed by
* the x-drop algorithm
 * @param *pmatchEnd           the same as *pqueryEnd, but for the subject
 *                             sequence
* @param queryStart the starting point in the query
* @param matchStart the starting point in the subject
* @param score the score of the alignment, as computed by
* the Smith-Waterman algorithm
* @param query query sequence data
* @param query_range range of this query in the concatenated
* query
* @param ccat_query_length total length of the concatenated query
* @param subject subject sequence data
* @param subject_range range of subject_data in the translated
* query, in amino acid coordinates
* @param full_subject_length length of the full subject sequence
* @param gapping_params parameters used to compute gapped
* alignments
* @param matrix_adjust_rule the rule used to compute the scoring matrix
*
 * @returns 0 on success, -1 if the new alignment could not be created
* @sa new_xdrop_align_type
*/
static int
s_NewAlignmentUsingXdrop(BlastCompo_Alignment ** pnewAlign,
                         Int4 * pqueryEnd, Int4 *pmatchEnd,
                         Int4 queryStart, Int4 matchStart, Int4 score,
                         BlastCompo_SequenceData * query,
                         BlastCompo_SequenceRange * query_range,
                         Int4 ccat_query_length,
                         BlastCompo_SequenceData * subject,
                         BlastCompo_SequenceRange * subject_range,
                         Int4 full_subject_length,
                         BlastCompo_GappingParams * gapping_params,
                         EMatrixAdjustRule matrix_adjust_rule)
{
    Int4 newScore;      /* score recomputed by the x-drop alignment */
    /* Extent of the alignment as computed by an x-drop alignment
     * (usually the same as (queryEnd - queryStart) and (matchEnd -
     * matchStart)) */
    Int4 queryExtent, matchExtent;
    BlastCompo_Alignment * obj = NULL;  /* the new object */
    /* BLAST-specific parameters needed compute an X-drop alignment */
    BlastKappa_GappingParamsContext * context = gapping_params->context;
    /* Auxiliarly structure for computing gapped alignments */
    BlastGapAlignStruct * gap_align = context->gap_align;
    /* Scoring parameters for gapped alignments */
    const BlastScoringParameters* scoringParams = context->scoringParams;
    /* A structure containing the traceback of a gapped alignment */
    GapEditScript* editScript = NULL;

    /* suppress unused parameter warnings; this is a callback
       function, so these parameter cannot be deleted */
    (void) ccat_query_length;
    (void) full_subject_length;

    gap_align->gap_x_dropoff = gapping_params->x_dropoff;

    /* recompute the alignment ends with an x-drop extension that must
       reach at least the Smith-Waterman score */
    s_SWFindFinalEndsUsingXdrop(query, queryStart, *pqueryEnd,
                                subject, matchStart, *pmatchEnd,
                                gap_align, scoringParams,
                                score, &queryExtent, &matchExtent,
                                &newScore);
    *pqueryEnd = queryStart + queryExtent;
    *pmatchEnd = matchStart + matchExtent;

    /* combine the reverse and forward partial tracebacks into a single
       edit script */
    editScript =
        Blast_PrelimEditBlockToGapEditScript(gap_align->rev_prelim_tback,
                                             gap_align->fwd_prelim_tback);
    if (editScript != NULL) {
        /* Shifted values of the endpoints (range-local -> global) */
        Int4 aqueryStart = queryStart + query_range->begin;
        Int4 aqueryEnd = *pqueryEnd + query_range->begin;
        Int4 amatchStart = matchStart + subject_range->begin;
        Int4 amatchEnd = *pmatchEnd + subject_range->begin;

        obj = BlastCompo_AlignmentNew(newScore, matrix_adjust_rule,
                                      aqueryStart, aqueryEnd,
                                      query_range->context,
                                      amatchStart, amatchEnd,
                                      subject_range->context, editScript);
        if (obj == NULL) {
            /* the alignment did not take ownership of the edit script */
            GapEditScriptDelete(editScript);
        }
    }
    *pnewAlign = obj;

    /* 0 on success, -1 on failure */
    return obj != NULL ? 0 : -1;
}
/**
* A callback: calculate the traceback for one alignment by
* performing an x-drop alignment in both directions
*
* @param in_align the existing alignment, without traceback
* @param matrix_adjust_rule the rule used to compute the scoring matrix
* @param query_data query sequence data
* @param query_range range of this query in the concatenated
* query
* @param ccat_query_length total length of the concatenated query
* @param subject_data subject sequence data
* @param subject_range range of subject_data in the translated
* query, in amino acid coordinates
* @param full_subject_length length of the full subject sequence
* @param gapping_params parameters used to compute gapped
* alignments
* @sa redo_one_alignment_type
*/
static BlastCompo_Alignment *
s_RedoOneAlignment(BlastCompo_Alignment * in_align,
                   EMatrixAdjustRule matrix_adjust_rule,
                   BlastCompo_SequenceData * query_data,
                   BlastCompo_SequenceRange * query_range,
                   int ccat_query_length,
                   BlastCompo_SequenceData * subject_data,
                   BlastCompo_SequenceRange * subject_range,
                   int full_subject_length,
                   BlastCompo_GappingParams * gapping_params)
{
    /* BLAST-specific parameters needed to compute a gapped alignment */
    BlastKappa_GappingParamsContext * ctx = gapping_params->context;
    /* Auxiliary structure for computing gapped alignments */
    BlastGapAlignStruct * gap = ctx->gap_align;
    /* The preliminary gapped HSP that is being recomputed */
    BlastHSP * hsp = in_align->context;
    Boolean fence_hit = FALSE;
    Int4 qStart, sStart;  /* starting point in query and subject */
    int rc;               /* return code */

    /* suppress unused parameter warnings; this is a callback
       function, so these parameter cannot be deleted */
    (void) ccat_query_length;
    (void) full_subject_length;

    /* Use the starting point supplied by the HSP, shifted into
       range-local coordinates. */
    qStart = hsp->query.gapped_start - query_range->begin;
    sStart = hsp->subject.gapped_start - subject_range->begin;

    gap->gap_x_dropoff = gapping_params->x_dropoff;

    /* Previously, last argument was NULL which could cause problems for
       tblastn. */
    rc = BLAST_GappedAlignmentWithTraceback(ctx->prog_number,
                                            query_data->data,
                                            subject_data->data, gap,
                                            ctx->scoringParams,
                                            qStart, sStart,
                                            query_data->length,
                                            subject_data->length,
                                            &fence_hit);
    if (rc != 0) {
        return NULL;
    }
    return s_NewAlignmentFromGapAlign(gap, &gap->edit_script,
                                      query_range, subject_range,
                                      matrix_adjust_rule);
}
/**
 * A BlastKappa_SavedParameters holds the value of certain search
 * parameters on entry to RedoAlignmentCore.  These values are
 * restored on exit.
 */
typedef struct BlastKappa_SavedParameters {
    Int4          gap_open;      /**< a penalty for the existence of a gap */
    Int4          gapExtend;     /**< a penalty for each residue in the
                                      gap */
    double        scale_factor;  /**< the original scale factor */
    Int4        **origMatrix;    /**< The original matrix values */
    double        original_expect_value;  /**< expect value on entry */
    /** copy of the original gapped Karlin-Altschul block
     * corresponding to the first context */
    Blast_KarlinBlk** kbp_gap_orig;
    Int4          num_queries;   /**< Number of queries in this search */
} BlastKappa_SavedParameters;
/**
* Release the data associated with a BlastKappa_SavedParameters and
* delete the object
* @param searchParams the object to be deleted [in][out]
*/
static void
s_SavedParametersFree(BlastKappa_SavedParameters ** searchParams)
{
    /* for convenience, remove one level of indirection from searchParams */
    BlastKappa_SavedParameters * params = *searchParams;

    if (params != NULL) {
        if (params->kbp_gap_orig != NULL) {
            int query_index;
            for (query_index = 0;
                 query_index < params->num_queries;
                 query_index++) {
                if (params->kbp_gap_orig[query_index] != NULL)
                    Blast_KarlinBlkFree(params->kbp_gap_orig[query_index]);
            }
            free(params->kbp_gap_orig);
        }
        if (params->origMatrix != NULL)
            Nlm_Int4MatrixFree(&params->origMatrix);
    }
    sfree(*searchParams);
    *searchParams = NULL;
}
/**
* Create a new instance of BlastKappa_SavedParameters
*
* @param rows number of rows in the scoring matrix
* @param numQueries number of queries in this search
* @param compo_adjust_mode if >0, use composition-based statistics
* @param positionBased if true, the search is position-based
*/
static BlastKappa_SavedParameters *
s_SavedParametersNew(Int4 rows,
                     Int4 numQueries,
                     ECompoAdjustModes compo_adjust_mode,
                     Boolean positionBased)
{
    int query_index;
    BlastKappa_SavedParameters *params;  /* the new object */

    params = malloc(sizeof(BlastKappa_SavedParameters));
    if (params == NULL) {
        goto error_return;
    }
    /* keep the pointers NULL so s_SavedParametersFree is safe on any
       error path */
    params->kbp_gap_orig = NULL;
    params->origMatrix = NULL;

    params->kbp_gap_orig = calloc(numQueries, sizeof(Blast_KarlinBlk*));
    if (params->kbp_gap_orig == NULL) {
        goto error_return;
    }
    params->num_queries = numQueries;
    for (query_index = 0; query_index < numQueries; query_index++) {
        params->kbp_gap_orig[query_index] = NULL;
    }

    if (compo_adjust_mode != eNoCompositionBasedStats) {
        /* position-based searches save a full PSSM-sized matrix;
           otherwise a standard square residue matrix */
        int matrix_rows = positionBased ? rows : BLASTAA_SIZE;
        params->origMatrix = Nlm_Int4MatrixNew(matrix_rows, BLASTAA_SIZE);
        if (params->origMatrix == NULL)
            goto error_return;
    }
    return params;

error_return:
    s_SavedParametersFree(&params);
    return NULL;
}
/**
* Record the initial value of the search parameters that are to be
* adjusted.
*
* @param searchParams holds the recorded values [out]
* @param sbp a score block [in]
* @param scoring gapped alignment parameters [in]
* @param query_length length of the concatenated query [in]
* @param compo_adjust_mode composition adjustment mode [in]
* @param positionBased is this search position-based [in]
*/
static int
s_RecordInitialSearch(BlastKappa_SavedParameters * searchParams,
                      BlastScoreBlk* sbp,
                      const BlastScoringParameters* scoring,
                      int query_length,
                      ECompoAdjustModes compo_adjust_mode,
                      Boolean positionBased)
{
    int query_index;

    /* save the scalar scoring parameters */
    searchParams->gap_open = scoring->gap_open;
    searchParams->gapExtend = scoring->gap_extend;
    searchParams->scale_factor = scoring->scale_factor;

    /* save a copy of every per-query Karlin-Altschul block */
    for (query_index = 0;
         query_index < searchParams->num_queries;
         query_index++) {
        Blast_KarlinBlk * kbp = sbp->kbp_gap[query_index];
        if (kbp != NULL) {
            /* There is a kbp_gap for this query and it must be copied */
            searchParams->kbp_gap_orig[query_index] = Blast_KarlinBlkNew();
            if (searchParams->kbp_gap_orig[query_index] == NULL) {
                return -1;
            }
            Blast_KarlinBlkCopy(searchParams->kbp_gap_orig[query_index],
                                kbp);
        }
    }

    /* save the scoring matrix if it will be modified */
    if (compo_adjust_mode != eNoCompositionBasedStats) {
        int row, col;    /* iteration indices */
        Int4 **matrix;   /* scoring matrix */
        int rows;        /* number of rows in matrix */
        if (positionBased) {
            matrix = sbp->psi_matrix->pssm->data;
            rows = query_length;
        } else {
            matrix = sbp->matrix->data;
            rows = BLASTAA_SIZE;
        }
        for (row = 0; row < rows; row++) {
            for (col = 0; col < BLASTAA_SIZE; col++) {
                searchParams->origMatrix[row][col] = matrix[row][col];
            }
        }
    }
    return 0;
}
/**
 * Rescale the search parameters in the search object and options
 * object to obtain more precision.
 *
 * @param sbp score block to be rescaled
 * @param sp scoring parameters to be rescaled
 * @param num_queries number of queries in this search
 * @param scale_factor amount by which to scale this search
 */
static void
s_RescaleSearch(BlastScoreBlk* sbp,
                BlastScoringParameters* sp,
                int num_queries,
                double scale_factor)
{
    int query_index;   /* loop index over queries */

    /* Scale the gapped Karlin parameters of every query that has them. */
    for (query_index = 0; query_index < num_queries; query_index++) {
        Blast_KarlinBlk * kbp_gap = sbp->kbp_gap[query_index];
        if (kbp_gap == NULL)
            continue;
        kbp_gap->Lambda /= scale_factor;
        /* Keep the cached log of K consistent with K. */
        kbp_gap->logK = log(kbp_gap->K);
    }

    /* Gap penalties scale with the matrix; round to nearest integer. */
    sp->gap_open   = BLAST_Nint(sp->gap_open   * scale_factor);
    sp->gap_extend = BLAST_Nint(sp->gap_extend * scale_factor);
    sp->scale_factor = scale_factor;
}
/**
 * Restore the parameters that were adjusted to their original values.
 *
 * @param sbp the score block to be restored
 * @param scoring the scoring parameters to be restored
 * @param searchParams the initial recorded values of the parameters
 * @param query_length the concatenated query length
 * @param positionBased is this search position-based
 * @param compo_adjust_mode mode of composition adjustment
 */
static void
s_RestoreSearch(BlastScoreBlk* sbp,
                BlastScoringParameters* scoring,
                const BlastKappa_SavedParameters * searchParams,
                int query_length,
                Boolean positionBased,
                ECompoAdjustModes compo_adjust_mode)
{
    int query_index;   /* loop index over queries */

    /* Put back the scalar gapping parameters. */
    scoring->gap_open     = searchParams->gap_open;
    scoring->gap_extend   = searchParams->gapExtend;
    scoring->scale_factor = searchParams->scale_factor;

    /* Restore each saved gapped Karlin block. */
    for (query_index = 0; query_index < searchParams->num_queries;
         query_index++) {
        if (sbp->kbp_gap[query_index] != NULL) {
            Blast_KarlinBlkCopy(sbp->kbp_gap[query_index],
                                searchParams->kbp_gap_orig[query_index]);
        }
    }

    /* Restore the scoring matrix, if it was saved. */
    if (compo_adjust_mode != eNoCompositionBasedStats) {
        Int4 ** dest;    /* matrix to be restored */
        int nrows;       /* number of rows in the matrix */
        int r, c;        /* matrix indices */
        if (positionBased) {
            dest  = sbp->psi_matrix->pssm->data;
            nrows = query_length;
        } else {
            dest  = sbp->matrix->data;
            nrows = BLASTAA_SIZE;
        }
        for (r = 0;  r < nrows;  r++) {
            for (c = 0;  c < BLASTAA_SIZE;  c++) {
                dest[r][c] = searchParams->origMatrix[r][c];
            }
        }
    }
}
/**
 * Initialize an object of type Blast_MatrixInfo.
 *
 * @param self object being initialized
 * @param queryBlk the query sequence data
 * @param sbp score block for this search
 * @param scale_factor amount by which ungapped parameters should be
 *                     scaled
 * @param matrixName name of the matrix
 * @return 0 on success, nonzero on failure
 */
static int
s_MatrixInfoInit(Blast_MatrixInfo * self,
                 BLAST_SequenceBlk* queryBlk,
                 BlastScoreBlk* sbp,
                 double scale_factor,
                 const char * matrixName)
{
    int status = 0;                              /* return status */
    size_t nameBytes = strlen(matrixName) + 1;   /* bytes in name, incl. NUL */

    /* Duplicate the matrix name (strdup is not standard C). */
    self->matrixName = malloc(nameBytes);
    if (self->matrixName == NULL) {
        return -1;
    }
    memcpy(self->matrixName, matrixName, nameBytes);

    if (self->positionBased) {
        /* PSSM search: derive frequency ratios and the scaled matrix
           from the position-specific data. */
        status = s_GetPosBasedStartFreqRatios(self->startFreqRatios,
                                              queryBlk->length,
                                              queryBlk->sequence,
                                              matrixName,
                                              sbp->psi_matrix->freq_ratios);
        if (status == 0) {
            status = s_ScalePosMatrix(self->startMatrix, matrixName,
                                      sbp->psi_matrix->freq_ratios,
                                      queryBlk->sequence,
                                      queryBlk->length, sbp, scale_factor);
            self->ungappedLambda = sbp->kbp_psi[0]->Lambda / scale_factor;
        }
    } else {
        /* Standard matrix search: use the ideal ungapped Lambda and the
           matrix's published frequency ratios. */
        self->ungappedLambda = sbp->kbp_ideal->Lambda / scale_factor;
        status = s_GetStartFreqRatios(self->startFreqRatios, matrixName);
        if (status == 0) {
            Blast_Int4MatrixFromFreq(self->startMatrix, self->cols,
                                     self->startFreqRatios,
                                     self->ungappedLambda);
        }
    }
    return status;
}
/**
 * Create an array of 8-mer hashes for a sequence, such that the index of
 * each 8-mer equals its starting position in the query.
 *
 * @param seq_data sequence residues [in]
 * @param seq_len number of residues in seq_data [in]
 * @param words receives the newly allocated array of
 *              seq_len - 8 + 1 hashes; owned by the caller [out]
 * @return 0 on success; -1 on bad arguments, a sequence shorter than the
 *         word size, or out-of-memory
 */
static int
s_CreateWordArray(const Uint1* seq_data, Int4 seq_len, Uint8** words)
{
    int word_size = 8;       /* word size for k-mer matching */
    Uint8* query_hashes;     /* list of hashes for query words */
    Uint8 mask = NCBI_CONST_UINT8(0xFFFFFFFFFF); /* mask for computing hash
                                                    values */
    int i;
    /* if query or subject length is smaller than word size, exit */
    if (!seq_data || !words || seq_len < word_size) {
        return -1;
    }
    query_hashes = (Uint8*)calloc((seq_len - word_size + 1),
                                  sizeof(Uint8));
    *words = query_hashes;
    if (!query_hashes) {
        return -1;
    }
    /* Compute the first hash directly, then roll it along the sequence:
       shift out the oldest residue (5 bits per residue, 40-bit mask) and
       add the incoming one. */
    query_hashes[0] = s_GetHash(&seq_data[0], word_size);
    /* The loop previously ran with i < seq_len - word_size, which left the
       final slot (index seq_len - word_size) allocated but never hashed;
       use <= so every word position is filled. */
    for (i = 1; i <= seq_len - word_size; i++) {
        query_hashes[i] = query_hashes[i - 1];
        query_hashes[i] <<= 5;
        query_hashes[i] &= mask;
        query_hashes[i] += (Uint8)seq_data[i + word_size - 1];
    }
    return 0;
}
/**
 * Release an array of BlastCompo_QueryInfo objects, including the
 * per-query word arrays, and set the caller's pointer to NULL.
 *
 * @param query_info address of the array to free; may point to NULL
 * @param num_queries number of elements in the array
 */
static void s_FreeBlastCompo_QueryInfoArray(BlastCompo_QueryInfo** query_info,
                                            int num_queries)
{
    int idx;
    if (query_info == NULL) {
        return;
    }
    for (idx = 0; idx < num_queries; idx++) {
        /* free(NULL) is a no-op, so no guard is needed. */
        free((*query_info)[idx].words);
    }
    free(*query_info);
    *query_info = NULL;
}
/**
 * Save information about all queries in an array of objects of type
 * BlastCompo_QueryInfo.
 *
 * @param query_data query sequence data
 * @param blast_query_info information about all queries, as an
 *                         internal blast data structure
 * @param skip if TRUE, do not compute amino-acid compositions
 *
 * @return the new array on success, or NULL on error
 */
static BlastCompo_QueryInfo *
s_GetQueryInfo(Uint1 * query_data, const BlastQueryInfo * blast_query_info, Boolean skip)
{
    int num_queries = blast_query_info->last_context + 1; /* elements in
                                                             the new array */
    BlastCompo_QueryInfo * compo_query_info =             /* the new array */
        calloc(num_queries, sizeof(BlastCompo_QueryInfo));
    int ctx;                                              /* loop index */

    if (compo_query_info == NULL) {
        return NULL;
    }
    for (ctx = 0; ctx < num_queries; ctx++) {
        BlastCompo_QueryInfo * info = &compo_query_info[ctx];
        const BlastContextInfo * context = &blast_query_info->contexts[ctx];

        info->eff_search_space = (double) context->eff_searchsp;
        info->origin     = context->query_offset;
        info->seq.data   = &query_data[info->origin];
        info->seq.length = context->query_length;
        /* A failed word-array build leaves info->words NULL; that is
           tolerated downstream, so the status is deliberately ignored. */
        info->words = NULL;
        s_CreateWordArray(info->seq.data, info->seq.length, &info->words);
        if (!skip) {
            Blast_ReadAaComposition(&info->composition, BLASTAA_SIZE,
                                    info->seq.data, info->seq.length);
        }
    }
    return compo_query_info;
}
/**
 * Create a new object of type BlastCompo_GappingParams. The new
 * object contains the parameters needed by the composition adjustment
 * library to compute a gapped alignment.
 *
 * @param context the data structures needed by callback functions
 *                that perform the gapped alignments.
 * @param extendParams parameters used for a gapped extension
 * @param num_queries the number of queries in the concatenated query
 * @return the new object, or NULL on out-of-memory
 */
static BlastCompo_GappingParams *
s_GappingParamsNew(BlastKappa_GappingParamsContext * context,
                   const BlastExtensionParameters* extendParams,
                   int num_queries)
{
    int q;                                /* query context index */
    double smallest_lambda = DBL_MAX;     /* smallest gapped Lambda */
    const BlastScoringParameters * scoring = context->scoringParams;
    const BlastExtensionOptions * ext_options = extendParams->options;
    /* The new object */
    BlastCompo_GappingParams * params =
        malloc(sizeof(BlastCompo_GappingParams));

    if (params == NULL) {
        return NULL;
    }
    params->gap_open   = scoring->gap_open;
    params->gap_extend = scoring->gap_extend;
    params->context    = context;

    /* Find the smallest gapped Lambda over all contexts that have one. */
    for (q = 0; q < num_queries; q++) {
        Blast_KarlinBlk * kbp = context->sbp->kbp_gap[q];
        if (kbp != NULL && kbp->Lambda < smallest_lambda) {
            smallest_lambda = kbp->Lambda;
        }
    }
    /* Use the larger of the bit-score-derived dropoff and the raw-score
       dropoff already stored in the extension parameters. */
    params->x_dropoff = (Int4)
        MAX(ext_options->gap_x_dropoff_final * NCBIMATH_LN2 / smallest_lambda,
            extendParams->gap_x_dropoff_final);
    context->gap_align->gap_x_dropoff = params->x_dropoff;
    return params;
}
/** Callbacks used by the Blast_RedoOneMatch* routines; the entries are
 * the s_* functions defined in this file (lambda calculation, sequence
 * range retrieval, the two realignment routines, and edit-script
 * disposal). */
static const Blast_RedoAlignCallbacks
redo_align_callbacks = {
    s_CalcLambda, s_SequenceGetRange, s_RedoOneAlignment,
    s_NewAlignmentUsingXdrop, s_FreeEditScript
};
/* Bit score per alignment position threshold for the preliminary
   near-identical test */
#define NEAR_IDENTICAL_BITS_PER_POSITION (1.74)
/**
 * Read the parameters required for the Blast_RedoOneMatch* functions from
 * the corresponding parameters in standard BLAST datatypes. Return a new
 * object representing these parameters.
 *
 * @param context bundle of search structures used by the gapped-alignment
 *                callbacks [in]
 * @param queryBlk the concatenated query sequence data [in]
 * @param queryInfo context/offset bookkeeping for the concatenated query [in]
 * @param hitParams hit-saving parameters (cutoffs, e-value, linking) [in]
 * @param extendParams gapped extension parameters and options [in]
 * @return the new parameter object, or NULL on failure
 *
 * NOTE(review): on the two early-return failure paths below, the
 * scaledMatrixInfo object is not released, and the result of
 * Blast_MatrixInfoNew is not checked for NULL before s_MatrixInfoInit
 * uses it — confirm whether these paths are reachable in practice.
 */
static Blast_RedoAlignParams *
s_GetAlignParams(BlastKappa_GappingParamsContext * context,
                 BLAST_SequenceBlk * queryBlk,
                 const BlastQueryInfo* queryInfo,
                 const BlastHitSavingParameters* hitParams,
                 const BlastExtensionParameters* extendParams)
{
    int status = 0;    /* status code */
    int rows;          /* number of rows in the scoring matrix */
    int cutoff_s;      /* cutoff score for saving an alignment */
    double cutoff_e;   /* cutoff evalue for saving an alignment */
    BlastCompo_GappingParams *
        gapping_params = NULL;    /* parameters needed to compute a gapped
                                     alignment */
    Blast_MatrixInfo *
        scaledMatrixInfo;         /* information about the scoring matrix */
    /* does this kind of search translate the database sequence */
    int subject_is_translated = (context->prog_number == eBlastTypeTblastn) || (context->prog_number == eBlastTypeRpsTblastn);
    int query_is_translated = context->prog_number == eBlastTypeBlastx;
    /* is this a positiion-based search */
    Boolean positionBased = (Boolean) (context->sbp->psi_matrix != NULL);
    /* will BLAST_LinkHsps be called to assign e-values */
    Boolean do_link_hsps = (hitParams->do_sum_stats);
    ECompoAdjustModes compo_adjust_mode =
        (ECompoAdjustModes) extendParams->options->compositionBasedStats;
    /* per position bit score cutoff for testing whether sequences are
       near identical */
    double near_identical_cutoff_bits = NEAR_IDENTICAL_BITS_PER_POSITION;
    /* score block is already scaled by context->localScalingFactor */
    double near_identical_cutoff=0;
    Int4 index;
    /* Convert the per-position bit threshold to a raw score using the
       Lambda of the first valid context. */
    for (index = queryInfo->first_context;
         index <= queryInfo->last_context; ++index) {
        if ((queryInfo->contexts[index].is_valid)) {
            near_identical_cutoff =
                (near_identical_cutoff_bits * NCBIMATH_LN2)
                / context->sbp->kbp_gap[index]->Lambda;
            break;
        }
    }
    if (do_link_hsps) {
        ASSERT(hitParams->link_hsp_params != NULL);
        /* Scale the minimum cutoff score to match the locally scaled
           scoring system. */
        cutoff_s =
            (int) (hitParams->cutoff_score_min * context->localScalingFactor);
    } else {
        /* There is no cutoff score; we consider e-values instead */
        cutoff_s = 1;
    }
    cutoff_e = hitParams->options->expect_value;
    rows = positionBased ? queryInfo->max_length : BLASTAA_SIZE;
    scaledMatrixInfo = Blast_MatrixInfoNew(rows, BLASTAA_SIZE, positionBased);
    status = s_MatrixInfoInit(scaledMatrixInfo, queryBlk, context->sbp,
                              context->localScalingFactor,
                              context->scoringParams->options->matrix);
    if (status != 0) {
        return NULL;
    }
    gapping_params = s_GappingParamsNew(context, extendParams,
                                        queryInfo->last_context + 1);
    if (gapping_params == NULL) {
        return NULL;
    } else {
        /* Ownership of scaledMatrixInfo and gapping_params is handed to
           the new Blast_RedoAlignParams object (passed by address). */
        return
            Blast_RedoAlignParamsNew(&scaledMatrixInfo, &gapping_params,
                                     compo_adjust_mode, positionBased,
                                     query_is_translated,
                                     subject_is_translated,
                                     queryInfo->max_length, cutoff_s, cutoff_e,
                                     do_link_hsps, &redo_align_callbacks,
                                     near_identical_cutoff);
    }
}
/**
 * Convert an array of BlastCompo_Heap objects to a BlastHSPResults structure.
 *
 * @param results BLAST core external results structure (pre-SeqAlign)
 *                [out]
 * @param heaps an array of BlastCompo_Heap objects
 * @param hitlist_size size of each list in the results structure above [in]
 */
static void
s_FillResultsFromCompoHeaps(BlastHSPResults * results,
                            BlastCompo_Heap heaps[],
                            Int4 hitlist_size)
{
    int qidx;   /* loop index over queries */

    for (qidx = 0; qidx < results->num_queries; qidx++) {
        BlastCompo_Heap * heap = &heaps[qidx];
        BlastHSPList* hsp_list = NULL;
        BlastHitList* hitlist = Blast_HitListNew(hitlist_size);

        results->hitlist_array[qidx] = hitlist;
        /* Drain the heap into this query's hit list. */
        while ((hsp_list = BlastCompo_HeapPop(heap)) != NULL) {
            Blast_HitListUpdate(hitlist, hsp_list);
        }
    }
    /* The heaps pop in reverse order; restore the expected ordering. */
    Blast_HSPResultsReverseOrder(results);
}
/** Remove all matches from a BlastCompo_Heap, freeing each one. */
static void s_ClearHeap(BlastCompo_Heap * self)
{
    BlastHSPList* popped;   /* an element of the heap */
    while ((popped = BlastCompo_HeapPop(self)) != NULL) {
        popped = Blast_HSPListFree(popped);
    }
}
/**
 * Free a BlastGapAlignStruct copy created by s_BlastGapAlignStruct_Copy
 *
 * @param copy Pointer to BlastGapAlignStruct to be freed
 */
static void s_BlastGapAlignStruct_Free(BlastGapAlignStruct* copy)
{
    /* Release the linked list of traceback state blocks. */
    while (copy->state_struct != NULL) {
        GapStateArrayStruct* node = copy->state_struct;
        copy->state_struct = node->next;
        if (node->state_array) {
            sfree(node->state_array);
        }
        sfree(node);
    }
    /* Release the traceback edit script and its parallel arrays. */
    if (copy->edit_script != NULL) {
        if (copy->edit_script->op_type) {
            sfree(copy->edit_script->op_type);
        }
        if (copy->edit_script->num) {
            sfree(copy->edit_script->num);
        }
        sfree(copy->edit_script);
    }
    /* Release the forward and reverse preliminary traceback blocks. */
    if (copy->fwd_prelim_tback != NULL) {
        if (copy->fwd_prelim_tback->edit_ops) {
            sfree(copy->fwd_prelim_tback->edit_ops);
        }
        sfree(copy->fwd_prelim_tback);
    }
    if (copy->rev_prelim_tback != NULL) {
        if (copy->rev_prelim_tback->edit_ops) {
            sfree(copy->rev_prelim_tback->edit_ops);
        }
        sfree(copy->rev_prelim_tback);
    }
    /* Release the auxiliary alignment buffers. */
    if (copy->greedy_align_mem != NULL) {
        sfree(copy->greedy_align_mem);
    }
    if (copy->dp_mem != NULL) {
        sfree(copy->dp_mem);
    }
    /* NOTE(review): copy->sbp was assigned (not deep-copied) in
       s_BlastGapAlignStruct_Copy; freeing it here assumes the caller
       does not free the same score block elsewhere — confirm. */
    if (copy->sbp != NULL) {
        sfree(copy->sbp);
    }
    sfree(copy);
}
/**
 * Create a "deep" copy of a BlastGapAlignStruct structure.
 *
 * Non-pointer structure members are copied. Pointers to data which will
 * only be read are copied. For data which will be changing, memory for copies
 * will be allocated and new pointers will be assigned to them. The process
 * repeats down the structure hierarchy until all pointers are dealt with.
 *
 * @param orig Pointer to BlastGapAlignStruct structure to be copied
 * @param sbp Pointer to BlastScoreBlk structure, required to set copy->sbp
 *
 * @return Pointer to copy of original BlastGapAlignStruct structure
 *
 * NOTE(review): none of the calloc results in this function (including
 * the top-level allocation of 'copy') are checked for NULL before use —
 * confirm the surrounding code tolerates an OOM crash here.
 */
static BlastGapAlignStruct* s_BlastGapAlignStruct_Copy(
        BlastGapAlignStruct* orig,
        BlastScoreBlk* sbp
)
{
    BlastGapAlignStruct* copy =
        (BlastGapAlignStruct*) calloc(1, sizeof(BlastGapAlignStruct));
    // Copy plain old data (ints, doubles, booleans, ...).
    // Any pointer members will be processed separately.
    memcpy(copy, orig, sizeof(BlastGapAlignStruct));
    {
        /* Deep-copy the linked list of traceback state blocks; each node's
           state_array is duplicated element by element. */
        GapStateArrayStruct* o = orig->state_struct;
        if (o != NULL) {
            GapStateArrayStruct* c = (GapStateArrayStruct*) calloc(
                1,
                sizeof(GapStateArrayStruct)
            );
            copy->state_struct = c;
            memcpy(c, o, sizeof(GapStateArrayStruct));
            c->state_array = (Uint1*) calloc(c->length, sizeof(Uint1));
            int i;
            for (i = 0; i < c->length; ++i) {
                c->state_array[i] = o->state_array[i];
            }
            /* Walk the remainder of the list, repeating the same
               node-plus-array duplication for each element. */
            while (o->next != NULL) {
                c->next = (GapStateArrayStruct*)
                    calloc(1, sizeof(GapStateArrayStruct));
                c = c->next;
                o = o->next;
                memcpy(c, o, sizeof(GapStateArrayStruct));
                c->state_array = (Uint1*) calloc(c->length, sizeof(Uint1));
                int i;
                for (i = 0; i < c->length; ++i) {
                    c->state_array[i] = o->state_array[i];
                }
            }
        }
    }
    {
        /* Deep-copy the traceback edit script: the struct plus its two
           parallel arrays (op_type and num). */
        GapEditScript* o = orig->edit_script;
        if (o != NULL) {
            GapEditScript* c = (GapEditScript*) calloc(
                1,
                sizeof(GapEditScript)
            );
            copy->edit_script = c;
            memcpy(c, o, sizeof(GapEditScript));
            c->op_type = (EGapAlignOpType*) calloc(
                o->size,
                sizeof(EGapAlignOpType)
            );
            c->num = (Int4*) calloc(o->size, sizeof(Int4));
            int i;
            for (i = 0; i < o->size; ++i) {
                c->op_type[i] = o->op_type[i];
                c->num[i] = o->num[i];
            }
        }
    }
    {
        /* Deep-copy the forward preliminary traceback block and its
           edit-operation array (sized by num_ops_allocated). */
        GapPrelimEditBlock* o = orig->fwd_prelim_tback;
        if (o != NULL) {
            GapPrelimEditBlock* c = (GapPrelimEditBlock*) calloc(
                1,
                sizeof(GapPrelimEditBlock)
            );
            copy->fwd_prelim_tback = c;
            memcpy(c, o, sizeof(GapPrelimEditBlock));
            c->edit_ops = calloc(
                o->num_ops_allocated,
                sizeof(GapPrelimEditScript)
            );
            int i;
            for (i = 0; i < o->num_ops_allocated; ++i) {
                c->edit_ops[i].op_type = o->edit_ops[i].op_type;
                c->edit_ops[i].num = o->edit_ops[i].num;
            }
        }
    }
    {
        /* Deep-copy the reverse preliminary traceback block, same
           structure as the forward block above. */
        GapPrelimEditBlock* o = orig->rev_prelim_tback;
        if (o != NULL) {
            GapPrelimEditBlock* c = (GapPrelimEditBlock*) calloc(
                1,
                sizeof(GapPrelimEditBlock)
            );
            copy->rev_prelim_tback = c;
            memcpy(c, o, sizeof(GapPrelimEditBlock));
            c->edit_ops = calloc(
                o->num_ops_allocated,
                sizeof(GapPrelimEditScript)
            );
            int i;
            for (i = 0; i < o->num_ops_allocated; ++i) {
                c->edit_ops[i].op_type = o->edit_ops[i].op_type;
                c->edit_ops[i].num = o->edit_ops[i].num;
            }
        }
    }
    {
        /* Shallow struct copy of the greedy alignment workspace.
           NOTE(review): any pointers inside SGreedyAlignMem remain shared
           with the original — confirm that is intended. */
        SGreedyAlignMem* o = orig->greedy_align_mem;
        if (o != NULL) {
            SGreedyAlignMem* c = (SGreedyAlignMem*) calloc(
                1,
                sizeof(SGreedyAlignMem)
            );
            copy->greedy_align_mem = c;
            memcpy(c, o, sizeof(SGreedyAlignMem));
        }
    }
    {
        /* Duplicate the dynamic-programming memory array. */
        BlastGapDP* o = orig->dp_mem;
        if (o != NULL) {
            BlastGapDP* c = (BlastGapDP*) calloc(
                orig->dp_mem_alloc,
                sizeof(BlastGapDP)
            );
            copy->dp_mem = c;
            memcpy(c, o, orig->dp_mem_alloc * sizeof(BlastGapDP));
        }
    }
    {
        /* The score block is supplied by the caller, not deep-copied. */
        copy->sbp = sbp;
    }
    return copy;
}
/**
 * Free a BlastScoreBlk copy created by s_BlastScoreBlk_Copy
 *
 * BlastScoreBlk* pointer "bsb_ptr" should be passed as (&bsb_ptr);
 * this function will set bsb_ptr to NULL before returning.
 *
 * @param copy Pointer to (pointer to BlastScoreBlk to be freed)
 */
static
void s_BlastScoreBlk_Free(BlastScoreBlk** copy)
{
    BlastScoreBlk* target = *copy;
    /* Clear the caller's pointer so it cannot be used after the free. */
    *copy = NULL;
    BlastScoreBlkFree(target);
}
/**
 * Create a "deep" copy of a BlastScoreBlk structure.
 *
 * Non-pointer structure members are copied. Pointers to data which will
 * only be read are copied. For data which will be changing, memory for copies
 * will be allocated and new pointers will be assigned to them. The process
 * repeats down the structure hierarchy until all pointers are dealt with.
 *
 * @param program The program type
 * @param orig Pointer to BlastScoreBlk structure to be copied
 * @param alphabet_code Alphabet code
 * @param number_of_contexts Number of contexts
 *
 * @return Pointer to copy of original BlastScoreBlk structure
 *
 * NOTE(review): the alphabet_code and number_of_contexts parameters are
 * never read; the corresponding fields of 'orig' are used instead —
 * confirm whether the parameters can be removed at all call sites.
 * NOTE(review): strdup is called on orig->name without a NULL check, and
 * copy->comments is a shallow pointer copy — confirm orig->name is
 * always set and that BlastScoreBlkFree does not free 'comments'
 * (otherwise a double free is possible).
 */
static
BlastScoreBlk* s_BlastScoreBlk_Copy(
        EBlastProgramType program,
        BlastScoreBlk* orig,
        Uint1 alphabet_code,
        Int4 number_of_contexts
)
{
    BlastScoreBlk* copy = BlastScoreBlkNew(
        orig->alphabet_code,
        orig->number_of_contexts
    );
    if (copy == NULL) {
        return NULL;
    }
    copy->alphabet_start = orig->alphabet_start;
    copy->name = strdup(orig->name);
    /* Shallow copy; see the NOTE(review) in the header comment. */
    copy->comments = orig->comments;
    /* Deep-copy orig->matrix */
    if (orig->matrix != NULL) {
        if (copy->matrix == NULL) {
            return BlastScoreBlkFree(copy);
        }
        SBlastScoreMatrix* m = copy->matrix;
        /* Copy the score data column by column. */
        if (m->data != NULL && orig->matrix->data != NULL) {
            int i;
            for (i = 0; i < orig->matrix->ncols; ++i) {
                memcpy(
                    m->data[i],
                    orig->matrix->data[i],
                    m->nrows * sizeof(int)
                );
            }
        }
        if (m->freqs != NULL && orig->matrix->freqs != NULL) {
            memcpy(
                m->freqs,
                orig->matrix->freqs,
                m->ncols * sizeof(double)
            );
        }
        m->lambda = orig->matrix->lambda;
    }
    /* Deep-copy orig->psi_matrix */
    if (orig->psi_matrix != NULL
        && orig->psi_matrix->pssm != NULL) {
        copy->psi_matrix = SPsiBlastScoreMatrixNew(orig->psi_matrix->pssm->ncols);
        if (copy->psi_matrix == NULL) {
            return BlastScoreBlkFree(copy);
        }
        SPsiBlastScoreMatrix* pm = copy->psi_matrix;
        SBlastScoreMatrix* m = pm->pssm;
        /* Copy the PSSM score data column by column. */
        if (m->data != NULL && orig->psi_matrix->pssm->data != NULL) {
            int i;
            for (i = 0; i < orig->psi_matrix->pssm->ncols; ++i) {
                memcpy(
                    m->data[i],
                    orig->psi_matrix->pssm->data[i],
                    m->nrows * sizeof(int)
                );
            }
        }
        if (m->freqs != NULL
            && orig->psi_matrix->pssm->freqs != NULL) {
            memcpy(
                m->freqs,
                orig->psi_matrix->pssm->freqs,
                m->ncols * sizeof(double)
            );
        }
        m->lambda = orig->psi_matrix->pssm->lambda;
        /* Copy the per-column frequency ratios, if both sides have them. */
        if (pm->freq_ratios != NULL
            && orig->psi_matrix->freq_ratios != NULL) {
            int i;
            for (i = 0; i < orig->psi_matrix->pssm->ncols; ++i) {
                memcpy(
                    pm->freq_ratios[i],
                    orig->psi_matrix->freq_ratios[i],
                    orig->psi_matrix->pssm->nrows * sizeof(double)
                );
            }
        }
        if (orig->psi_matrix->kbp != NULL) {
            memcpy(pm->kbp, orig->psi_matrix->kbp, sizeof(Blast_KarlinBlk));
        }
    }
    copy->matrix_only_scoring = orig->matrix_only_scoring;
    copy->complexity_adjusted_scoring = orig->complexity_adjusted_scoring;
    copy->loscore = orig->loscore;
    copy->hiscore = orig->hiscore;
    copy->penalty = orig->penalty;
    copy->reward = orig->reward;
    copy->read_in_matrix = orig->read_in_matrix;
    /* Point the generic kbp/kbp_gap aliases at the PSSM or standard
       Karlin block arrays, depending on the program type. */
    if (Blast_QueryIsPssm(program)) {
        copy->kbp = copy->kbp_psi;
        copy->kbp_gap = copy->kbp_gap_psi;
    } else {
        copy->kbp = copy->kbp_std;
        copy->kbp_gap = copy->kbp_gap_std;
    }
    if (orig->gbp != NULL) {
        memcpy(copy->gbp, orig->gbp, sizeof(Blast_GumbelBlk));
    }
    /* Duplicate the per-context statistics: score frequencies and all
       four Karlin block arrays. */
    int ctx;
    for (ctx = 0; ctx < orig->number_of_contexts; ++ctx) {
        if (orig->sfp != NULL && orig->sfp[ctx] != NULL) {
            copy->sfp[ctx] = Blast_ScoreFreqNew(
                orig->sfp[ctx]->score_min,
                orig->sfp[ctx]->score_max
            );
            if (copy->sfp[ctx] == NULL) {
                return BlastScoreBlkFree(copy);
            }
            copy->sfp[ctx]->obs_min = orig->sfp[ctx]->obs_min;
            copy->sfp[ctx]->obs_max = orig->sfp[ctx]->obs_max;
            copy->sfp[ctx]->score_avg = orig->sfp[ctx]->score_avg;
            /* r = number of entries in the score-probability array. */
            int r = orig->sfp[ctx]->score_max - orig->sfp[ctx]->score_min + 1;
            memcpy(
                copy->sfp[ctx]->sprob0,
                orig->sfp[ctx]->sprob0,
                r * sizeof(double)
            );
        }
        if (orig->kbp_std != NULL && orig->kbp_std[ctx] != NULL) {
            copy->kbp_std[ctx] = Blast_KarlinBlkNew();
            if (Blast_KarlinBlkCopy(copy->kbp_std[ctx], orig->kbp_std[ctx]) != 0) {
                return BlastScoreBlkFree(copy);
            }
        }
        if (orig->kbp_gap_std != NULL && orig->kbp_gap_std[ctx] != NULL) {
            copy->kbp_gap_std[ctx] = Blast_KarlinBlkNew();
            if (Blast_KarlinBlkCopy(copy->kbp_gap_std[ctx], orig->kbp_gap_std[ctx]) != 0) {
                return BlastScoreBlkFree(copy);
            }
        }
        if (orig->kbp_psi != NULL && orig->kbp_psi[ctx] != NULL) {
            copy->kbp_psi[ctx] = Blast_KarlinBlkNew();
            if (Blast_KarlinBlkCopy(copy->kbp_psi[ctx], orig->kbp_psi[ctx]) != 0) {
                return BlastScoreBlkFree(copy);
            }
        }
        if (orig->kbp_gap_psi != NULL && orig->kbp_gap_psi[ctx] != NULL) {
            copy->kbp_gap_psi[ctx] = Blast_KarlinBlkNew();
            if (Blast_KarlinBlkCopy(copy->kbp_gap_psi[ctx], orig->kbp_gap_psi[ctx]) != 0) {
                return BlastScoreBlkFree(copy);
            }
        }
        /* Re-point the per-context aliases at the freshly copied blocks. */
        if (Blast_QueryIsPssm(program)) {
            copy->kbp[ctx] = copy->kbp_psi[ctx];
            copy->kbp_gap[ctx] = copy->kbp_gap_psi[ctx];
        } else {
            copy->kbp[ctx] = copy->kbp_std[ctx];
            copy->kbp_gap[ctx] = copy->kbp_gap_std[ctx];
        }
    }
    if (orig->kbp_ideal != NULL) {
        copy->kbp_ideal = Blast_KarlinBlkNew();
        if (Blast_KarlinBlkCopy(copy->kbp_ideal, orig->kbp_ideal) != 0) {
            return BlastScoreBlkFree(copy);
        }
    }
    copy->ambiguous_res = (Uint1*) calloc(orig->ambig_size, sizeof(Uint1));
    if (orig->ambiguous_res != NULL) {
        memcpy(copy->ambiguous_res, orig->ambiguous_res, orig->ambig_size);
    }
    copy->ambig_size = orig->ambig_size;
    copy->ambig_occupy = orig->ambig_occupy;
    copy->round_down = orig->round_down;
    return copy;
}
/**
 * Recompute alignments for each match found by the gapped BLAST
 * algorithm. Single-thread adapter to Blast_RedoAlignmentCore_MT:
 * every argument is forwarded unchanged with a thread count of one.
 */
Int2
Blast_RedoAlignmentCore(EBlastProgramType program_number,
                        BLAST_SequenceBlk * queryBlk,
                        const BlastQueryInfo* queryInfo,
                        BlastScoreBlk* sbp,
                        BLAST_SequenceBlk * subjectBlk,
                        const BlastSeqSrc* seqSrc,
                        Int4 default_db_genetic_code,
                        BlastHSPList * thisMatch,
                        BlastHSPStream* hsp_stream,
                        BlastScoringParameters* scoringParams,
                        const BlastExtensionParameters* extendParams,
                        const BlastHitSavingParameters* hitParams,
                        const PSIBlastOptions* psiOptions,
                        BlastHSPResults* results)
{
    const Uint4 kSingleThread = 1;   /* number of threads */
    return Blast_RedoAlignmentCore_MT(program_number, kSingleThread,
                                      queryBlk, queryInfo, sbp, subjectBlk,
                                      seqSrc, default_db_genetic_code,
                                      thisMatch, hsp_stream, scoringParams,
                                      extendParams, hitParams, psiOptions,
                                      results);
}
/**
* Recompute alignments for each match found by the gapped BLAST
* algorithm.
*/
Int2
Blast_RedoAlignmentCore_MT(EBlastProgramType program_number,
Uint4 num_threads,
BLAST_SequenceBlk * queryBlk,
const BlastQueryInfo* queryInfo,
BlastScoreBlk* sbp,
BLAST_SequenceBlk * subjectBlk,
const BlastSeqSrc* seqSrc,
Int4 default_db_genetic_code,
BlastHSPList * thisMatch,
BlastHSPStream* hsp_stream,
BlastScoringParameters* scoringParams,
const BlastExtensionParameters* extendParams,
const BlastHitSavingParameters* hitParams,
const PSIBlastOptions* psiOptions,
BlastHSPResults* results)
{
int status_code = 0; /* return value code */
/* the factor by which to scale the scoring system in order to
* obtain greater precision */
double localScalingFactor;
/* forbidden ranges for each database position (used in
* Smith-Waterman alignments) */
Blast_ForbiddenRanges forbidden = {0,};
/* a collection of alignments for each query sequence with
* sequences from the database */
BlastCompo_Heap* redoneMatches = NULL;
/* stores all fields needed for computing a compositionally
* adjusted score matrix using Newton's method */
Blast_CompositionWorkspace** NRrecord_tld = NULL;
/* loop index */
int query_index;
/* number of queries in the concatenated query */
int numQueries = queryInfo->num_queries;
/* number of contexts in the concatenated query */
int numContexts = queryInfo->last_context + 1;
/* number of contexts within a query */
int numFrames = (program_number == eBlastTypeBlastx) ? 6:1;
/* keeps track of gapped alignment params */
BlastGapAlignStruct* gapAlign = NULL;
/* the values of the search parameters that will be recorded, altered
* in the search structure in this routine, and then restored before
* the routine exits. */
BlastKappa_SavedParameters *savedParams = NULL;
/* All alignments above this value will be reported, no matter how many. */
double inclusion_ethresh;
BlastHSPResults* local_results = NULL;
BlastCompo_QueryInfo** query_info_tld = NULL;
int* numContexts_tld = NULL;
int* compositionTestIndex_tld = NULL;
Blast_RedoAlignParams** redo_align_params_tld = NULL;
BLAST_SequenceBlk** subjectBlk_tld = NULL;
Boolean positionBased = (Boolean) (sbp->psi_matrix != NULL);
ECompoAdjustModes compo_adjust_mode =
(ECompoAdjustModes) extendParams->options->compositionBasedStats;
Boolean smithWaterman =
(Boolean) (extendParams->options->eTbackExt == eSmithWatermanTbck);
/* which test function do we use to see if a composition-adjusted
p-value is desired; value needs to be passed in eventually*/
int compositionTestIndex = extendParams->options->unifiedP;
Uint1* genetic_code_string = GenCodeSingletonFind(default_db_genetic_code);
ASSERT(program_number == eBlastTypeBlastp ||
program_number == eBlastTypeTblastn ||
program_number == eBlastTypeBlastx ||
program_number == eBlastTypePsiBlast ||
program_number == eBlastTypeRpsBlast ||
program_number == eBlastTypeRpsTblastn);
if (0 == strcmp(scoringParams->options->matrix, "BLOSUM62_20") &&
compo_adjust_mode == eNoCompositionBasedStats) {
return -1; /* BLOSUM62_20 only makes sense if
* compo_adjust_mode is on */
}
if (positionBased) {
/* Position based searches can only use traditional
* composition based stats */
if ((int) compo_adjust_mode > 1) {
compo_adjust_mode = eCompositionBasedStats;
}
/* A position-based search can only have one query */
ASSERT(queryInfo->num_queries == 1);
ASSERT(queryBlk->length == (Int4)sbp->psi_matrix->pssm->ncols);
}
if ((int) compo_adjust_mode > 1 &&
!Blast_FrequencyDataIsAvailable(scoringParams->options->matrix)) {
return -1; /* Unsupported matrix */
}
/*****************/
inclusion_ethresh = (psiOptions /* this can be NULL for CBl2Seq */
? psiOptions->inclusion_ethresh
: PSI_INCLUSION_ETHRESH);
ASSERT(inclusion_ethresh != 0.0);
int actual_num_threads = 1;
#ifdef _OPENMP
actual_num_threads = num_threads;
#endif
/* Initialize savedParams */
savedParams =
s_SavedParametersNew(queryInfo->max_length, numContexts,
compo_adjust_mode, positionBased);
if (savedParams == NULL) {
status_code = -1;
goto function_cleanup;
}
status_code =
s_RecordInitialSearch(savedParams, sbp, scoringParams,
queryInfo->max_length, compo_adjust_mode,
positionBased);
if (status_code != 0) {
goto function_cleanup;
}
if (compo_adjust_mode != eNoCompositionBasedStats) {
if((0 == strcmp(scoringParams->options->matrix, "BLOSUM62_20"))) {
localScalingFactor = SCALING_FACTOR / 10;
} else {
localScalingFactor = SCALING_FACTOR;
}
} else {
localScalingFactor = 1.0;
}
s_RescaleSearch(sbp, scoringParams, numContexts, localScalingFactor);
status_code =
BLAST_GapAlignStructNew(scoringParams, extendParams,
(seqSrc) ? BlastSeqSrcGetMaxSeqLen(seqSrc)
: subjectBlk->length,
sbp, &gapAlign);
if (status_code != 0) {
return (Int2) status_code;
}
if(smithWaterman) {
status_code =
Blast_ForbiddenRangesInitialize(&forbidden, queryInfo->max_length);
if (status_code != 0) {
goto function_cleanup;
}
}
redoneMatches = calloc(numQueries, sizeof(BlastCompo_Heap));
if (redoneMatches == NULL) {
status_code = -1;
goto function_cleanup;
}
for (query_index = 0; query_index < numQueries; query_index++) {
status_code =
BlastCompo_HeapInitialize(&redoneMatches[query_index],
hitParams->options->hitlist_size,
inclusion_ethresh);
if (status_code != 0) {
goto function_cleanup;
}
}
BlastCompo_Heap** redoneMatches_tld =
(BlastCompo_Heap**) calloc(
actual_num_threads,
sizeof(BlastCompo_Heap*)
);
BlastCompo_Alignment*** alignments_tld =
(BlastCompo_Alignment***) calloc(
actual_num_threads,
sizeof(BlastCompo_Alignment**)
);
BlastCompo_Alignment*** incoming_align_set_tld =
(BlastCompo_Alignment***) calloc(
actual_num_threads,
sizeof(BlastCompo_Alignment**)
);
BlastKappa_SavedParameters** savedParams_tld =
(BlastKappa_SavedParameters**) calloc(
actual_num_threads,
sizeof(BlastKappa_SavedParameters*)
);
BlastScoreBlk** sbp_tld =
(BlastScoreBlk**) calloc(
actual_num_threads,
sizeof(BlastScoreBlk*)
);
BlastKappa_GappingParamsContext* gapping_params_context_tld =
(BlastKappa_GappingParamsContext*) calloc(
actual_num_threads,
sizeof(BlastKappa_GappingParamsContext)
);
Int4*** matrix_tld =
(Int4***) calloc(
actual_num_threads,
sizeof(Int4**)
);
NRrecord_tld =
(Blast_CompositionWorkspace**) calloc(
actual_num_threads,
sizeof(Blast_CompositionWorkspace*)
);
subjectBlk_tld =
(BLAST_SequenceBlk**) calloc(
actual_num_threads,
sizeof(BLAST_SequenceBlk*)
);
redo_align_params_tld =
(Blast_RedoAlignParams**) calloc(
actual_num_threads,
sizeof(Blast_RedoAlignParams*)
);
int* status_code_tld =
(int*) calloc(
actual_num_threads,
sizeof(int)
);
BlastSeqSrc** seqsrc_tld =
(BlastSeqSrc**) calloc(
actual_num_threads,
sizeof(BlastSeqSrc*)
);
BlastGapAlignStruct** gap_align_tld =
(BlastGapAlignStruct**) calloc(
actual_num_threads,
sizeof(BlastGapAlignStruct*)
);
BlastScoringParameters** score_params_tld =
(BlastScoringParameters**) calloc(
actual_num_threads,
sizeof(BlastScoringParameters*)
);
BlastHitSavingParameters** hit_params_tld =
(BlastHitSavingParameters**) calloc(
actual_num_threads,
sizeof(BlastHitSavingParameters*)
);
BlastHSPResults** results_tld =
(BlastHSPResults**) calloc(
actual_num_threads,
sizeof(BlastHSPResults*)
);
query_info_tld =
(BlastCompo_QueryInfo**) calloc(
actual_num_threads,
sizeof(BlastCompo_QueryInfo*)
);
numContexts_tld =
(int*) calloc(
actual_num_threads,
sizeof(int)
);
compositionTestIndex_tld =
(int*) calloc(
actual_num_threads,
sizeof(int)
);
int i;
for (i = 0; i < actual_num_threads; ++i) {
query_info_tld[i] = s_GetQueryInfo(
queryBlk->sequence,
queryInfo,
(program_number == eBlastTypeBlastx)
);
if (query_info_tld[i] == NULL) {
status_code = -1;
goto function_cleanup;
}
sbp_tld[i] = s_BlastScoreBlk_Copy(
program_number,
sbp,
sbp->alphabet_code,
sbp->number_of_contexts
);
numContexts_tld[i] = numContexts;
compositionTestIndex_tld[i] = compositionTestIndex;
seqsrc_tld[i] = BlastSeqSrcCopy(seqSrc);
gap_align_tld[i] =
s_BlastGapAlignStruct_Copy(gapAlign, sbp_tld[i]);
score_params_tld[i] = scoringParams;
hit_params_tld[i] = (BlastHitSavingParameters*) hitParams;
results_tld[i] =
Blast_HSPResultsNew(queryInfo->num_queries);
subjectBlk_tld[i] = subjectBlk;
redoneMatches_tld[i] =
(BlastCompo_Heap*) calloc(numQueries, sizeof(BlastCompo_Heap));
if (redoneMatches_tld[i] == NULL) {
status_code = -1;
goto function_cleanup;
}
for (query_index = 0; query_index < numQueries; query_index++) {
status_code =
BlastCompo_HeapInitialize(&redoneMatches_tld[i][query_index],
hitParams->options->hitlist_size,
inclusion_ethresh);
if (status_code != 0) {
goto function_cleanup;
}
}
alignments_tld[i] = (BlastCompo_Alignment**) calloc(
numContexts,
sizeof(BlastCompo_Alignment*)
);
incoming_align_set_tld[i] = (BlastCompo_Alignment**) calloc(
numFrames,
sizeof(BlastCompo_Alignment*)
);
savedParams_tld[i] = s_SavedParametersNew(
queryInfo->max_length,
numContexts,
compo_adjust_mode,
positionBased
);
if (savedParams_tld[i] == NULL) {
status_code = -1;
goto function_cleanup;
}
status_code = s_RecordInitialSearch(
savedParams_tld[i],
sbp,
scoringParams,
queryInfo->max_length,
compo_adjust_mode,
positionBased
);
if (status_code != 0) {
goto function_cleanup;
}
if ((int) compo_adjust_mode > 1 && !positionBased) {
NRrecord_tld[i] = Blast_CompositionWorkspaceNew();
status_code = Blast_CompositionWorkspaceInit(
NRrecord_tld[i],
scoringParams->options->matrix
);
if (status_code != 0) {
goto function_cleanup;
}
}
gapping_params_context_tld[i].gap_align = gap_align_tld[i];
gapping_params_context_tld[i].scoringParams = score_params_tld[i];
gapping_params_context_tld[i].sbp = sbp_tld[i];
gapping_params_context_tld[i].localScalingFactor = localScalingFactor;
gapping_params_context_tld[i].prog_number = program_number;
redo_align_params_tld[i] =
s_GetAlignParams(
&gapping_params_context_tld[i],
queryBlk,
queryInfo,
hitParams,
extendParams
);
if (redo_align_params_tld[i] == NULL) {
status_code = -1;
goto function_cleanup;
}
if (positionBased) {
matrix_tld[i] = sbp_tld[i]->psi_matrix->pssm->data;
} else {
matrix_tld[i] = sbp_tld[i]->matrix->data;
}
/**** Validate parameters *************/
if (matrix_tld[i] == NULL) {
goto function_cleanup;
}
}
/*
* There are two use cases here.
* (1) hsp_stream == NULL, so single match is passed in thisMatch.
* Also, seqSrc == NULL and subjectBlk are != NULL.
* (2) hsp_stream != NULL, so one or more matches are taken from
* hsp_stream, and thisMatch is (probably) NULL.
* Also, seqSrc != NULL, subjectBlk and thisMatch are == NULL.
*/
struct BlastHSPListLinkedList {
BlastHSPList* match;
struct BlastHSPListLinkedList* next;
};
typedef struct BlastHSPListLinkedList BlastHSPListLinkedList;
BlastHSPList** theseMatches = NULL;
int numMatches = 0;
if (hsp_stream == NULL) {
theseMatches = (BlastHSPList**) calloc(1, sizeof(BlastHSPList*));
*theseMatches = thisMatch;
numMatches = 1;
} else {
BlastHSPList* localMatch = NULL;
BlastHSPListLinkedList* head = NULL;
BlastHSPListLinkedList* tail = NULL;
/*
* Collect matches from stream into linked list, counting them
* along the way.
*/
while (BlastHSPStreamRead(hsp_stream, &localMatch)
!= kBlastHSPStream_Eof) {
BlastHSPListLinkedList* entry =
(BlastHSPListLinkedList*) calloc(
1,
sizeof(BlastHSPListLinkedList)
);
entry->match = localMatch;
if (head == NULL) {
head = entry;
} else {
tail->next = entry;
}
tail = entry;
++numMatches;
}
/*
* Convert linked list of matches into array.
*/
theseMatches =
(BlastHSPList**) calloc(numMatches, sizeof(BlastHSPList*));
int i;
for (i = 0; i < numMatches; ++i) {
theseMatches[i] = head->match;
BlastHSPListLinkedList* here = head;
head = head->next;
sfree(here);
}
}
Boolean interrupt = FALSE;
#pragma omp parallel \
default(none) num_threads(actual_num_threads) \
if(actual_num_threads>1) \
shared(interrupt, seqsrc_tld, score_params_tld, hit_params_tld, \
gap_align_tld, results_tld, \
redoneMatches_tld, \
STDERR_COMMA \
numQueries, numMatches, theseMatches, \
numFrames, program_number, subjectBlk_tld, positionBased, \
default_db_genetic_code, localScalingFactor, queryInfo, \
sbp, smithWaterman, compositionTestIndex_tld, forbidden, \
NRrecord_tld, actual_num_threads, sbp_tld, \
matrix_tld, query_info_tld, numContexts_tld, \
genetic_code_string, queryBlk, compo_adjust_mode, \
alignments_tld, incoming_align_set_tld, savedParams_tld, \
scoringParams, redo_align_params_tld, \
status_code_tld)
{
int b;
#pragma omp for schedule(static)
for (b = 0; b < numMatches; ++b) {
#pragma omp flush(interrupt)
if (!interrupt) {
BlastCompo_Alignment** alignments = NULL;
BlastCompo_Alignment** incoming_align_set = NULL;
Blast_CompositionWorkspace* NRrecord = NULL;
BlastCompo_QueryInfo* query_info = NULL;
int numAligns[6];
Blast_KarlinBlk* kbp = NULL;
BlastCompo_MatchingSequence matchingSeq = {0,};
BlastHSPList* hsp_list = NULL;
BlastCompo_Alignment* incoming_aligns = NULL;
Blast_RedoAlignParams* redo_align_params;
double best_evalue;
Int4 best_score;
int query_index;
int context_index;
int frame_index;
void* discarded_aligns = NULL;
BlastSeqSrc* seqSrc;
BlastScoringParameters* scoringParams;
BlastHitSavingParameters* hitParams;
BlastCompo_Heap* redoneMatches;
BlastScoreBlk* sbp;
BLAST_SequenceBlk* subjectBlk;
int numContexts;
int compositionTestIndex;
/* existing alignments for a match */
Int4** matrix; /* score matrix */
int* pStatusCode;
double pvalueForThisPair = (-1); /* p-value for this match
for composition; -1 == no adjustment*/
double LambdaRatio; /*lambda ratio*/
int tid = 0;
#ifdef _OPENMP
if(actual_num_threads > 1) {
tid = omp_get_thread_num();
}
#endif
seqSrc = seqsrc_tld[tid];
scoringParams = score_params_tld[tid];
hitParams = hit_params_tld[tid];
redoneMatches = redoneMatches_tld[tid];
alignments = alignments_tld[tid];
incoming_align_set = incoming_align_set_tld[tid];
NRrecord = NRrecord_tld[tid];
sbp = sbp_tld[tid];
redo_align_params = redo_align_params_tld[tid];
matrix = matrix_tld[tid];
pStatusCode = &status_code_tld[tid];
query_info = query_info_tld[tid];
numContexts = numContexts_tld[tid];
compositionTestIndex = compositionTestIndex_tld[tid];
subjectBlk = subjectBlk_tld[tid];
BlastHSPList* localMatch = theseMatches[b];
if (localMatch->hsp_array == NULL) {
if (seqSrc) {
continue;
}
if(actual_num_threads > 1) {
#pragma omp critical(intrpt)
interrupt = TRUE;
#pragma omp flush(interrupt)
continue;
}
}
if (BlastCompo_EarlyTermination(
localMatch->best_evalue,
redoneMatches,
numQueries
)) {
Blast_HSPListFree(localMatch);
if (seqSrc) {
continue;
}
if(actual_num_threads > 1) {
#pragma omp critical(intrpt)
interrupt = TRUE;
#pragma omp flush(interrupt)
continue;
}
}
query_index = localMatch->query_index;
context_index = query_index * numFrames;
BlastSeqSrcSetRangesArg * ranges = NULL;
/* Get the sequence for this match */
if (seqSrc && BlastSeqSrcGetSupportsPartialFetching(seqSrc)) {
ranges = BLAST_SetupPartialFetching(
program_number,
(BlastSeqSrc*) seqSrc,
(const BlastHSPList**)&localMatch,
1
);
}
if (subjectBlk) {
matchingSeq.length = subjectBlk->length;
matchingSeq.index = -1;
matchingSeq.local_data = subjectBlk;
} else {
*pStatusCode = s_MatchingSequenceInitialize(
&matchingSeq,
program_number,
seqSrc,
default_db_genetic_code,
localMatch->oid,
ranges
);
if (*pStatusCode != 0) {
/*
* some sequences may have been excluded by membit filtering
* so this is not really an exception
*/
*pStatusCode = 0;
goto match_loop_cleanup;
}
}
*pStatusCode = s_ResultHspToDistinctAlign(
incoming_align_set, /* o */
numAligns, /* o */
localMatch->hsp_array, /* i */
localMatch->hspcnt, /* i */
context_index, /* i */
queryInfo, /* i */
localScalingFactor /* i */
);
if (*pStatusCode != 0) {
goto match_loop_cleanup;
}
hsp_list = Blast_HSPListNew(0);
for (frame_index = 0;
frame_index < numFrames;
frame_index++, context_index++) {
incoming_aligns = incoming_align_set[frame_index];
if (!incoming_aligns) {
continue;
}
/*
* All alignments in thisMatch should be to the same query
*/
kbp = sbp->kbp_gap[context_index];
if (smithWaterman) {
*pStatusCode =
Blast_RedoOneMatchSmithWaterman(
alignments,
redo_align_params,
incoming_aligns,
numAligns[frame_index],
kbp->Lambda,
kbp->logK,
&matchingSeq,
query_info,
numQueries,
matrix,
BLASTAA_SIZE,
NRrecord,
&forbidden,
redoneMatches,
&pvalueForThisPair,
compositionTestIndex,
&LambdaRatio
);
} else {
*pStatusCode =
Blast_RedoOneMatch(
alignments, // thread-local
redo_align_params, // thread-local
incoming_aligns, // thread-local
numAligns[frame_index], // local
kbp->Lambda, // thread-local
&matchingSeq, // thread-local
-1, // const
query_info, // thread-local
numContexts, // thread-local
matrix, // thread-local
BLASTAA_SIZE, // const
NRrecord, // thread-local
&pvalueForThisPair, // local
compositionTestIndex, // thread-local
&LambdaRatio // local
);
}
if (*pStatusCode != 0) {
goto match_loop_cleanup;
}
if (alignments[context_index] != NULL) {
Int2 qframe = frame_index;
if (program_number == eBlastTypeBlastx) {
if (qframe < 3) {
qframe++;
} else {
qframe = 2 - qframe;
}
}
*pStatusCode =
s_HSPListFromDistinctAlignments(hsp_list,
&alignments[context_index],
matchingSeq.index,
queryInfo, qframe);
if (*pStatusCode) {
goto match_loop_cleanup;
}
}
BlastCompo_AlignmentsFree(&incoming_aligns, NULL);
incoming_align_set[frame_index] = NULL;
}
if (hsp_list->hspcnt > 1) {
s_HitlistReapContained(hsp_list->hsp_array,
&hsp_list->hspcnt);
}
*pStatusCode =
s_HitlistEvaluateAndPurge(&best_score, &best_evalue,
hsp_list,
seqSrc,
matchingSeq.length,
program_number,
queryInfo, context_index,
sbp, hitParams,
pvalueForThisPair, LambdaRatio,
matchingSeq.index);
if (*pStatusCode != 0) {
goto query_loop_cleanup;
}
if (best_evalue <= hitParams->options->expect_value) {
/* The best alignment is significant */
s_HSPListNormalizeScores(hsp_list, kbp->Lambda, kbp->logK,
localScalingFactor);
s_ComputeNumIdentities(
queryBlk,
queryInfo,
subjectBlk,
seqSrc,
hsp_list,
scoringParams->options,
genetic_code_string,
sbp,
ranges
);
if (!seqSrc) {
goto query_loop_cleanup;
}
if (BlastCompo_HeapWouldInsert(
&redoneMatches[query_index],
best_evalue,
best_score,
localMatch->oid
)) {
*pStatusCode =
BlastCompo_HeapInsert(
&redoneMatches[query_index],
hsp_list,
best_evalue,
best_score,
localMatch->oid,
&discarded_aligns
);
if (*pStatusCode == 0) {
hsp_list = NULL;
}
} else {
hsp_list = Blast_HSPListFree(hsp_list);
}
if (*pStatusCode) {
goto query_loop_cleanup;
}
if (discarded_aligns != NULL) {
Blast_HSPListFree(discarded_aligns);
}
}
query_loop_cleanup:
match_loop_cleanup:
if (seqSrc) {
localMatch = Blast_HSPListFree(localMatch);
} else {
Blast_HSPListSwap(localMatch, hsp_list);
localMatch->oid = hsp_list->oid;
}
hsp_list = Blast_HSPListFree(hsp_list);
ranges = BlastSeqSrcSetRangesArgFree(ranges);
if (*pStatusCode != 0) {
for (context_index = 0;
context_index < numContexts;
context_index++) {
BlastCompo_AlignmentsFree(
&alignments[context_index],
s_FreeEditScript
);
}
}
s_MatchingSequenceRelease(&matchingSeq);
BlastCompo_AlignmentsFree(&incoming_aligns, NULL);
if ((actual_num_threads > 1) &&
(*pStatusCode != 0 || !seqSrc)) {
#pragma omp critical(intrpt)
interrupt = TRUE;
#pragma omp flush(interrupt)
continue;
}
} /* end of if(!interrupt) */
}
#pragma omp barrier
/*
* end of omp parallel section
*/
}
function_cleanup:
for (i = 0; i < actual_num_threads; ++i) {
if (status_code_tld[i] != 0) {
status_code = status_code_tld[i];
}
}
for (i = 0; i < actual_num_threads; ++i) {
if (seqSrc && status_code == 0) {
s_FillResultsFromCompoHeaps(
results_tld[i],
redoneMatches_tld[i],
hitParams->options->hitlist_size
);
if (redoneMatches_tld[i] != NULL) {
int qi;
for (qi = 0; qi < numQueries; ++qi) {
sfree(redoneMatches_tld[i][qi].array);
sfree(redoneMatches_tld[i][qi].heapArray);
}
s_ClearHeap(redoneMatches_tld[i]);
}
} else {
if (redoneMatches_tld[i] != NULL) {
int qi;
for (qi = 0; qi < numQueries; ++qi) {
sfree(redoneMatches_tld[i][qi].array);
sfree(redoneMatches_tld[i][qi].heapArray);
}
s_ClearHeap(redoneMatches_tld[i]);
}
}
sfree(redoneMatches_tld[i]);
}
if (redoneMatches != NULL) {
int qi;
for (qi = 0; qi < numQueries; ++qi) {
sfree(redoneMatches[qi].array);
sfree(redoneMatches[qi].heapArray);
}
s_ClearHeap(redoneMatches);
}
if (hsp_stream != NULL) {
/* Reduce results from all threads and continue with business as usual */
SThreadLocalDataArray* thread_data =
SThreadLocalDataArrayNew(actual_num_threads);
int i;
for (i = 0; i < actual_num_threads; ++i) {
SThreadLocalData* tdi = thread_data->tld[i];
BlastHSPResults* rdi = results_tld[i];
tdi->hit_params = hit_params_tld[i];
hit_params_tld[i] = NULL;
tdi->results =
(BlastHSPResults*) calloc(1, sizeof(BlastHSPResults));
tdi->results->num_queries = rdi->num_queries;
tdi->results->hitlist_array =
(BlastHitList**) calloc(
tdi->results->num_queries,
sizeof(BlastHitList*)
);
int j;
for (j = 0; j < tdi->results->num_queries; ++j) {
tdi->results->hitlist_array[j] = rdi->hitlist_array[j];
rdi->hitlist_array[j] = NULL;
}
}
local_results = SThreadLocalDataArrayConsolidateResults(thread_data);
ASSERT(local_results);
/* post-traceback pipes */
BlastHSPStreamTBackClose(hsp_stream, local_results);
for (i = 0; i < local_results->num_queries; ++i) {
results->hitlist_array[i] = local_results->hitlist_array[i];
local_results->hitlist_array[i] = NULL;
}
for (i = 0; i < actual_num_threads; ++i) {
thread_data->tld[i]->hit_params = NULL;
int j;
for (j = 0; j < local_results->num_queries; ++j) {
thread_data->tld[i]->results->hitlist_array[j] =
Blast_HitListFree(
thread_data->tld[i]->results->hitlist_array[j]
);
}
sfree(thread_data->tld[i]->results->hitlist_array);
sfree(thread_data->tld[i]->results);
thread_data->tld[i] = SThreadLocalDataFree(thread_data->tld[i]);
}
sfree(thread_data->tld);
sfree(thread_data);
Blast_HSPResultsFree(local_results);
}
if (redoneMatches != NULL) {
for (query_index = 0; query_index < numQueries; query_index++) {
BlastCompo_HeapRelease(&redoneMatches[query_index]);
}
sfree(redoneMatches);
redoneMatches = NULL;
}
if (smithWaterman) {
Blast_ForbiddenRangesRelease(&forbidden);
}
if (gapAlign != NULL) {
gapAlign = BLAST_GapAlignStructFree(gapAlign);
}
s_RestoreSearch(sbp, scoringParams, savedParams, queryBlk->length,
positionBased, compo_adjust_mode);
s_SavedParametersFree(&savedParams);
for (i = 0; i < actual_num_threads; ++i) {
s_BlastScoreBlk_Free(&sbp_tld[i]);
gap_align_tld[i]->sbp = NULL;
s_BlastGapAlignStruct_Free(gap_align_tld[i]);
Blast_RedoAlignParamsFree(&redo_align_params_tld[i]);
sfree(alignments_tld[i]);
sfree(incoming_align_set_tld[i]);
Blast_CompositionWorkspaceFree(&NRrecord_tld[i]);
s_SavedParametersFree(&savedParams_tld[i]);
BlastSeqSrcFree(seqsrc_tld[i]);
results_tld[i] = Blast_HSPResultsFree(results_tld[i]);
s_FreeBlastCompo_QueryInfoArray(&query_info_tld[i], numContexts);
}
sfree(alignments_tld);
sfree(compositionTestIndex_tld);
sfree(gap_align_tld);
sfree(gapping_params_context_tld);
sfree(hit_params_tld);
sfree(incoming_align_set_tld);
sfree(matrix_tld);
sfree(NRrecord_tld);
sfree(numContexts_tld);
sfree(query_info_tld);
sfree(redo_align_params_tld);
sfree(redoneMatches_tld);
sfree(results_tld);
sfree(savedParams_tld);
sfree(sbp_tld);
sfree(score_params_tld);
sfree(seqsrc_tld);
sfree(status_code_tld);
sfree(subjectBlk_tld);
sfree(theseMatches);
return (Int2) status_code;
}
|
GB_unaryop__identity_uint8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_uint16
// op(A') function: GB_tran__identity_uint8_uint16
// C type: uint8_t
// A type: uint16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
// type of the input matrix entries (A)
#define GB_ATYPE \
    uint16_t

// type of the output matrix entries (C)
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// the entry Cx [p] of the output array
#define GB_CX(p) Cx [p]

// unary operator: identity, z = x
#define GB_OP(z, x) \
    z = x ;

// casting: uint16_t to uint8_t (C's unsigned conversion, i.e. modulo 256)
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij)): one full read-cast-apply-store step
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint8_uint16
(
    uint8_t *restrict Cx,           // output array, Cx [0..anz-1]
    const uint16_t *restrict Ax,    // input array, Ax [0..anz-1]
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time;
    // the caller falls back to the generic kernel instead
    return (GrB_NO_VALUE) ;
    #else
    // every iteration is independent (Cx [p] depends only on Ax [p]),
    // so the loop is a perfect parallel map with a static schedule
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (uint8_t) Ax [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint8_uint16
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed into C
    int64_t *restrict *Rowcounts,       // per-task row counts
                                        // (presumably from phase 1 of the
                                        // transpose template -- see
                                        // GB_unaryop_transpose.c)
    GBI_single_iterator Iter,           // iterator over the slices of A
    const int64_t *restrict A_slice,    // partition of A among the tasks
    int naslice                         // number of slices (tasks)
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does all the work; it uses the GB_*
    // macros defined above to fetch, cast, and store each entry
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
omp-simd-clone.c | /* OMP constructs' SIMD clone supporting code.
Copyright (C) 2005-2020 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "langhooks.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "cfgloop.h"
#include "symbol-summary.h"
#include "ipa-param-manipulation.h"
#include "tree-eh.h"
#include "varasm.h"
#include "stringpool.h"
#include "attribs.h"
#include "omp-simd-clone.h"
/* Return the number of elements in vector type VECTYPE, which is associated
with a SIMD clone. At present these always have a constant length. */
static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
  /* Vector types used by SIMD clones always have a constant number of
     lanes (see the comment above), so forcing the possibly-polynomial
     subparts count to a constant is safe.  */
  auto nunits = TYPE_VECTOR_SUBPARTS (vectype);
  return nunits.to_constant ();
}
/* Allocate a fresh `simd_clone' and return it. NARGS is the number
of arguments to reserve space for. */
static struct cgraph_simd_clone *
simd_clone_struct_alloc (int nargs)
{
  /* The clone descriptor carries its argument entries in trailing
     storage, so size the (zero-initialized, GC-allocated) block as the
     header plus NARGS argument records.  */
  size_t bytes = sizeof (struct cgraph_simd_clone);
  bytes += (size_t) nargs * sizeof (struct cgraph_simd_clone_arg);
  return (struct cgraph_simd_clone *) ggc_internal_cleared_alloc (bytes);
}
/* Make a copy of the `struct cgraph_simd_clone' in FROM to TO. */
static inline void
simd_clone_struct_copy (struct cgraph_simd_clone *to,
			struct cgraph_simd_clone *from)
{
  /* NOTE(review): only nargs - inbranch trailing argument entries are
     copied; presumably the mask argument of an in-branch clone is not
     yet populated at this point -- confirm against the callers.  */
  size_t nentries = from->nargs - from->inbranch;
  size_t bytes = sizeof (struct cgraph_simd_clone)
		 + nentries * sizeof (struct cgraph_simd_clone_arg);
  memcpy (to, from, bytes);
}
/* Fill an empty vector ARGS with parameter types of function FNDECL. This
uses TYPE_ARG_TYPES if available, otherwise falls back to types of
DECL_ARGUMENTS types. */
static void
simd_clone_vector_of_formal_parm_types (vec<tree> *args, tree fndecl)
{
  /* Prefer the prototype's TYPE_ARG_TYPES when it exists; otherwise
     collect the DECL_ARGUMENTS decls and replace each one in place by
     its type.  */
  tree fntype = TREE_TYPE (fndecl);
  if (TYPE_ARG_TYPES (fntype))
    push_function_arg_types (args, fntype);
  else
    {
      push_function_arg_decls (args, fndecl);
      for (unsigned int ix = 0; ix < args->length (); ix++)
	(*args)[ix] = TREE_TYPE ((*args)[ix]);
    }
}
/* Given a simd function in NODE, extract the simd specific
information from the OMP clauses passed in CLAUSES, and return
the struct cgraph_simd_clone * if it should be cloned. *INBRANCH_SPECIFIED
is set to TRUE if the `inbranch' or `notinbranch' clause specified,
otherwise set to FALSE. */
static struct cgraph_simd_clone *
simd_clone_clauses_extract (struct cgraph_node *node, tree clauses,
			    bool *inbranch_specified)
{
  auto_vec<tree> args;
  simd_clone_vector_of_formal_parm_types (&args, node->decl);
  tree t;
  int n;
  *inbranch_specified = false;

  n = args.length ();
  /* A trailing void_type_node just terminates a non-varargs parameter
     list; it is not a real argument.  */
  if (n > 0 && args.last () == void_type_node)
    n--;

  /* Allocate one more than needed just in case this is an in-branch
     clone which will require a mask argument.  */
  struct cgraph_simd_clone *clone_info = simd_clone_struct_alloc (n + 1);
  clone_info->nargs = n;

  /* No clauses at all: every argument defaults to "vector".  */
  if (!clauses)
    goto out;
  clauses = TREE_VALUE (clauses);
  if (!clauses || TREE_CODE (clauses) != OMP_CLAUSE)
    goto out;

  for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
    {
      switch (OMP_CLAUSE_CODE (t))
	{
	case OMP_CLAUSE_INBRANCH:
	  clone_info->inbranch = 1;
	  *inbranch_specified = true;
	  break;
	case OMP_CLAUSE_NOTINBRANCH:
	  clone_info->inbranch = 0;
	  *inbranch_specified = true;
	  break;
	case OMP_CLAUSE_SIMDLEN:
	  clone_info->simdlen
	    = TREE_INT_CST_LOW (OMP_CLAUSE_SIMDLEN_EXPR (t));
	  break;
	case OMP_CLAUSE_LINEAR:
	  {
	    tree decl = OMP_CLAUSE_DECL (t);
	    tree step = OMP_CLAUSE_LINEAR_STEP (t);
	    /* For declare simd the clause's DECL holds the argument
	       number, not the PARM_DECL itself.  */
	    int argno = TREE_INT_CST_LOW (decl);
	    if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (t))
	      {
		/* Variable stride: here linear_step is an argument
		   index (see the assert below), not a step value.  */
		enum cgraph_simd_clone_arg_type arg_type;
		if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
		  switch (OMP_CLAUSE_LINEAR_KIND (t))
		    {
		    case OMP_CLAUSE_LINEAR_REF:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP;
		      break;
		    case OMP_CLAUSE_LINEAR_UVAL:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP;
		      break;
		    case OMP_CLAUSE_LINEAR_VAL:
		    case OMP_CLAUSE_LINEAR_DEFAULT:
		      arg_type
			= SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP;
		      break;
		    default:
		      gcc_unreachable ();
		    }
		else
		  arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP;
		clone_info->args[argno].arg_type = arg_type;
		clone_info->args[argno].linear_step = tree_to_shwi (step);
		/* The step-holding argument must be one of the N
		   declared arguments.  */
		gcc_assert (clone_info->args[argno].linear_step >= 0
			    && clone_info->args[argno].linear_step < n);
	      }
	    else
	      {
		/* Constant stride.  For pointer arguments the step is
		   in bytes, as a signed size.  */
		if (POINTER_TYPE_P (args[argno]))
		  step = fold_convert (ssizetype, step);
		if (!tree_fits_shwi_p (step))
		  {
		    warning_at (OMP_CLAUSE_LOCATION (t), 0,
				"ignoring large linear step");
		    return NULL;
		  }
		else if (integer_zerop (step))
		  {
		    warning_at (OMP_CLAUSE_LOCATION (t), 0,
				"ignoring zero linear step");
		    return NULL;
		  }
		else
		  {
		    enum cgraph_simd_clone_arg_type arg_type;
		    if (TREE_CODE (args[argno]) == REFERENCE_TYPE)
		      switch (OMP_CLAUSE_LINEAR_KIND (t))
			{
			case OMP_CLAUSE_LINEAR_REF:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP;
			  break;
			case OMP_CLAUSE_LINEAR_UVAL:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP;
			  break;
			case OMP_CLAUSE_LINEAR_VAL:
			case OMP_CLAUSE_LINEAR_DEFAULT:
			  arg_type
			    = SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP;
			  break;
			default:
			  gcc_unreachable ();
			}
		    else
		      arg_type = SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP;
		    clone_info->args[argno].arg_type = arg_type;
		    clone_info->args[argno].linear_step = tree_to_shwi (step);
		  }
	      }
	    break;
	  }
	case OMP_CLAUSE_UNIFORM:
	  {
	    /* Uniform arguments are passed as scalars, shared by all
	       lanes.  DECL again holds the argument number.  */
	    tree decl = OMP_CLAUSE_DECL (t);
	    int argno = tree_to_uhwi (decl);
	    clone_info->args[argno].arg_type
	      = SIMD_CLONE_ARG_TYPE_UNIFORM;
	    break;
	  }
	case OMP_CLAUSE_ALIGNED:
	  {
	    /* Ignore aligned (x) for declare simd, for the ABI we really
	       need an alignment specified.  */
	    if (OMP_CLAUSE_ALIGNED_ALIGNMENT (t) == NULL_TREE)
	      break;
	    tree decl = OMP_CLAUSE_DECL (t);
	    int argno = tree_to_uhwi (decl);
	    clone_info->args[argno].alignment
	      = TREE_INT_CST_LOW (OMP_CLAUSE_ALIGNED_ALIGNMENT (t));
	    break;
	  }
	default:
	  break;
	}
    }

 out:
  /* _Atomic qualified return types or non-uniform arguments cannot be
     vectorized; refuse to build a clone.  */
  if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (node->decl))))
    {
      warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
		  "ignoring %<#pragma omp declare simd%> on function "
		  "with %<_Atomic%> qualified return type");
      return NULL;
    }

  for (unsigned int argno = 0; argno < clone_info->nargs; argno++)
    if (TYPE_ATOMIC (args[argno])
	&& clone_info->args[argno].arg_type != SIMD_CLONE_ARG_TYPE_UNIFORM)
      {
	warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
		    "ignoring %<#pragma omp declare simd%> on function "
		    "with %<_Atomic%> qualified non-%<uniform%> argument");
	args.release ();
	return NULL;
      }

  return clone_info;
}
/* Given a SIMD clone in NODE, calculate the characteristic data
type and return the coresponding type. The characteristic data
type is computed as described in the Intel Vector ABI. */
static tree
simd_clone_compute_base_data_type (struct cgraph_node *node,
				   struct cgraph_simd_clone *clone_info)
{
  tree type = integer_type_node;
  tree fndecl = node->decl;

  /* a) For non-void function, the characteristic data type is the
	return type.  */
  if (TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE)
    type = TREE_TYPE (TREE_TYPE (fndecl));

  /* b) If the function has any non-uniform, non-linear parameters,
	then the characteristic data type is the type of the first
	such parameter.  */
  else
    {
      auto_vec<tree> map;
      simd_clone_vector_of_formal_parm_types (&map, fndecl);
      /* SIMD_CLONE_ARG_TYPE_VECTOR is the default, i.e. neither
	 uniform nor linear.  */
      for (unsigned int i = 0; i < clone_info->nargs; ++i)
	if (clone_info->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
	  {
	    type = map[i];
	    break;
	  }
    }

  /* c) If the characteristic data type determined by a) or b) above
	is struct, union, or class type which is pass-by-value (except
	for the type that maps to the built-in complex data type), the
	characteristic data type is int.  */
  /* NOTE(review): the COMPLEX_TYPE test looks redundant inside the
     RECORD_OR_UNION_TYPE_P guard (a record/union is never a
     COMPLEX_TYPE); it seems to document the ABI exception rather than
     change the result -- confirm before simplifying.  */
  if (RECORD_OR_UNION_TYPE_P (type)
      && !aggregate_value_p (type, NULL)
      && TREE_CODE (type) != COMPLEX_TYPE)
    return integer_type_node;

  /* d) If none of the above three classes is applicable, the
	characteristic data type is int.  (TYPE still holds the
	integer_type_node it was initialized with in that case.)  */

  return type;

  /* e) For Intel Xeon Phi native and offload compilation, if the
	resulting characteristic data type is 8-bit or 16-bit integer
	data type, the characteristic data type is int.  */
  /* Well, we don't handle Xeon Phi yet.  */
}
/* Produce the mangled name for the SIMD clone of NODE described by
   CLONE_INFO: "_ZGV" <isa letter> <'M'|'N' for masked/unmasked>
   <simdlen> <per-argument codes> "_" <base assembler name>.  Return
   the identifier, or NULL_TREE if a clone with the same mangled name
   already exists.  */
static tree
simd_clone_mangle (struct cgraph_node *node,
		   struct cgraph_simd_clone *clone_info)
{
  char vecsize_mangle = clone_info->vecsize_mangle;
  char mask = clone_info->inbranch ? 'M' : 'N';
  unsigned int simdlen = clone_info->simdlen;
  unsigned int n;
  pretty_printer pp;

  gcc_assert (vecsize_mangle && simdlen);

  pp_string (&pp, "_ZGV");
  pp_character (&pp, vecsize_mangle);
  pp_character (&pp, mask);
  pp_decimal_int (&pp, simdlen);

  /* One code per argument: 'u' uniform, 'l'/'R'/'L'/'U' the linear
     variants, "ls"/"Rs"/"Ls"/"Us" their variable-step forms, and 'v'
     for a plain vector argument; an optional "a<align>" suffix.  */
  for (n = 0; n < clone_info->nargs; ++n)
    {
      struct cgraph_simd_clone_arg arg = clone_info->args[n];
      switch (arg.arg_type)
	{
	case SIMD_CLONE_ARG_TYPE_UNIFORM:
	  pp_character (&pp, 'u');
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
	  pp_character (&pp, 'l');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
	  pp_character (&pp, 'R');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
	  pp_character (&pp, 'L');
	  goto mangle_linear;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
	  pp_character (&pp, 'U');
	  goto mangle_linear;
	mangle_linear:
	  gcc_assert (arg.linear_step != 0);
	  /* A step of 1 is the default and is omitted.  */
	  if (arg.linear_step > 1)
	    pp_unsigned_wide_integer (&pp, arg.linear_step);
	  else if (arg.linear_step < 0)
	    {
	      /* Negative steps are written as 'n' plus the magnitude;
		 the cast avoids signed-negation overflow on the most
		 negative value.  */
	      pp_character (&pp, 'n');
	      pp_unsigned_wide_integer (&pp, (-(unsigned HOST_WIDE_INT)
					      arg.linear_step));
	    }
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
	  pp_string (&pp, "ls");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
	  pp_string (&pp, "Rs");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
	  pp_string (&pp, "Ls");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
	  pp_string (&pp, "Us");
	  pp_unsigned_wide_integer (&pp, arg.linear_step);
	  break;
	default:
	  pp_character (&pp, 'v');
	}
      if (arg.alignment)
	{
	  pp_character (&pp, 'a');
	  pp_decimal_int (&pp, arg.alignment);
	}
    }

  pp_underscore (&pp);
  const char *str = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (node->decl));
  /* Skip over a leading '*', if any, in the assembler name.  */
  if (*str == '*')
    ++str;
  pp_string (&pp, str);
  str = pp_formatted_text (&pp);

  /* If there already is a SIMD clone with the same mangled name, don't
     add another one.  This can happen e.g. for
     #pragma omp declare simd
     #pragma omp declare simd simdlen(8)
     int foo (int, int);
     if the simdlen is assumed to be 8 for the first one, etc.  */
  for (struct cgraph_node *clone = node->simd_clones; clone;
       clone = clone->simdclone->next_clone)
    if (id_equal (DECL_ASSEMBLER_NAME (clone->decl), str))
      return NULL_TREE;

  return get_identifier (str);
}
/* Create a simd clone of OLD_NODE and return it. */
static struct cgraph_node *
simd_clone_create (struct cgraph_node *old_node)
{
  struct cgraph_node *new_node;
  if (old_node->definition)
    {
      /* The original has a body: clone body and cgraph node together.  */
      if (!old_node->has_gimple_body_p ())
	return NULL;
      old_node->get_body ();
      new_node = old_node->create_version_clone_with_body (vNULL, NULL, NULL,
							   NULL, NULL,
							   "simdclone");
    }
  else
    {
      /* Declaration only: build a fresh decl for the clone by hand,
	 clearing state that must not be shared with the original.  */
      tree old_decl = old_node->decl;
      tree new_decl = copy_node (old_node->decl);
      DECL_NAME (new_decl) = clone_function_name_numbered (old_decl,
							   "simdclone");
      SET_DECL_ASSEMBLER_NAME (new_decl, DECL_NAME (new_decl));
      SET_DECL_RTL (new_decl, NULL);
      DECL_STATIC_CONSTRUCTOR (new_decl) = 0;
      DECL_STATIC_DESTRUCTOR (new_decl) = 0;
      new_node = old_node->create_version_clone (new_decl, vNULL, NULL);
      if (old_node->in_other_partition)
	new_node->in_other_partition = 1;
    }
  if (new_node == NULL)
    return new_node;

  /* The clone is not a built-in even if the original was.  */
  set_decl_built_in_function (new_node->decl, NOT_BUILT_IN, 0);
  /* Mirror the original decl's linkage and visibility flags onto the
     clone's decl.  */
  TREE_PUBLIC (new_node->decl) = TREE_PUBLIC (old_node->decl);
  DECL_COMDAT (new_node->decl) = DECL_COMDAT (old_node->decl);
  DECL_WEAK (new_node->decl) = DECL_WEAK (old_node->decl);
  DECL_EXTERNAL (new_node->decl) = DECL_EXTERNAL (old_node->decl);
  DECL_VISIBILITY_SPECIFIED (new_node->decl)
    = DECL_VISIBILITY_SPECIFIED (old_node->decl);
  DECL_VISIBILITY (new_node->decl) = DECL_VISIBILITY (old_node->decl);
  DECL_DLLIMPORT_P (new_node->decl) = DECL_DLLIMPORT_P (old_node->decl);
  if (DECL_ONE_ONLY (old_node->decl))
    make_decl_one_only (new_node->decl, DECL_ASSEMBLER_NAME (new_node->decl));

  /* The method cgraph_version_clone_with_body () will force the new
     symbol local.  Undo this, and inherit external visibility from
     the old node.  */
  new_node->local = old_node->local;
  new_node->externally_visible = old_node->externally_visible;

  return new_node;
}
/* Adjust the return type of the given function to its appropriate
vector counterpart. Returns a simd array to be used throughout the
function as a return value. */
static tree
simd_clone_adjust_return_type (struct cgraph_node *node)
{
  tree fndecl = node->decl;
  tree orig_rettype = TREE_TYPE (TREE_TYPE (fndecl));
  unsigned int veclen;
  tree t;

  /* Adjust the function return type.  Nothing to do for void.  */
  if (orig_rettype == void_type_node)
    return NULL_TREE;
  t = TREE_TYPE (TREE_TYPE (fndecl));
  /* Pick the vector register width (in bits) for this element class...  */
  if (INTEGRAL_TYPE_P (t) || POINTER_TYPE_P (t))
    veclen = node->simdclone->vecsize_int;
  else
    veclen = node->simdclone->vecsize_float;
  /* ...and convert it to a number of elements per vector.  */
  veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (t));
  if (veclen > node->simdclone->simdlen)
    veclen = node->simdclone->simdlen;
  /* Pointers are vectorized as pointer-sized integers.  */
  if (POINTER_TYPE_P (t))
    t = pointer_sized_int_node;
  if (veclen == node->simdclone->simdlen)
    t = build_vector_type (t, node->simdclone->simdlen);
  else
    {
      /* simdlen needs more than one vector register: return an array
	 of simdlen/veclen vectors.  */
      t = build_vector_type (t, veclen);
      t = build_array_type_nelts (t, node->simdclone->simdlen / veclen);
    }
  TREE_TYPE (TREE_TYPE (fndecl)) = t;
  /* Without a body there is no DECL_RESULT to adjust and no local
     return array to create.  */
  if (!node->definition)
    return NULL_TREE;

  t = DECL_RESULT (fndecl);
  /* Adjust the DECL_RESULT.  */
  gcc_assert (TREE_TYPE (t) != void_type_node);
  TREE_TYPE (t) = TREE_TYPE (TREE_TYPE (fndecl));
  relayout_decl (t);

  tree atype = build_array_type_nelts (orig_rettype,
				       node->simdclone->simdlen);
  if (veclen != node->simdclone->simdlen)
    return build1 (VIEW_CONVERT_EXPR, atype, t);

  /* Set up a SIMD array to use as the return value.  */
  tree retval = create_tmp_var_raw (atype, "retval");
  gimple_add_tmp_var (retval);
  return retval;
}
/* Each vector argument has a corresponding array to be used locally
as part of the eventual loop. Create such temporary array and
return it.
PREFIX is the prefix to be used for the temporary.
TYPE is the inner element type.
SIMDLEN is the number of elements. */
static tree
create_tmp_simd_array (const char *prefix, tree type, int simdlen)
{
  /* Build an array type of SIMDLEN elements of TYPE, then register a
     raw temporary of that type (named after PREFIX) with the current
     function.  */
  tree arr_type = build_array_type_nelts (type, simdlen);
  tree arr = create_tmp_var_raw (arr_type, prefix);
  gimple_add_tmp_var (arr);
  return arr;
}
/* Modify the function argument types to their corresponding vector
counterparts if appropriate. Also, create one array for each simd
argument to be used locally when using the function arguments as
part of the loop.
NODE is the function whose arguments are to be adjusted.
If NODE does not represent function definition, returns NULL. Otherwise
returns an adjustment class that will be filled describing how the argument
declarations will be remapped. New arguments which are not to be remapped
are marked with USER_FLAG. */
static ipa_param_body_adjustments *
simd_clone_adjust_argument_types (struct cgraph_node *node)
{
auto_vec<tree> args;
/* For a definition we can collect the actual PARM_DECLs; for a bare
declaration only the formal parameter types are available.  */
if (node->definition)
push_function_arg_decls (&args, node->decl);
else
simd_clone_vector_of_formal_parm_types (&args, node->decl);
struct cgraph_simd_clone *sc = node->simdclone;
vec<ipa_adjusted_param, va_gc> *new_params = NULL;
vec_safe_reserve (new_params, sc->nargs);
unsigned i, j, veclen;
for (i = 0; i < sc->nargs; ++i)
{
ipa_adjusted_param adj;
memset (&adj, 0, sizeof (adj));
/* args[i] is a PARM_DECL in the definition case, a type otherwise.  */
tree parm = args[i];
tree parm_type = node->definition ? TREE_TYPE (parm) : parm;
adj.base_index = i;
adj.prev_clone_index = i;
sc->args[i].orig_arg = node->definition ? parm : NULL_TREE;
sc->args[i].orig_type = parm_type;
switch (sc->args[i].arg_type)
{
default:
/* No adjustment necessary for scalar arguments. */
adj.op = IPA_PARAM_OP_COPY;
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
/* The parameter itself is copied unchanged, but a local simd
array of the referenced values is created for the body.  */
if (node->definition)
sc->args[i].simd_array
= create_tmp_simd_array (IDENTIFIER_POINTER (DECL_NAME (parm)),
TREE_TYPE (parm_type),
sc->simdlen);
adj.op = IPA_PARAM_OP_COPY;
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
case SIMD_CLONE_ARG_TYPE_VECTOR:
/* The argument becomes one or more vector parameters.  Compute
how many lanes a target-sized vector of this element type
holds, capped at simdlen.  */
if (INTEGRAL_TYPE_P (parm_type) || POINTER_TYPE_P (parm_type))
veclen = sc->vecsize_int;
else
veclen = sc->vecsize_float;
veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (parm_type));
if (veclen > sc->simdlen)
veclen = sc->simdlen;
adj.op = IPA_PARAM_OP_NEW;
adj.param_prefix_index = IPA_PARAM_PREFIX_SIMD;
/* Pointer arguments are passed as vectors of pointer-sized
integers.  */
if (POINTER_TYPE_P (parm_type))
adj.type = build_vector_type (pointer_sized_int_node, veclen);
else
adj.type = build_vector_type (parm_type, veclen);
sc->args[i].vector_type = adj.type;
/* If simdlen needs several vectors, push one additional new
parameter per extra vector; those extras carry user_flag so
later passes can tell them from the first one.  */
for (j = veclen; j < sc->simdlen; j += veclen)
{
vec_safe_push (new_params, adj);
if (j == veclen)
{
memset (&adj, 0, sizeof (adj));
adj.op = IPA_PARAM_OP_NEW;
adj.user_flag = 1;
adj.param_prefix_index = IPA_PARAM_PREFIX_SIMD;
adj.base_index = i;
adj.prev_clone_index = i;
adj.type = sc->args[i].vector_type;
}
}
if (node->definition)
sc->args[i].simd_array
= create_tmp_simd_array (DECL_NAME (parm)
? IDENTIFIER_POINTER (DECL_NAME (parm))
: NULL, parm_type, sc->simdlen);
}
vec_safe_push (new_params, adj);
}
if (sc->inbranch)
{
/* For inbranch clones append the trailing mask argument.  */
tree base_type = simd_clone_compute_base_data_type (sc->origin, sc);
ipa_adjusted_param adj;
memset (&adj, 0, sizeof (adj));
adj.op = IPA_PARAM_OP_NEW;
adj.user_flag = 1;
adj.param_prefix_index = IPA_PARAM_PREFIX_MASK;
adj.base_index = i;
adj.prev_clone_index = i;
if (INTEGRAL_TYPE_P (base_type) || POINTER_TYPE_P (base_type))
veclen = sc->vecsize_int;
else
veclen = sc->vecsize_float;
veclen /= GET_MODE_BITSIZE (SCALAR_TYPE_MODE (base_type));
if (veclen > sc->simdlen)
veclen = sc->simdlen;
/* With a non-void mask_mode the mask is passed in that mode;
otherwise it is a vector like the data arguments.  */
if (sc->mask_mode != VOIDmode)
adj.type
= lang_hooks.types.type_for_mode (sc->mask_mode, 1);
else if (POINTER_TYPE_P (base_type))
adj.type = build_vector_type (pointer_sized_int_node, veclen);
else
adj.type = build_vector_type (base_type, veclen);
vec_safe_push (new_params, adj);
for (j = veclen; j < sc->simdlen; j += veclen)
vec_safe_push (new_params, adj);
/* We have previously allocated one extra entry for the mask. Use
it and fill it. */
sc->nargs++;
if (sc->mask_mode != VOIDmode)
base_type = boolean_type_node;
if (node->definition)
{
sc->args[i].orig_arg
= build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, base_type);
if (sc->mask_mode == VOIDmode)
sc->args[i].simd_array
= create_tmp_simd_array ("mask", base_type, sc->simdlen);
else if (veclen < sc->simdlen)
sc->args[i].simd_array
= create_tmp_simd_array ("mask", adj.type, sc->simdlen / veclen);
else
sc->args[i].simd_array = NULL_TREE;
}
sc->args[i].orig_type = base_type;
sc->args[i].arg_type = SIMD_CLONE_ARG_TYPE_MASK;
}
if (node->definition)
{
/* Definition: actually remap the parameter declarations.  */
ipa_param_body_adjustments *adjustments
= new ipa_param_body_adjustments (new_params, node->decl);
adjustments->modify_formal_parameters ();
return adjustments;
}
else
{
/* Declaration only: rebuild TYPE_ARG_TYPES from the adjusted
parameter list and return NULL.  */
tree new_arg_types = NULL_TREE, new_reversed;
bool last_parm_void = false;
if (args.length () > 0 && args.last () == void_type_node)
last_parm_void = true;
gcc_assert (TYPE_ARG_TYPES (TREE_TYPE (node->decl)));
j = vec_safe_length (new_params);
for (i = 0; i < j; i++)
{
struct ipa_adjusted_param *adj = &(*new_params)[i];
tree ptype;
if (adj->op == IPA_PARAM_OP_COPY)
ptype = args[adj->base_index];
else
ptype = adj->type;
new_arg_types = tree_cons (NULL_TREE, ptype, new_arg_types);
}
new_reversed = nreverse (new_arg_types);
if (last_parm_void)
{
if (new_reversed)
TREE_CHAIN (new_arg_types) = void_list_node;
else
new_reversed = void_list_node;
}
TYPE_ARG_TYPES (TREE_TYPE (node->decl)) = new_reversed;
return NULL;
}
}
/* Initialize and copy the function arguments in NODE to their
corresponding local simd arrays. Returns a fresh gimple_seq with
the instruction sequence generated. */
static gimple_seq
simd_clone_init_simd_arrays (struct cgraph_node *node,
ipa_param_body_adjustments *adjustments)
{
gimple_seq seq = NULL;
/* I indexes the clone's argument info, J the adjusted-parameter
vector; they diverge when one argument was split over several
vector parameters (see the inner loops below).  */
unsigned i = 0, j = 0, k;
for (tree arg = DECL_ARGUMENTS (node->decl);
arg;
arg = DECL_CHAIN (arg), i++, j++)
{
/* Copied (scalar) parameters and pointer-typed ones get no local
simd array.  */
if ((*adjustments->m_adj_params)[j].op == IPA_PARAM_OP_COPY
|| POINTER_TYPE_P (TREE_TYPE (arg)))
continue;
node->simdclone->args[i].vector_arg = arg;
tree array = node->simdclone->args[i].simd_array;
if (node->simdclone->mask_mode != VOIDmode
&& node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK)
{
if (array == NULL_TREE)
continue;
/* Store each mask parameter into consecutive elements of its
simd array; L is the array's highest index.  */
unsigned int l
= tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (array))));
for (k = 0; k <= l; k++)
{
if (k)
{
arg = DECL_CHAIN (arg);
j++;
}
tree t = build4 (ARRAY_REF, TREE_TYPE (TREE_TYPE (array)),
array, size_int (k), NULL, NULL);
t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
gimplify_and_add (t, &seq);
}
continue;
}
if (simd_clone_subparts (TREE_TYPE (arg)) == node->simdclone->simdlen)
{
/* One vector covers all simdlen lanes: one vector-typed
MEM_REF store into the array suffices.  */
tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
tree ptr = build_fold_addr_expr (array);
tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
build_int_cst (ptype, 0));
t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
gimplify_and_add (t, &seq);
}
else
{
/* The argument was split over several vector parameters; store
each at its byte offset within the array, walking ARG along
the parameter chain.  */
unsigned int simdlen = simd_clone_subparts (TREE_TYPE (arg));
tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array)));
for (k = 0; k < node->simdclone->simdlen; k += simdlen)
{
tree ptr = build_fold_addr_expr (array);
int elemsize;
if (k)
{
arg = DECL_CHAIN (arg);
j++;
}
tree elemtype = TREE_TYPE (TREE_TYPE (arg));
elemsize = GET_MODE_SIZE (SCALAR_TYPE_MODE (elemtype));
tree t = build2 (MEM_REF, TREE_TYPE (arg), ptr,
build_int_cst (ptype, k * elemsize));
t = build2 (MODIFY_EXPR, TREE_TYPE (t), t, arg);
gimplify_and_add (t, &seq);
}
}
}
return seq;
}
/* Callback info for ipa_simd_modify_stmt_ops below. */
struct modify_stmt_info {
/* Parameter-body adjustments describing how PARM_DECL uses are
remapped.  */
ipa_param_body_adjustments *adjustments;
/* The statement currently being rewritten.  */
gimple *stmt;
/* For PHIs, the last load statement inserted for a PHI operand;
further loads for the same PHI are inserted after it.  */
gimple *after_stmt;
/* True if the parent statement was modified by
ipa_simd_modify_stmt_ops. */
bool modified;
};
/* Callback for walk_gimple_op.
Adjust operands from a given statement as specified in the
adjustments vector in the callback data. */
static tree
ipa_simd_modify_stmt_ops (tree *tp, int *walk_subtrees, void *data)
{
struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
struct modify_stmt_info *info = (struct modify_stmt_info *) wi->info;
tree *orig_tp = tp;
/* Look through an ADDR_EXPR and simple component wrappers so a
PARM_DECL underneath can be found; orig_tp remembers the outer
position so the wrapper can be rewritten as a whole.  */
if (TREE_CODE (*tp) == ADDR_EXPR)
tp = &TREE_OPERAND (*tp, 0);
if (TREE_CODE (*tp) == BIT_FIELD_REF
|| TREE_CODE (*tp) == IMAGPART_EXPR
|| TREE_CODE (*tp) == REALPART_EXPR)
tp = &TREE_OPERAND (*tp, 0);
tree repl = NULL_TREE;
ipa_param_body_replacement *pbr = NULL;
if (TREE_CODE (*tp) == PARM_DECL)
{
pbr = info->adjustments->get_expr_replacement (*tp, true);
if (pbr)
repl = pbr->repl;
}
else if (TYPE_P (*tp))
*walk_subtrees = 0;
if (repl)
repl = unshare_expr (repl);
else
{
if (tp != orig_tp)
{
/* No direct replacement, but we peeled a wrapper: recurse into
the inner expression and only continue if that recursion
actually modified something.  */
*walk_subtrees = 0;
bool modified = info->modified;
info->modified = false;
walk_tree (tp, ipa_simd_modify_stmt_ops, wi, wi->pset);
if (!info->modified)
{
info->modified = modified;
return NULL_TREE;
}
info->modified = modified;
repl = *tp;
}
else
return NULL_TREE;
}
if (tp != orig_tp)
{
if (gimple_code (info->stmt) == GIMPLE_PHI
&& pbr
&& TREE_CODE (*orig_tp) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (*orig_tp, 0)) == PARM_DECL
&& pbr->dummy)
{
/* Reuse the SSA_NAME cached in pbr->dummy by a previous PHI
rewrite of the same &parm operand.  */
gcc_assert (TREE_CODE (pbr->dummy) == SSA_NAME);
*orig_tp = pbr->dummy;
info->modified = true;
return NULL_TREE;
}
/* The wrapped replacement must be materialized in a separate
statement: a debug source bind for debug stmts, otherwise an
assignment to a fresh SSA_NAME.  */
repl = build_fold_addr_expr (repl);
gimple *stmt;
if (is_gimple_debug (info->stmt))
{
tree vexpr = make_node (DEBUG_EXPR_DECL);
stmt = gimple_build_debug_source_bind (vexpr, repl, NULL);
DECL_ARTIFICIAL (vexpr) = 1;
TREE_TYPE (vexpr) = TREE_TYPE (repl);
SET_DECL_MODE (vexpr, TYPE_MODE (TREE_TYPE (repl)));
repl = vexpr;
}
else
{
stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (repl)), repl);
repl = gimple_assign_lhs (stmt);
}
gimple_stmt_iterator gsi;
if (gimple_code (info->stmt) == GIMPLE_PHI)
{
/* PHI operands cannot take the new statement in place; insert
it after the previous such load, or at the start of the
function.  */
if (info->after_stmt)
gsi = gsi_for_stmt (info->after_stmt);
else
gsi = gsi_after_labels (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
/* Cache SSA_NAME for next time. */
if (pbr
&& TREE_CODE (*orig_tp) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (*orig_tp, 0)) == PARM_DECL)
{
gcc_assert (!pbr->dummy);
pbr->dummy = repl;
}
}
else
gsi = gsi_for_stmt (info->stmt);
if (info->after_stmt)
gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
else
gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
if (gimple_code (info->stmt) == GIMPLE_PHI)
info->after_stmt = stmt;
*orig_tp = repl;
}
else if (!useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (repl)))
{
/* Type mismatch between use and replacement: wrap in a
VIEW_CONVERT_EXPR.  */
tree vce = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (*tp), repl);
*tp = vce;
}
else
*tp = repl;
info->modified = true;
return NULL_TREE;
}
/* Traverse the function body and perform all modifications as
described in ADJUSTMENTS. At function return, ADJUSTMENTS will be
modified such that the replacement/reduction value will now be an
offset into the corresponding simd_array.
This function will replace all function argument uses with their
corresponding simd array elements, and adjust the return values
accordingly. */
static void
ipa_simd_modify_function_body (struct cgraph_node *node,
ipa_param_body_adjustments *adjustments,
tree retval_array, tree iter)
{
basic_block bb;
unsigned int i, j;
/* Register replacements for every function argument use to an offset into
the corresponding simd_array. */
for (i = 0, j = 0; i < node->simdclone->nargs; ++i, ++j)
{
if (!node->simdclone->args[i].vector_arg
|| (*adjustments->m_adj_params)[j].user_flag)
continue;
tree basetype = TREE_TYPE (node->simdclone->args[i].orig_arg);
tree vectype = TREE_TYPE (node->simdclone->args[i].vector_arg);
/* The replacement for a use of argument I is simd_array[iter].  */
tree r = build4 (ARRAY_REF, basetype, node->simdclone->args[i].simd_array,
iter, NULL_TREE, NULL_TREE);
adjustments->register_replacement (&(*adjustments->m_adj_params)[j], r);
/* Skip the extra adjusted parameters of an argument that was
split over several vectors.  */
if (simd_clone_subparts (vectype) < node->simdclone->simdlen)
j += node->simdclone->simdlen / simd_clone_subparts (vectype) - 1;
}
tree name;
/* Re-home SSA names whose underlying variable is a replaced
PARM_DECL.  */
FOR_EACH_SSA_NAME (i, name, cfun)
{
tree base_var;
if (SSA_NAME_VAR (name)
&& TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL
&& (base_var
= adjustments->get_replacement_ssa_base (SSA_NAME_VAR (name))))
{
if (SSA_NAME_IS_DEFAULT_DEF (name))
{
/* Turn the (parameter) default def into an explicit load
from the replacement at the start of the function.  */
tree old_decl = SSA_NAME_VAR (name);
bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
gimple_stmt_iterator gsi = gsi_after_labels (bb);
tree repl = adjustments->lookup_replacement (old_decl, 0);
gcc_checking_assert (repl);
repl = unshare_expr (repl);
set_ssa_default_def (cfun, old_decl, NULL_TREE);
SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
SSA_NAME_IS_DEFAULT_DEF (name) = 0;
gimple *stmt = gimple_build_assign (name, repl);
gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
}
else
SET_SSA_NAME_VAR_OR_IDENTIFIER (name, base_var);
}
}
struct modify_stmt_info info;
info.adjustments = adjustments;
FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (node->decl))
{
gimple_stmt_iterator gsi;
/* First rewrite the operands of PHI nodes.  */
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gphi *phi = as_a <gphi *> (gsi_stmt (gsi));
int i, n = gimple_phi_num_args (phi);
info.stmt = phi;
info.after_stmt = NULL;
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
info.modified = false;
wi.info = &info;
for (i = 0; i < n; ++i)
{
int walk_subtrees = 1;
tree arg = gimple_phi_arg_def (phi, i);
tree op = arg;
ipa_simd_modify_stmt_ops (&op, &walk_subtrees, &wi);
if (op != arg)
{
SET_PHI_ARG_DEF (phi, i, op);
gcc_assert (TREE_CODE (op) == SSA_NAME);
if (gimple_phi_arg_edge (phi, i)->flags & EDGE_ABNORMAL)
SSA_NAME_OCCURS_IN_ABNORMAL_PHI (op) = 1;
}
}
}
/* Then rewrite the remaining statements.  */
gsi = gsi_start_bb (bb);
while (!gsi_end_p (gsi))
{
gimple *stmt = gsi_stmt (gsi);
info.stmt = stmt;
info.after_stmt = NULL;
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
info.modified = false;
wi.info = &info;
walk_gimple_op (stmt, ipa_simd_modify_stmt_ops, &wi);
if (greturn *return_stmt = dyn_cast <greturn *> (stmt))
{
tree retval = gimple_return_retval (return_stmt);
edge e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
e->flags |= EDGE_FALLTHRU;
if (!retval)
{
gsi_remove (&gsi, true);
continue;
}
/* Replace `return foo' with `retval_array[iter] = foo'. */
tree ref = build4 (ARRAY_REF, TREE_TYPE (retval),
retval_array, iter, NULL, NULL);
stmt = gimple_build_assign (ref, retval);
gsi_replace (&gsi, stmt, true);
info.modified = true;
}
if (info.modified)
{
update_stmt (stmt);
/* If the above changed the var of a debug bind into something
different, remove the debug stmt. We could also for all the
replaced parameters add VAR_DECLs for debug info purposes,
add debug stmts for those to be the simd array accesses and
replace debug stmt var operand with that var. Debugging of
vectorized loops doesn't work too well, so don't bother for
now. */
if ((gimple_debug_bind_p (stmt)
&& !DECL_P (gimple_debug_bind_get_var (stmt)))
|| (gimple_debug_source_bind_p (stmt)
&& !DECL_P (gimple_debug_source_bind_get_var (stmt))))
{
gsi_remove (&gsi, true);
continue;
}
if (maybe_clean_eh_stmt (stmt))
gimple_purge_dead_eh_edges (gimple_bb (stmt));
}
gsi_next (&gsi);
}
}
}
/* Helper function of simd_clone_adjust, return linear step addend
of Ith argument.  For a constant step this is simply the step as an
ADDTYPE constant; for a variable step the step argument's value is
loaded (dereferenced, converted and scaled as needed) via statements
inserted at the start of ENTRY_BB. */
static tree
simd_clone_linear_addend (struct cgraph_node *node, unsigned int i,
tree addtype, basic_block entry_bb)
{
tree ptype = NULL_TREE;
switch (node->simdclone->args[i].arg_type)
{
case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_CONSTANT_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP:
/* Constant step: linear_step holds the step value itself.  */
return build_int_cst (addtype, node->simdclone->args[i].linear_step);
case SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP:
ptype = TREE_TYPE (node->simdclone->args[i].orig_arg);
break;
case SIMD_CLONE_ARG_TYPE_LINEAR_VAL_VARIABLE_STEP:
case SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP:
ptype = TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg));
break;
default:
gcc_unreachable ();
}
/* Variable step: linear_step is the index of the argument that
carries the step.  */
unsigned int step_idx = node->simdclone->args[i].linear_step;
tree step_arg = node->simdclone->args[step_idx].orig_arg;
gcc_assert (is_gimple_reg_type (TREE_TYPE (step_arg)));
gimple_stmt_iterator it = gsi_after_labels (entry_bb);
gimple *new_stmt;
tree val;
if (!is_gimple_reg (step_arg))
{
new_stmt = gimple_build_assign (make_ssa_name (TREE_TYPE (step_arg)),
step_arg);
gsi_insert_before (&it, new_stmt, GSI_SAME_STMT);
val = gimple_assign_lhs (new_stmt);
}
else
val = get_or_create_ssa_default_def (cfun, step_arg);
if (TREE_CODE (TREE_TYPE (step_arg)) == REFERENCE_TYPE)
{
/* Step passed by reference: load the referenced value.  */
new_stmt
= gimple_build_assign (make_ssa_name (TREE_TYPE (TREE_TYPE (step_arg))),
build_simple_mem_ref (val));
gsi_insert_before (&it, new_stmt, GSI_SAME_STMT);
val = gimple_assign_lhs (new_stmt);
}
if (!useless_type_conversion_p (addtype, TREE_TYPE (val)))
{
new_stmt = gimple_build_assign (make_ssa_name (addtype), NOP_EXPR, val);
gsi_insert_before (&it, new_stmt, GSI_SAME_STMT);
val = gimple_assign_lhs (new_stmt);
}
if (POINTER_TYPE_P (ptype))
{
/* For linear pointer arguments scale the step by the size of the
pointed-to type, when that size is a compile-time constant.  */
tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (ptype));
if (elt_size && TREE_CODE (elt_size) == INTEGER_CST)
{
new_stmt = gimple_build_assign (make_ssa_name (addtype), MULT_EXPR,
val, fold_convert (addtype, elt_size));
gsi_insert_before (&it, new_stmt, GSI_SAME_STMT);
val = gimple_assign_lhs (new_stmt);
}
}
return val;
}
/* Adjust the argument types in NODE to their appropriate vector
counterparts.  Wraps the clone's body in a loop over the simd lanes:
adjusts the return type and parameters, rewrites argument and return
value uses, builds the iteration variable, loop CFG and latch, and
handles inbranch masks plus uniform/aligned/linear clauses. */
static void
simd_clone_adjust (struct cgraph_node *node)
{
push_cfun (DECL_STRUCT_FUNCTION (node->decl));
TREE_TYPE (node->decl) = build_distinct_type_copy (TREE_TYPE (node->decl));
targetm.simd_clone.adjust (node);
tree retval = simd_clone_adjust_return_type (node);
ipa_param_body_adjustments *adjustments
= simd_clone_adjust_argument_types (node);
gcc_assert (adjustments);
push_gimplify_context ();
gimple_seq seq = simd_clone_init_simd_arrays (node, adjustments);
/* Adjust all uses of vector arguments accordingly. Adjust all
return values accordingly. */
tree iter = create_tmp_var (unsigned_type_node, "iter");
tree iter1 = make_ssa_name (iter);
tree iter2 = NULL_TREE;
ipa_simd_modify_function_body (node, adjustments, retval, iter1);
delete adjustments;
/* Initialize the iteration variable. */
basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
basic_block body_bb = split_block_after_labels (entry_bb)->dest;
gimple_stmt_iterator gsi = gsi_after_labels (entry_bb);
/* Insert the SIMD array and iv initialization at function
entry. */
gsi_insert_seq_before (&gsi, seq, GSI_NEW_STMT);
pop_gimplify_context (NULL);
gimple *g;
basic_block incr_bb = NULL;
class loop *loop = NULL;
/* Create a new BB right before the original exit BB, to hold the
iteration increment and the condition/branch. */
if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
{
basic_block orig_exit = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0)->src;
incr_bb = create_empty_bb (orig_exit);
incr_bb->count = profile_count::zero ();
add_bb_to_loop (incr_bb, body_bb->loop_father);
/* Funnel every predecessor of the exit block through incr_bb.  */
while (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
{
edge e = EDGE_PRED (EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
redirect_edge_succ (e, incr_bb);
incr_bb->count += e->count ();
}
}
else if (node->simdclone->inbranch)
{
incr_bb = create_empty_bb (entry_bb);
incr_bb->count = profile_count::zero ();
add_bb_to_loop (incr_bb, body_bb->loop_father);
}
if (incr_bb)
{
/* Emit iter2 = iter1 + 1 in the increment block.  */
make_single_succ_edge (incr_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
gsi = gsi_last_bb (incr_bb);
iter2 = make_ssa_name (iter);
g = gimple_build_assign (iter2, PLUS_EXPR, iter1,
build_int_cst (unsigned_type_node, 1));
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
/* Mostly annotate the loop for the vectorizer (the rest is done
below). */
loop = alloc_loop ();
cfun->has_force_vectorize_loops = true;
loop->safelen = node->simdclone->simdlen;
loop->force_vectorize = true;
loop->header = body_bb;
}
/* Branch around the body if the mask applies. */
if (node->simdclone->inbranch)
{
gsi = gsi_last_bb (loop->header);
tree mask_array
= node->simdclone->args[node->simdclone->nargs - 1].simd_array;
tree mask;
if (node->simdclone->mask_mode != VOIDmode)
{
/* Integer mask mode: extract bit iter1 of the mask word.  */
tree shift_cnt;
if (mask_array == NULL_TREE)
{
tree arg = node->simdclone->args[node->simdclone->nargs
- 1].vector_arg;
mask = get_or_create_ssa_default_def (cfun, arg);
shift_cnt = iter1;
}
else
{
/* Several mask words: pick word iter1 >> s, then bit
iter1 & c within it.  */
tree maskt = TREE_TYPE (mask_array);
int c = tree_to_uhwi (TYPE_MAX_VALUE (TYPE_DOMAIN (maskt)));
c = node->simdclone->simdlen / (c + 1);
int s = exact_log2 (c);
gcc_assert (s > 0);
c--;
tree idx = make_ssa_name (TREE_TYPE (iter1));
g = gimple_build_assign (idx, RSHIFT_EXPR, iter1,
build_int_cst (NULL_TREE, s));
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
tree aref = build4 (ARRAY_REF,
TREE_TYPE (TREE_TYPE (mask_array)),
mask_array, idx, NULL, NULL);
g = gimple_build_assign (mask, aref);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
shift_cnt = make_ssa_name (TREE_TYPE (iter1));
g = gimple_build_assign (shift_cnt, BIT_AND_EXPR, iter1,
build_int_cst (TREE_TYPE (iter1), c));
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
}
g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)),
RSHIFT_EXPR, mask, shift_cnt);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
mask = gimple_assign_lhs (g);
g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)),
BIT_AND_EXPR, mask,
build_int_cst (TREE_TYPE (mask), 1));
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
mask = gimple_assign_lhs (g);
}
else
{
/* Vector mask: load element iter1 of the mask simd array.  */
mask = make_ssa_name (TREE_TYPE (TREE_TYPE (mask_array)));
tree aref = build4 (ARRAY_REF,
TREE_TYPE (TREE_TYPE (mask_array)),
mask_array, iter1, NULL, NULL);
g = gimple_build_assign (mask, aref);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
int bitsize = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (aref)));
if (!INTEGRAL_TYPE_P (TREE_TYPE (aref)))
{
/* Reinterpret a non-integral mask element as an integer of
the same bit size for the comparison below.  */
aref = build1 (VIEW_CONVERT_EXPR,
build_nonstandard_integer_type (bitsize, 0),
mask);
mask = make_ssa_name (TREE_TYPE (aref));
g = gimple_build_assign (mask, aref);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
}
}
/* If the lane's mask bit is zero, skip the body for this lane.  */
g = gimple_build_cond (EQ_EXPR, mask, build_zero_cst (TREE_TYPE (mask)),
NULL, NULL);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
edge e = make_edge (loop->header, incr_bb, EDGE_TRUE_VALUE);
e->probability = profile_probability::unlikely ().guessed ();
incr_bb->count += e->count ();
edge fallthru = FALLTHRU_EDGE (loop->header);
fallthru->flags = EDGE_FALSE_VALUE;
fallthru->probability = profile_probability::likely ().guessed ();
}
basic_block latch_bb = NULL;
basic_block new_exit_bb = NULL;
/* Generate the condition. */
if (incr_bb)
{
/* Loop back while iter2 < simdlen.  */
gsi = gsi_last_bb (incr_bb);
g = gimple_build_cond (LT_EXPR, iter2,
build_int_cst (unsigned_type_node,
node->simdclone->simdlen),
NULL, NULL);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
edge e = split_block (incr_bb, gsi_stmt (gsi));
latch_bb = e->dest;
new_exit_bb = split_block_after_labels (latch_bb)->dest;
loop->latch = latch_bb;
redirect_edge_succ (FALLTHRU_EDGE (latch_bb), body_bb);
edge new_e = make_edge (incr_bb, new_exit_bb, EDGE_FALSE_VALUE);
/* FIXME: Do we need to distribute probabilities for the conditional? */
new_e->probability = profile_probability::guessed_never ();
/* The successor of incr_bb is already pointing to latch_bb; just
change the flags.
make_edge (incr_bb, latch_bb, EDGE_TRUE_VALUE); */
FALLTHRU_EDGE (incr_bb)->flags = EDGE_TRUE_VALUE;
}
/* iter1 = PHI <0 (preheader), iter2 (latch)>.  */
gphi *phi = create_phi_node (iter1, body_bb);
edge preheader_edge = find_edge (entry_bb, body_bb);
edge latch_edge = NULL;
add_phi_arg (phi, build_zero_cst (unsigned_type_node), preheader_edge,
UNKNOWN_LOCATION);
if (incr_bb)
{
latch_edge = single_succ_edge (latch_bb);
add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
/* Generate the new return. */
gsi = gsi_last_bb (new_exit_bb);
if (retval
&& TREE_CODE (retval) == VIEW_CONVERT_EXPR
&& TREE_CODE (TREE_OPERAND (retval, 0)) == RESULT_DECL)
retval = TREE_OPERAND (retval, 0);
else if (retval)
{
retval = build1 (VIEW_CONVERT_EXPR,
TREE_TYPE (TREE_TYPE (node->decl)),
retval);
retval = force_gimple_operand_gsi (&gsi, retval, true, NULL,
false, GSI_CONTINUE_LINKING);
}
g = gimple_build_return (retval);
gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
}
/* Handle aligned clauses by replacing default defs of the aligned
uniform args with __builtin_assume_aligned (arg_N(D), alignment)
lhs. Handle linear by adding PHIs. */
for (unsigned i = 0; i < node->simdclone->nargs; i++)
if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
&& (TREE_ADDRESSABLE (node->simdclone->args[i].orig_arg)
|| !is_gimple_reg_type
(TREE_TYPE (node->simdclone->args[i].orig_arg))))
{
/* Addressable/aggregate uniform argument: snapshot it at entry
and restore it at the start of each loop body iteration.  */
tree orig_arg = node->simdclone->args[i].orig_arg;
if (is_gimple_reg_type (TREE_TYPE (orig_arg)))
iter1 = make_ssa_name (TREE_TYPE (orig_arg));
else
{
iter1 = create_tmp_var_raw (TREE_TYPE (orig_arg));
gimple_add_tmp_var (iter1);
}
gsi = gsi_after_labels (entry_bb);
g = gimple_build_assign (iter1, orig_arg);
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
gsi = gsi_after_labels (body_bb);
g = gimple_build_assign (orig_arg, iter1);
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
}
else if (node->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM
&& DECL_BY_REFERENCE (node->simdclone->args[i].orig_arg)
&& TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
== REFERENCE_TYPE
&& TREE_ADDRESSABLE
(TREE_TYPE (TREE_TYPE (node->simdclone->args[i].orig_arg))))
{
/* By-reference uniform argument whose referenced value is
addressable: snapshot and restore the referenced value.  */
tree orig_arg = node->simdclone->args[i].orig_arg;
tree def = ssa_default_def (cfun, orig_arg);
if (def && !has_zero_uses (def))
{
iter1 = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (orig_arg)));
gimple_add_tmp_var (iter1);
gsi = gsi_after_labels (entry_bb);
g = gimple_build_assign (iter1, build_simple_mem_ref (def));
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
gsi = gsi_after_labels (body_bb);
g = gimple_build_assign (build_simple_mem_ref (def), iter1);
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
}
}
else if (node->simdclone->args[i].alignment
&& node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_UNIFORM
&& (node->simdclone->args[i].alignment
& (node->simdclone->args[i].alignment - 1)) == 0
&& TREE_CODE (TREE_TYPE (node->simdclone->args[i].orig_arg))
== POINTER_TYPE)
{
/* Aligned uniform pointer with a power-of-two alignment:
replace its uses with __builtin_assume_aligned's result.  */
unsigned int alignment = node->simdclone->args[i].alignment;
tree orig_arg = node->simdclone->args[i].orig_arg;
tree def = ssa_default_def (cfun, orig_arg);
if (def && !has_zero_uses (def))
{
tree fn = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
gimple_seq seq = NULL;
bool need_cvt = false;
gcall *call
= gimple_build_call (fn, 2, def, size_int (alignment));
g = call;
if (!useless_type_conversion_p (TREE_TYPE (orig_arg),
ptr_type_node))
need_cvt = true;
tree t = make_ssa_name (need_cvt ? ptr_type_node : orig_arg);
gimple_call_set_lhs (g, t);
gimple_seq_add_stmt_without_update (&seq, g);
if (need_cvt)
{
t = make_ssa_name (orig_arg);
g = gimple_build_assign (t, NOP_EXPR, gimple_call_lhs (g));
gimple_seq_add_stmt_without_update (&seq, g);
}
gsi_insert_seq_on_edge_immediate
(single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)), seq);
entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
node->create_edge (cgraph_node::get_create (fn),
call, entry_bb->count);
/* Point all uses of the old default def (except debug uses
and the call itself) at the aligned value.  */
imm_use_iterator iter;
use_operand_p use_p;
gimple *use_stmt;
tree repl = gimple_get_lhs (g);
FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
if (is_gimple_debug (use_stmt) || use_stmt == call)
continue;
else
FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
SET_USE (use_p, repl);
}
}
else if ((node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP)
|| (node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP)
|| (node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_VARIABLE_STEP)
|| (node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_REF_VARIABLE_STEP))
{
/* Linear argument: build a PHI cycling iter1 -> iter2 with the
linear step added in incr_bb, and redirect its uses.  */
tree orig_arg = node->simdclone->args[i].orig_arg;
gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
|| POINTER_TYPE_P (TREE_TYPE (orig_arg)));
tree def = NULL_TREE;
if (TREE_ADDRESSABLE (orig_arg))
{
def = make_ssa_name (TREE_TYPE (orig_arg));
iter1 = make_ssa_name (TREE_TYPE (orig_arg));
if (incr_bb)
iter2 = make_ssa_name (TREE_TYPE (orig_arg));
gsi = gsi_after_labels (entry_bb);
g = gimple_build_assign (def, orig_arg);
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
}
else
{
def = ssa_default_def (cfun, orig_arg);
if (!def || has_zero_uses (def))
def = NULL_TREE;
else
{
iter1 = make_ssa_name (orig_arg);
if (incr_bb)
iter2 = make_ssa_name (orig_arg);
}
}
if (def)
{
phi = create_phi_node (iter1, body_bb);
add_phi_arg (phi, def, preheader_edge, UNKNOWN_LOCATION);
if (incr_bb)
{
add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
? PLUS_EXPR : POINTER_PLUS_EXPR;
tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (orig_arg))
? TREE_TYPE (orig_arg) : sizetype;
tree addcst = simd_clone_linear_addend (node, i, addtype,
entry_bb);
gsi = gsi_last_bb (incr_bb);
g = gimple_build_assign (iter2, code, iter1, addcst);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
imm_use_iterator iter;
use_operand_p use_p;
gimple *use_stmt;
if (TREE_ADDRESSABLE (orig_arg))
{
gsi = gsi_after_labels (body_bb);
g = gimple_build_assign (orig_arg, iter1);
gsi_insert_before (&gsi, g, GSI_NEW_STMT);
}
else
FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
if (use_stmt == phi)
continue;
else
FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
SET_USE (use_p, iter1);
}
}
else if (node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_CONSTANT_STEP
|| (node->simdclone->args[i].arg_type
== SIMD_CLONE_ARG_TYPE_LINEAR_UVAL_VARIABLE_STEP))
{
/* Linear uval (by-reference) argument: iterate a pointer into
its simd array (iter1/iter2) and the value itself
(iter3/iter4/iter5), storing the value into the array each
body iteration.  */
tree orig_arg = node->simdclone->args[i].orig_arg;
tree def = ssa_default_def (cfun, orig_arg);
gcc_assert (!TREE_ADDRESSABLE (orig_arg)
&& TREE_CODE (TREE_TYPE (orig_arg)) == REFERENCE_TYPE);
if (def && !has_zero_uses (def))
{
tree rtype = TREE_TYPE (TREE_TYPE (orig_arg));
iter1 = make_ssa_name (orig_arg);
if (incr_bb)
iter2 = make_ssa_name (orig_arg);
tree iter3 = make_ssa_name (rtype);
tree iter4 = make_ssa_name (rtype);
tree iter5 = incr_bb ? make_ssa_name (rtype) : NULL_TREE;
gsi = gsi_after_labels (entry_bb);
gimple *load
= gimple_build_assign (iter3, build_simple_mem_ref (def));
gsi_insert_before (&gsi, load, GSI_NEW_STMT);
tree array = node->simdclone->args[i].simd_array;
TREE_ADDRESSABLE (array) = 1;
tree ptr = build_fold_addr_expr (array);
phi = create_phi_node (iter1, body_bb);
add_phi_arg (phi, ptr, preheader_edge, UNKNOWN_LOCATION);
if (incr_bb)
{
add_phi_arg (phi, iter2, latch_edge, UNKNOWN_LOCATION);
g = gimple_build_assign (iter2, POINTER_PLUS_EXPR, iter1,
TYPE_SIZE_UNIT (TREE_TYPE (iter3)));
gsi = gsi_last_bb (incr_bb);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
phi = create_phi_node (iter4, body_bb);
add_phi_arg (phi, iter3, preheader_edge, UNKNOWN_LOCATION);
if (incr_bb)
{
add_phi_arg (phi, iter5, latch_edge, UNKNOWN_LOCATION);
enum tree_code code = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
? PLUS_EXPR : POINTER_PLUS_EXPR;
tree addtype = INTEGRAL_TYPE_P (TREE_TYPE (iter3))
? TREE_TYPE (iter3) : sizetype;
tree addcst = simd_clone_linear_addend (node, i, addtype,
entry_bb);
g = gimple_build_assign (iter5, code, iter4, addcst);
gsi = gsi_last_bb (incr_bb);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
g = gimple_build_assign (build_simple_mem_ref (iter1), iter4);
gsi = gsi_after_labels (body_bb);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
imm_use_iterator iter;
use_operand_p use_p;
gimple *use_stmt;
FOR_EACH_IMM_USE_STMT (use_stmt, iter, def)
if (use_stmt == load)
continue;
else
FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
SET_USE (use_p, iter1);
if (!TYPE_READONLY (rtype) && incr_bb)
{
/* Non-readonly uval: write the final value (first array
element) back through the reference at loop exit.  */
tree v = make_ssa_name (rtype);
tree aref = build4 (ARRAY_REF, rtype, array,
size_zero_node, NULL_TREE,
NULL_TREE);
gsi = gsi_after_labels (new_exit_bb);
g = gimple_build_assign (v, aref);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
g = gimple_build_assign (build_simple_mem_ref (def), v);
gsi_insert_before (&gsi, g, GSI_SAME_STMT);
}
}
}
calculate_dominance_info (CDI_DOMINATORS);
if (loop)
add_loop (loop, loop->header->loop_father);
update_ssa (TODO_update_ssa);
pop_cfun ();
}
/* If the function in NODE is tagged as an elemental SIMD function,
create the appropriate SIMD clones.  One clone pair (inbranch and
notinbranch, unless an inbranch clause was given) is created per
"omp declare simd" attribute and per target ISA variant. */
void
expand_simd_clones (struct cgraph_node *node)
{
tree attr = lookup_attribute ("omp declare simd",
DECL_ATTRIBUTES (node->decl));
if (attr == NULL_TREE
|| node->inlined_to
|| lookup_attribute ("noclone", DECL_ATTRIBUTES (node->decl)))
return;
/* Ignore
#pragma omp declare simd
extern int foo ();
in C, there we don't know the argument types at all. */
if (!node->definition
&& TYPE_ARG_TYPES (TREE_TYPE (node->decl)) == NULL_TREE)
return;
/* Call this before creating clone_info, as it might ggc_collect. */
if (node->definition && node->has_gimple_body_p ())
node->get_body ();
do
{
/* Start with parsing the "omp declare simd" attribute(s). */
bool inbranch_clause_specified;
struct cgraph_simd_clone *clone_info
= simd_clone_clauses_extract (node, TREE_VALUE (attr),
&inbranch_clause_specified);
if (clone_info == NULL)
continue;
int orig_simdlen = clone_info->simdlen;
tree base_type = simd_clone_compute_base_data_type (node, clone_info);
/* The target can return 0 (no simd clones should be created),
1 (just one ISA of simd clones should be created) or higher
count of ISA variants. In that case, clone_info is initialized
for the first ISA variant. */
int count
= targetm.simd_clone.compute_vecsize_and_simdlen (node, clone_info,
base_type, 0);
if (count == 0)
continue;
/* Loop over all COUNT ISA variants, and if !INBRANCH_CLAUSE_SPECIFIED,
also create one inbranch and one !inbranch clone of it. */
for (int i = 0; i < count * 2; i++)
{
struct cgraph_simd_clone *clone = clone_info;
if (inbranch_clause_specified && (i & 1) != 0)
continue;
if (i != 0)
{
/* Subsequent variants get a fresh copy of clone_info;
odd I means the inbranch flavor (one extra mask arg).  */
clone = simd_clone_struct_alloc (clone_info->nargs
+ ((i & 1) != 0));
simd_clone_struct_copy (clone, clone_info);
/* Undo changes targetm.simd_clone.compute_vecsize_and_simdlen
and simd_clone_adjust_argument_types did to the first
clone's info. */
clone->nargs -= clone_info->inbranch;
clone->simdlen = orig_simdlen;
/* And call the target hook again to get the right ISA. */
targetm.simd_clone.compute_vecsize_and_simdlen (node, clone,
base_type,
i / 2);
if ((i & 1) != 0)
clone->inbranch = 1;
}
/* simd_clone_mangle might fail if such a clone has been created
already. */
tree id = simd_clone_mangle (node, clone);
if (id == NULL_TREE)
{
if (i == 0)
clone->nargs += clone->inbranch;
continue;
}
/* Only when we are sure we want to create the clone actually
clone the function (or definitions) or create another
extern FUNCTION_DECL (for prototypes without definitions). */
struct cgraph_node *n = simd_clone_create (node);
if (n == NULL)
{
if (i == 0)
clone->nargs += clone->inbranch;
continue;
}
n->simdclone = clone;
clone->origin = node;
clone->next_clone = NULL;
/* Link the new clone into NODE's circular-ish clone list
(prev_clone of the head points at the tail).  */
if (node->simd_clones == NULL)
{
clone->prev_clone = n;
node->simd_clones = n;
}
else
{
clone->prev_clone = node->simd_clones->simdclone->prev_clone;
clone->prev_clone->simdclone->next_clone = n;
node->simd_clones->simdclone->prev_clone = n;
}
symtab->change_decl_assembler_name (n->decl, id);
/* And finally adjust the return type, parameters and for
definitions also function body. */
if (node->definition)
simd_clone_adjust (n);
else
{
TREE_TYPE (n->decl)
= build_distinct_type_copy (TREE_TYPE (n->decl));
targetm.simd_clone.adjust (n);
simd_clone_adjust_return_type (n);
simd_clone_adjust_argument_types (n);
}
}
}
while ((attr = lookup_attribute ("omp declare simd", TREE_CHAIN (attr))));
}
/* Entry point for IPA simd clone creation pass: walk every function in
   the call graph and expand its "omp declare simd" clones.  */
static unsigned int
ipa_omp_simd_clone (void)
{
  struct cgraph_node *fn;
  FOR_EACH_FUNCTION (fn)
    {
      expand_simd_clones (fn);
    }
  return 0;
}
namespace {
const pass_data pass_data_omp_simd_clone =
{
SIMPLE_IPA_PASS, /* type */
"simdclone", /* name */
OPTGROUP_OMP, /* optinfo_flags */
TV_NONE, /* tv_id */
( PROP_ssa | PROP_cfg ), /* properties_required */
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
0, /* todo_flags_finish */
};
class pass_omp_simd_clone : public simple_ipa_opt_pass
{
public:
pass_omp_simd_clone(gcc::context *ctxt)
: simple_ipa_opt_pass(pass_data_omp_simd_clone, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *);
virtual unsigned int execute (function *) { return ipa_omp_simd_clone (); }
};
bool
pass_omp_simd_clone::gate (function *)
{
return targetm.simd_clone.compute_vecsize_and_simdlen != NULL;
}
} // anon namespace
/* Factory for the simdclone pass: allocate a new pass object for the
   pass manager.  */
simple_ipa_opt_pass *
make_pass_omp_simd_clone (gcc::context *ctxt)
{
  return new pass_omp_simd_clone (ctxt);
}
|
GB_unaryop__identity_uint64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint64_int32
// op(A') function: GB_tran__identity_uint64_int32
// C type: uint64_t
// A type: int32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the identity operator with an int32 -> uint64
// typecast, one entry at a time, parallelized over nthreads.
GrB_Info GB_unop__identity_uint64_int32
(
    uint64_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = (uint64_t) Ax [p] ; identity op plus typecast
        Cx [p] = (uint64_t) Ax [p] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the identity operator.
// The actual loop lives in the shared template GB_unaryop_transpose.c,
// which expands using the GB_* macros defined above.
GrB_Info GB_tran__identity_uint64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
PrimPar.c | /*
*
* Proyecto final
*
* Daniel Roa - A01021960
* Christian Dalma - A01423166
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#define DIM 1000
void init(void);
void delete (int);
struct prims
{
int edge[DIM][DIM];
int dim;
int U[DIM];
int total_minDist;
int counts;
};
struct prims prim;
int main()
{
int ch, j, t, p_c, p_j, k, serial = 1, i;
int minDist;
int newElem;
prim.total_minDist = 0;
prim.counts = 0;
minDist = 1000;
printf("A continuación, inserte la cantidad de paradas de su ruta:\n");
scanf("%d", &prim.dim);
int num = prim.dim;
int id = omp_get_thread_num();
srand(time(NULL));
printf("Inserta el peso del tráfico: \n");
printf("Hilo %d\n", id);
for (i = 0; i < prim.dim; ++i)
{
for (j = 0; j < prim.dim; j++)
{
prim.edge[i][j] = rand() % num;
printf("Cost: %d ",prim.edge[i][j]);
printf("From %d To %d\n",i,j);
}
}
double start = omp_get_wtime();
init();
for (k = 0; k < prim.dim - 1; k++)
{
minDist = 1000;
for (i = 0; i < prim.counts; i++)
{
#pragma omp parallel for schedule(static)
for (j = 0; j < prim.dim; j++)
{
if (prim.edge[prim.U[i]][j] > minDist || prim.edge[prim.U[i]][j] == 0)
{
continue;
}
else
{
#pragma omp critical
{
minDist = prim.edge[prim.U[i]][j];
newElem = j;
printf("%d --> %d\n", i + 1, j + 1);
}
}
}
}
prim.total_minDist += minDist;
prim.U[i] = newElem;
delete (newElem);
prim.counts++;
}
printf("\n");
for (i = 0; i < prim.dim; i++)
{
printf("%d ", prim.U[i] + 1);
if (i < prim.dim - 1)
printf("-> ");
}
printf("\n\n");
double finito = omp_get_wtime() - start;
printf("Le tomó %.5g segundos en averiguar una solución.\n", finito);
printf("Distancia mínima entre paradas: %d\n\n", prim.total_minDist);
return 0;
}
/*
 * Reset the MST state: clear the accumulators, mark every vertex
 * unvisited (-1), seed the visited set U with vertex 0, and zero that
 * vertex's column so it is never selected again.
 * (Removed the unused local j from the original.)
 */
void init(void)
{
    int i;
    prim.total_minDist = 0;
    prim.counts = 0;
    for (i = 0; i < prim.dim; i++)
        prim.U[i] = -1;
    prim.U[0] = 0;
    delete (prim.U[0]);
    prim.counts++;
}
/* Remove NEXT_ELEMENT from future consideration by zeroing its column;
   a zero weight is skipped by the selection loop in main().  */
void delete (int next_element)
{
    int row;
    for (row = 0; row < prim.dim; row++)
        prim.edge[row][next_element] = 0;
}
select_ci.c | /*
* Select CI
*/
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include "config.h"
#include <assert.h>
#include "vhf/fblas.h"
#include "np_helper/np_helper.h"
#include "fci.h"
#define BUFBASE 112
#define STRB_BLKSIZE 224
/*
 * Binary-search STR in the ascending-sorted table STRSBOOK of NSTRS
 * entries.  Returns the index of STR, or -1 if not present.
 */
int SCIstr2addr(uint64_t str, uint64_t *strsbook, int nstrs)
{
        int head = 0;
        int tail = nstrs;
        while (head < tail) {
                /* head + (tail-head)/2 cannot overflow int, unlike the
                   classic (head+tail)/2 midpoint.  */
                int mid = head + (tail - head) / 2;
                if (str == strsbook[mid]) {
                        return mid;
                } else if (str < strsbook[mid]) {
                        tail = mid;
                } else {
                        head = mid + 1;
                }
        }
        return -1;
}
/* Split the orbitals 0..norb-1 of determinant STR1 into the occupied
   list OCC (bits set) and the virtual list VIR (bits clear), both in
   ascending orbital order.  */
static void make_occ_vir(int *occ, int *vir, uint64_t str1, int norb)
{
        int orb;
        int no = 0;
        int nv = 0;
        for (orb = 0; orb < norb; orb++) {
                if ((str1 >> orb) & 1ULL) {
                        occ[no++] = orb;
                } else {
                        vir[nv++] = orb;
                }
        }
}
/*
 * Build the single-excitation (a+ i) link table for each string in STRS.
 * link_index holds NLINK entries of 4 ints per string:
 * [a (or packed tril index ai), i, target address in STRS, sign].
 * The first NOCC entries are the diagonal a==i occupied terms with sign
 * +1; the rest are excitations occ[i] -> vir[a] whose excited string is
 * still inside the selected space STRS.
 * NOTE(review): slots for excitations that leave the space are left
 * unwritten -- presumably the caller zero-initializes link_index;
 * confirm at the call sites.
 */
void SCIcre_des_linkstr(int *link_index, int norb, int nstrs, int nocc,
                        uint64_t *strs, int store_trilidx)
{
        int ninter = nstrs;
        int occ[norb];
        int vir[norb];
        int nvir = norb - nocc;
        int nlink = nocc * nvir + nocc;
        int str_id, i, a, k, ai, addr;
        uint64_t str0, str1;
        int *tab;
        for (str_id = 0; str_id < ninter; str_id++) {
                str1 = strs[str_id];
                make_occ_vir(occ, vir, str1, norb);
                tab = link_index + str_id * nlink * 4;
                if (store_trilidx) {
                        /* packed lower-triangular composite index p(p+1)/2+q */
                        for (k = 0; k < nocc; k++) {
                                tab[k*4+0] = occ[k]*(occ[k]+1)/2+occ[k];
                                tab[k*4+2] = str_id;
                                tab[k*4+3] = 1;
                        }
                        for (a = 0; a < nvir; a++) {
                        for (i = 0; i < nocc; i++) {
                                /* excite occ[i] -> vir[a] */
                                str0 = (str1^(1ULL<<occ[i])) | (1ULL<<vir[a]);
                                addr = SCIstr2addr(str0, strs, nstrs);
                                if (addr >= 0) {
                                        if (vir[a] > occ[i]) {
                                                ai = vir[a]*(vir[a]+1)/2+occ[i];
                                        } else {
                                                ai = occ[i]*(occ[i]+1)/2+vir[a];
                                        }
                                        tab[k*4+0] = ai;
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] = FCIcre_des_sign(vir[a], occ[i], str1);
                                        k++;
                                }
                        } }
                } else {
                        for (k = 0; k < nocc; k++) {
                                tab[k*4+0] = occ[k];
                                tab[k*4+1] = occ[k];
                                tab[k*4+2] = str_id;
                                tab[k*4+3] = 1;
                        }
                        for (a = 0; a < nvir; a++) {
                        for (i = 0; i < nocc; i++) {
                                str0 = (str1^(1ULL<<occ[i])) | (1ULL<<vir[a]);
                                addr = SCIstr2addr(str0, strs, nstrs);
                                if (addr >= 0) {
                                        tab[k*4+0] = vir[a];
                                        tab[k*4+1] = occ[i];
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] = FCIcre_des_sign(vir[a], occ[i], str1);
                                        k++;
                                }
                        } }
                }
        }
}
/*
 * Build, for each (N-2)-electron intermediate string in INTER, the list
 * of double-creation links a+ b+ |str1> that land back inside the
 * selected space STRS.  Each link is 4 ints:
 * [i (or packed tril index ij), j, address in STRS, sign].
 * In the non-tril branch both orderings (i,j) and (j,i) are stored,
 * with opposite signs.
 */
void SCIdes_des_linkstr(int *link_index, int norb, int nocc, int nstrs, int ninter,
                        uint64_t *strs, uint64_t *inter, int store_trilidx)
{
        int occ[norb];
        int vir[norb];
        int str_id, i, j, k, addr;
        uint64_t str0, str1;
        int sign;
        /* intermediates have two fewer electrons, so two extra virtuals */
        int nvir = norb - nocc + 2;
        int nlink = nvir * nvir;
        int *tab;
        for (str_id = 0; str_id < ninter; str_id++) {
                str1 = inter[str_id];
                make_occ_vir(occ, vir, str1, norb);
                tab = link_index + str_id * nlink * 4;
                if (store_trilidx) {
                        for (k = 0, i = 1; i < nvir; i++) {
                        for (j = 0; j < i; j++) {
                                str0 = str1 | (1ULL<<vir[i]) | (1ULL<<vir[j]);
                                addr = SCIstr2addr(str0, strs, nstrs);
                                if (addr >= 0) {
                                        sign = FCIcre_sign(vir[i], str1);
                                        sign*= FCIdes_sign(vir[j], str0);
                                        tab[k*4+0] = vir[i]*(vir[i]-1)/2+vir[j];;
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] = sign;
                                        k++;
                                }
                        } }
                } else {
                        for (k = 0, i = 1; i < nvir; i++) {
                        for (j = 0; j < i; j++) {
                                str0 = str1 | (1ULL<<vir[i]) | (1ULL<<vir[j]);
                                addr = SCIstr2addr(str0, strs, nstrs);
                                if (addr >= 0) {
                                        sign = FCIcre_sign(vir[i], str1);
                                        sign*= FCIdes_sign(vir[j], str0);
                                        tab[k*4+0] = vir[i];
                                        tab[k*4+1] = vir[j];
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] = sign;
                                        k++;
                                        /* swapped ordering picks up a minus sign */
                                        tab[k*4+0] = vir[j];
                                        tab[k*4+1] = vir[i];
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] =-sign;
                                        k++;
                                }
                        } }
                }
        }
}
/* For every string in STRS, emit all determinants obtained by removing
   one occupied orbital, appending each to UNIQ_STRS.  Returns the count
   produced.  Note: despite the name, no deduplication happens here.  */
int SCIdes_uniq_strs(uint64_t *uniq_strs, uint64_t *strs,
                     int norb, int nocc, int nstrs)
{
        int n = 0;
        int is, orb;
        for (is = 0; is < nstrs; is++) {
                uint64_t s = strs[is];
                for (orb = 0; orb < norb; orb++) {
                        uint64_t bit = 1ULL << orb;
                        if (s & bit) {
                                uniq_strs[n++] = s ^ bit;
                        }
                }
        }
        return n;
}
/* Build, for each (N-1)-electron intermediate string in INTER, the list
   of creation links back into the selected space STRS.  Each link is
   4 ints: [0, orbital, address in STRS, sign].  */
void SCIdes_linkstr(int *link_index, int norb, int nocc, int nstrs, int ninter,
                    uint64_t *strs, uint64_t *inter)
{
        int nlink = norb - nocc + 1;    /* max links per intermediate */
        int is, orb;
        for (is = 0; is < ninter; is++) {
                uint64_t s1 = inter[is];
                int *tab = link_index + is * nlink * 4;
                int k = 0;
                for (orb = 0; orb < norb; orb++) {
                        uint64_t bit = 1ULL << orb;
                        if (s1 & bit)
                                continue;
                        uint64_t s0 = s1 | bit;
                        int addr = SCIstr2addr(s0, strs, nstrs);
                        if (addr < 0)
                                continue;
                        tab[k*4+0] = 0;
                        tab[k*4+1] = orb;
                        tab[k*4+2] = addr;
                        tab[k*4+3] = FCIdes_sign(orb, s0);
                        k++;
                }
        }
}
/* For every string in STRS, emit all determinants obtained by adding
   one electron in an empty orbital, appending each to UNIQ_STRS.
   Returns the count produced (no deduplication).  */
int SCIcre_uniq_strs(uint64_t *uniq_strs, uint64_t *strs,
                     int norb, int nocc, int nstrs)
{
        int n = 0;
        int is, orb;
        for (is = 0; is < nstrs; is++) {
                uint64_t s = strs[is];
                for (orb = 0; orb < norb; orb++) {
                        uint64_t bit = 1ULL << orb;
                        if (!(s & bit)) {
                                uniq_strs[n++] = s | bit;
                        }
                }
        }
        return n;
}
/* Build, for each (N+1)-electron intermediate string in INTER, the list
   of annihilation links back into the selected space STRS.  Each link
   is 4 ints: [orbital, 0, address in STRS, sign].  */
void SCIcre_linkstr(int *link_index, int norb, int nocc, int nstrs, int ninter,
                    uint64_t *strs, uint64_t *inter)
{
        int nlink = nocc + 1;
        int is, orb;
        for (is = 0; is < ninter; is++) {
                uint64_t s1 = inter[is];
                int *tab = link_index + is * nlink * 4;
                int k = 0;
                for (orb = 0; orb < norb; orb++) {
                        uint64_t bit = 1ULL << orb;
                        if (!(s1 & bit))
                                continue;
                        uint64_t s0 = s1 ^ bit;
                        int addr = SCIstr2addr(s0, strs, nstrs);
                        if (addr < 0)
                                continue;
                        tab[k*4+0] = orb;
                        tab[k*4+1] = 0;
                        tab[k*4+2] = addr;
                        tab[k*4+3] = FCIcre_sign(orb, s0);
                        k++;
                }
        }
}
/*
 * Importance-select candidate strings: for each string in STRS, add the
 * single excitations whose estimated contribution
 * eri_pq_max[a,i] * |c_max| exceeds SELECT_CUTOFF, and for the most
 * important singles also screen the connected double excitations via
 * the full integral block.  Appends candidates to INTER and returns the
 * count (duplicates are not removed here).
 */
int SCIselect_strs(uint64_t *inter, uint64_t *strs,
                   double *eri, double *eri_pq_max, double *civec_max,
                   double select_cutoff, int norb, int nocc, int nstrs)
{
        int nn = norb * norb;
        int n3 = norb * nn;
        int occ[norb];
        int vir[norb];
        int nvir = norb - nocc;
        int str_id, i, a, j, b;
        uint64_t str0, str1;
        double ca;
        double *peri;
        int ninter = 0;
        for (str_id = 0; str_id < nstrs; str_id++) {
                str0 = strs[str_id];
                make_occ_vir(occ, vir, str0, norb);
                ca = civec_max[str_id];
                for (i = 0; i < nocc; i++) {
                for (a = 0; a < nvir; a++) {
                        if (eri_pq_max[vir[a]*norb+occ[i]]*ca > select_cutoff) {
                                str1 = (str0 ^ (1ULL<<occ[i])) | (1ULL<<vir[a]);
                                inter[ninter] = str1;
                                ninter++;
                                /* only probe doubles built on low-occ / high-vir
                                   singles (avoids double counting pairs) */
                                if (occ[i] < nocc && vir[a] >= nocc) {
                                        peri = eri + n3 * vir[a] + nn * occ[i];
                                        for (j = 0; j < i; j++) {
                                        for (b = a+1; b < nvir; b++) {
                                                if (fabs(peri[vir[b]*norb+occ[j]])*ca > select_cutoff) {
                                                        inter[ninter] = (str1 ^ (1ULL<<occ[j])) | (1ULL<<vir[b]);
                                                        ninter++;
                                                } } }
                                }
                        } } }
        }
        return ninter;
}
/*
***********************************************************
*
* Need the permutation symmetry
* h2e[i,j,k,l] = h2e[j,i,k,l] = h2e[i,j,l,k] = h2e[j,i,l,k]
*
***********************************************************
*/
/* One (alpha string, beta block) tile of the bbaa contraction:
   gather t1 from ci0 over alpha links, vt1 = t1 * eri, scatter vt1 into
   ci1 over beta links.  ci1buf is unused in this kernel.  */
static void ctr_bbaa_kern(double *eri, double *ci0, double *ci1,
                          double *ci1buf, double *t1buf,
                          int bcount, int stra_id, int strb_id,
                          int norb, int na, int nb, int nlinka, int nlinkb,
                          _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        /* size of the packed (pq) lower-triangular composite index */
        const int nnorb = norb * (norb+1) / 2;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb, &D0, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);
}
/*
 * ci1 += bbaa two-electron contribution.  Alpha strings are distributed
 * over OpenMP threads; each thread writes disjoint alpha rows of ci1,
 * so no reduction is needed.
 */
void SCIcontract_2e_bbaa(double *eri, double *ci0, double *ci1,
                         int norb, int na, int nb, int nlinka, int nlinkb,
                         int *link_indexa, int *link_indexb)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);
#pragma omp parallel default(none) \
        shared(eri, ci0, ci1, norb, na, nb, nlinka, nlinkb, \
               clinka, clinkb)
{
        int strk, ib, blen;
        double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*(norb+1));
        /* NOTE(review): ci1buf is passed to the kernel uninitialized;
           ctr_bbaa_kern never dereferences it -- confirm if that kernel
           ever changes.  */
        double *ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_bbaa_kern(eri, ci0, ci1, ci1buf, t1buf,
                                      blen, strk, ib, norb, na, nb,
                                      nlinka, nlinkb, clinka, clinkb);
                }
        }
        free(t1buf);
}
        free(clinka);
        free(clinkb);
}
/* One (alpha string, beta block) tile of the aaaa contraction:
   gather t1 from ci0, vt1 = t1 * eri, scatter into the per-thread
   accumulation buffer ci1buf.  ci1 and nlinkb/clink_indexb are unused
   in this kernel.  */
static void ctr_aaaa_kern(double *eri, double *ci0, double *ci1,
                          double *ci1buf, double *t1buf,
                          int bcount, int stra_id, int strb_id,
                          int norb, int na, int nb, int nlinka, int nlinkb,
                          _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        /* antisymmetric pair index p>q: norb*(norb-1)/2 entries */
        const int nnorb = norb * (norb-1) / 2;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb, &D0, vt1, &bcount);
        FCIspread_a_t1(ci1buf, vt1, bcount, stra_id, 0,
                       norb, bcount, nlinka, clink_indexa);
}
/*
 * aaaa two-electron contraction driver: each thread accumulates into
 * its own ci1buf; buffers are merged with FCIomp_reduce_inplace and the
 * master thread adds the block into ci1.
 */
void SCIcontract_2e_aaaa(double *eri, double *ci0, double *ci1,
                         int norb, int na, int nb,
                         int inter_na, int nlinka, int *link_indexa)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * inter_na);
        FCIcompress_link_tril(clinka, link_indexa, inter_na, nlinka);
        /* NOTE(review): clinkb is never initialized; ctr_aaaa_kern
           ignores its clink_indexb argument (nlinkb is passed as 0).  */
        _LinkTrilT *clinkb;
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel default(none) \
        shared(eri, ci0, ci1, norb, na, nb, inter_na, nlinka, clinka, clinkb, \
               ci1bufs)
{
        int strk, ib, blen;
        double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*norb);
        double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE);
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < inter_na; strk++) {
                        ctr_aaaa_kern(eri, ci0, ci1, ci1buf, t1buf,
                                      blen, strk, ib, norb, na, nb,
                                      nlinka, 0, clinka, clinkb);
                }
                FCIomp_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
        }
        free(ci1buf);
        free(t1buf);
}
        free(clinka);
}
/*************************************************
*
* 2-particle DM
*
*************************************************/
/*
 * Gather t1[(i,a),:] += sign * ci0[str1, strb_id : strb_id+bcount] over
 * the alpha links of STRA_ID.  A zero sign in the packed link table
 * terminates the scan early.
 */
void SCIrdm2_a_t1ci(double *ci0, double *t1,
                    int bcount, int stra_id, int strb_id,
                    int norb, int nstrb, int nlinka, _LinkT *clink_indexa)
{
        ci0 += strb_id;
        int i, j, k, a, sign;
        size_t str1;
        const _LinkT *tab = clink_indexa + stra_id * nlinka;
        double *pt1, *pci;
        for (j = 0; j < nlinka; j++) {
                a = EXTRACT_CRE (tab[j]);
                i = EXTRACT_DES (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                pci = ci0 + str1*nstrb;
                pt1 = t1 + (i*norb+a) * bcount;
                if (sign == 0) {
                        /* links are packed contiguously; 0 marks the end */
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                pt1[k] += pci[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                pt1[k] -= pci[k];
                        }
                }
        }
}
/* Accumulate rdm2 -= buf^T * buf for one block of beta strings (note
   the negative prefactor DN1).  Only KET is used; BRA is unused in this
   kernel.  */
void SCIrdm2kern_aaaa(double *rdm2, double *bra, double *ket, double *buf,
                      int bcount, int stra_id, int strb_id, int norb,
                      int na, int nb, int nlinka, _LinkT *clink_indexa)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const double DN1 = -1;
        const int nnorb = norb * norb;
        memset(buf, 0, sizeof(double)*nnorb*bcount);
        SCIrdm2_a_t1ci(ket, buf, bcount, stra_id, strb_id,
                       norb, nb, nlinka, clink_indexa);
        dgemm_(&TRANS_T, &TRANS_N, &nnorb, &nnorb, &bcount,
               &DN1, buf, &bcount, buf, &bcount, &D1, rdm2, &nnorb);
}
/*
 * Driver for the aaaa 2-particle density matrix: each thread runs
 * DM2KERNEL over its share of intermediate alpha strings into a private
 * pdm2, the partials are summed under a critical section, and the
 * result is transposed (axes 0,2,1) into the final layout in place.
 */
void SCIrdm2_aaaa(void (*dm2kernel)(), double *rdm2, double *bra, double *ket,
                  int norb, int na, int nb, int inter_na, int nlinka,
                  int *link_indexa)
{
        const int nnorb = norb * norb;
        double *pdm2;
        memset(rdm2, 0, sizeof(double) * nnorb*nnorb);
        _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * inter_na);
        FCIcompress_link(clinka, link_indexa, norb, inter_na, nlinka);
#pragma omp parallel default(none) \
        shared(dm2kernel, bra, ket, norb, na, nb, inter_na, nlinka, clinka, rdm2), \
        private(pdm2)
{
        int strk, i, ib, blen;
        double *buf = malloc(sizeof(double) * nnorb*BUFBASE*2);
        pdm2 = calloc(nnorb*nnorb, sizeof(double));
#pragma omp for schedule(dynamic, 40)
        for (strk = 0; strk < inter_na; strk++) {
                for (ib = 0; ib < nb; ib += BUFBASE) {
                        blen = MIN(BUFBASE, nb-ib);
                        (*dm2kernel)(pdm2, bra, ket, buf, blen, strk, ib,
                                     norb, na, nb, nlinka, clinka);
                }
        }
        /* serial reduction of the per-thread partial density matrices */
#pragma omp critical
{
        for (i = 0; i < nnorb*nnorb; i++) {
                rdm2[i] += pdm2[i];
        }
}
        free(pdm2);
        free(buf);
}
        free(clinka);
        /* 0,2,1 transpose into the conventional rdm2 axis order */
        int shape[] = {norb, nnorb, norb};
        pdm2 = malloc(sizeof(double) * nnorb*nnorb);
        NPdtranspose_021(shape, rdm2, pdm2);
        memcpy(rdm2, pdm2, sizeof(double) * nnorb*nnorb);
        free(pdm2);
}
/***********************************************************************
*
* With symmetry
*
***********************************************************************/
/* Point-group-symmetric variant of ctr_bbaa_kern: eri is block-diagonal
   over the TOTIRREP irreps, so one dgemm is issued per diagonal block
   of size dimirrep[ir].  ci1buf is unused.  */
static void ctr_bbaa_symm(double *eri, double *ci0, double *ci1,
                          double *ci1buf, double *t1buf,
                          int bcount, int stra_id, int strb_id,
                          int norb, int na, int nb, int nlinka, int nlinkb,
                          _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb,
                          int *dimirrep, int totirrep)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1) / 2;
        int ir, p0;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        /* p0 tracks the offset of the current irrep's diagonal block */
        for (ir = 0, p0 = 0; ir < totirrep; ir++) {
                dgemm_(&TRANS_N, &TRANS_N, &bcount, dimirrep+ir, dimirrep+ir,
                       &D1, t1+p0*bcount, &bcount, eri+p0*nnorb+p0, &nnorb,
                       &D0, vt1+p0*bcount, &bcount);
                p0 += dimirrep[ir];
        }
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);
}
/*
 * Symmetry-adapted bbaa contraction driver; identical structure to
 * SCIcontract_2e_bbaa but dispatches the irrep-blocked kernel.
 */
void SCIcontract_2e_bbaa_symm(double *eri, double *ci0, double *ci1,
                              int norb, int na, int nb, int nlinka, int nlinkb,
                              int *link_indexa, int *link_indexb,
                              int *dimirrep, int totirrep)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);
#pragma omp parallel default(none) \
        shared(eri, ci0, ci1, norb, na, nb, nlinka, nlinkb, \
               clinka, clinkb, dimirrep, totirrep)
{
        int strk, ib, blen;
        double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*(norb+1));
        /* NOTE(review): ci1buf is uninitialized but unused by the kernel */
        double *ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_bbaa_symm(eri, ci0, ci1, ci1buf, t1buf,
                                      blen, strk, ib, norb, na, nb,
                                      nlinka, nlinkb, clinka, clinkb,
                                      dimirrep, totirrep);
                }
        }
        free(t1buf);
}
        free(clinka);
        free(clinkb);
}
/* Point-group-symmetric variant of ctr_aaaa_kern: one dgemm per irrep
   diagonal block of eri.  ci1 and nlinkb/clink_indexb are unused.  */
static void ctr_aaaa_symm(double *eri, double *ci0, double *ci1,
                          double *ci1buf, double *t1buf,
                          int bcount, int stra_id, int strb_id,
                          int norb, int na, int nb, int nlinka, int nlinkb,
                          _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb,
                          int *dimirrep, int totirrep)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb-1) / 2;
        int ir, p0;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        for (ir = 0, p0 = 0; ir < totirrep; ir++) {
                dgemm_(&TRANS_N, &TRANS_N, &bcount, dimirrep+ir, dimirrep+ir,
                       &D1, t1+p0*bcount, &bcount, eri+p0*nnorb+p0, &nnorb,
                       &D0, vt1+p0*bcount, &bcount);
                p0 += dimirrep[ir];
        }
        FCIspread_a_t1(ci1buf, vt1, bcount, stra_id, 0,
                       norb, bcount, nlinka, clink_indexa);
}
/*
 * Symmetry-adapted aaaa contraction driver; same reduction structure as
 * SCIcontract_2e_aaaa (per-thread ci1buf, FCIomp_reduce_inplace, master
 * adds the block into ci1).
 */
void SCIcontract_2e_aaaa_symm(double *eri, double *ci0, double *ci1,
                              int norb, int na, int nb,
                              int inter_na, int nlinka, int *link_indexa,
                              int *dimirrep, int totirrep)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * inter_na);
        FCIcompress_link_tril(clinka, link_indexa, inter_na, nlinka);
        /* NOTE(review): clinkb is never initialized; the kernel ignores it */
        _LinkTrilT *clinkb;
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel default(none) \
        shared(eri, ci0, ci1, norb, na, nb, inter_na, nlinka, clinka, clinkb, \
               dimirrep, totirrep, ci1bufs)
{
        int strk, ib, blen;
        double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*norb);
        double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE);
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < inter_na; strk++) {
                        ctr_aaaa_symm(eri, ci0, ci1, ci1buf, t1buf,
                                      blen, strk, ib, norb, na, nb,
                                      nlinka, 0, clinka, clinkb,
                                      dimirrep, totirrep);
                }
                FCIomp_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
        }
        free(ci1buf);
        free(t1buf);
}
        free(clinka);
}
|
GB_unop__log2_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log2_fp32_fp32)
// op(A') function: GB (_unop_tran__log2_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log2f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log2f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log2f (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply log2f entrywise, parallelized over
// nthreads; the bitmap variant skips entries whose Ab bit is clear.
GrB_Info GB (_unop_apply__log2_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = log2f (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = log2f (Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply log2f.  The loop
// lives in the shared template GB_unop_transpose.c, which expands using
// the GB_* macros defined above.
GrB_Info GB (_unop_tran__log2_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
salted_sha1_fmt_plug.c | /*
* generic salted-sha1 support for LDAP style password storage
*
* Copyright (c) 2003 Simon Marechal, salt length fixes (c) 2012 magnum
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_saltedsha;
#elif FMT_REGISTERS_H
john_register_one(&fmt_saltedsha);
#else
#include <string.h>
#include "misc.h"
#include "formats.h"
#include "arch.h"
#include "options.h"
#include "johnswap.h"
#include "salted_sha1_common.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif
#include "simd-intrinsics.h"
#include "common.h"
#include "sha.h"
#include "base64.h"
#ifdef _OPENMP
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Salted-SHA1"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH (55-MAX_SALT_LEN)
#define BINARY_ALIGN 4
#define SALT_SIZE (MAX_SALT_LEN + sizeof(unsigned int))
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 ) //for endianity conversion
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
struct s_salt
{
unsigned int len;
union {
unsigned char c[MAX_SALT_LEN];
ARCH_WORD_32 w32;
} data;
};
static struct s_salt *saved_salt;
#ifdef SIMD_COEF_32
static ARCH_WORD_32 (*saved_key)[SHA_BUF_SIZ*NBKEYS];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE/4*NBKEYS];
static unsigned int *saved_len;
static unsigned char out[PLAINTEXT_LENGTH + 1];
static int last_salt_size;
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4];
#endif
/* One-time format setup: scale the key batch size for OpenMP and
   allocate the key/digest buffers (SIMD-aligned in the SIMD build).  */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;
	omp_t = omp_get_max_threads();
	/* min scales by thread count only; max also by OMP_SCALE */
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifndef SIMD_COEF_32
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
#else
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	/* interleaved buffers hold NBKEYS candidates each */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_key = mem_calloc_align(self->params.max_keys_per_crypt/NBKEYS,
	                             sizeof(*crypt_key), MEM_ALIGN_SIMD);
#endif
}
/* Release the buffers allocated by init().  */
static void done(void)
{
#ifdef SIMD_COEF_32
	MEM_FREE(saved_len);
#endif
	MEM_FREE(saved_key);
	MEM_FREE(crypt_key);
}
/* Decode the base64 payload of an {SSHA} ciphertext into the raw
   20-byte SHA-1 digest (endian-swapped for the SIMD layout).  Returns a
   static buffer, valid until the next call.  */
static void * get_binary(char *ciphertext) {
	static char *realcipher;
	if (!realcipher) realcipher = mem_alloc_tiny(BINARY_SIZE + 1 + SALT_SIZE, MEM_ALIGN_WORD);
	ciphertext += NSLDAP_MAGIC_LENGTH;
	memset(realcipher, 0, BINARY_SIZE);
	base64_decode(ciphertext, strlen(ciphertext), realcipher);
#ifdef SIMD_COEF_32
	alter_endianity((unsigned char *)realcipher, BINARY_SIZE);
#endif
	return (void *)realcipher;
}
/* Store KEY for candidate INDEX.  SIMD build: interleave the key into
   the lane's SHA-1 message block in big-endian order, append the 0x80
   padding byte, and clear stale bytes from a previously longer key.
   Scalar build: plain bounded copy.  */
static void set_key(char *key, int index)
{
#ifdef SIMD_COEF_32
#if ARCH_ALLOWS_UNALIGNED
	const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key;
#else
	/* copy to an aligned bounce buffer when KEY is misaligned */
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
	const ARCH_WORD_32 *wkey = (uint32_t*)(is_aligned(key, sizeof(uint32_t)) ?
	                                       key : strcpy(buf_aligned, key));
#endif
	ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32*)saved_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32];
	ARCH_WORD_32 *keybuf_word = keybuffer;
	unsigned int len;
	ARCH_WORD_32 temp;
	len = 0;
	/* scan a 32-bit word at a time; when the NUL terminator falls inside
	   a word, merge the 0x80 SHA-1 padding byte into that word */
	while((unsigned char)(temp = *wkey++)) {
		if (!(temp & 0xff00))
		{
			*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8));
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000))
		{
			*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16));
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000))
		{
			*keybuf_word = JOHNSWAP(temp | (0x80U << 24));
			len+=3;
			goto key_cleaning;
		}
		*keybuf_word = JOHNSWAP(temp);
		len += 4;
		keybuf_word += SIMD_COEF_32;
	}
	/* key length was a multiple of 4: padding goes in a fresh word */
	*keybuf_word = 0x80000000;
key_cleaning:
	/* zero the rest of the lane (leftovers from longer keys) */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	saved_len[index] = len;
#else
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
#endif
}
/* Decode the ciphertext and extract the salt (everything after the
   20-byte digest).  Returns a pointer to a static struct s_salt, valid
   until the next call.  */
static void * get_salt(char * ciphertext)
{
	static struct s_salt cursalt;
	char *p;
	char realcipher[CIPHERTEXT_LENGTH];
	int len;
	ciphertext += NSLDAP_MAGIC_LENGTH;
	memset(realcipher, 0, sizeof(realcipher));
	memset(&cursalt, 0, sizeof(struct s_salt));
	len = strlen(ciphertext);
	base64_decode(ciphertext, len, realcipher);
	// We now support any salt length up to SALT_SIZE
	cursalt.len = (len + 3) / 4 * 3 - BINARY_SIZE;
	p = &ciphertext[len];
	/* each trailing '=' means one fewer decoded byte */
	while (*--p == '=')
		cursalt.len--;
	memcpy(cursalt.data.c, realcipher+BINARY_SIZE, cursalt.len);
	return &cursalt;
}
/* Return the plaintext for candidate INDEX; the SIMD build
   de-interleaves it from the message block into a static buffer.  */
static char *get_key(int index) {
#ifdef SIMD_COEF_32
	unsigned int i,s;
	s = saved_len[index];
	for(i=0;i<s;i++)
		out[i] = ((char*)saved_key)[GETPOS(i, index)];
	out[i] = 0;
	return (char *) out;
#else
	return saved_key[index];
#endif
}
/* Quick reject: compare only the first 32 bits of the target digest
   against every computed hash in the batch.  */
static int cmp_all(void *binary, int count) {
	unsigned int index;
	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_32
	if (((ARCH_WORD_32 *) binary)[0] == ((ARCH_WORD_32*)crypt_key)[(index&(SIMD_COEF_32-1)) + index/SIMD_COEF_32*5*SIMD_COEF_32])
#else
	if ( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)&(crypt_key[index][0]))[0] )
#endif
		return 1;
	return 0;
}
/* Nothing further to verify at this stage: the full binary hash has
 * already been compared word-by-word in cmp_one(), so every candidate
 * that reaches here is a confirmed match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Compare the full BINARY_SIZE hash of candidate 'index' against 'binary'.
 * SIMD builds walk the interleaved crypt_key layout one word at a time;
 * scalar builds can memcmp the contiguous digest. */
static int cmp_one(void * binary, int index)
{
#ifdef SIMD_COEF_32
int i;
for (i = 0; i < BINARY_SIZE/sizeof(ARCH_WORD_32); i++)
if (((ARCH_WORD_32 *) binary)[i] != ((ARCH_WORD_32*)crypt_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32+i*SIMD_COEF_32])
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
/* Record the active salt; crypt_all() and set_onesalt() read it later. */
static void set_salt(void *salt) {
saved_salt = salt;
}
#ifdef SIMD_COEF_32
/* Append the current salt after candidate 'index''s key in the interleaved
 * SHA-1 input buffer, add the 0x80 padding byte, clear residue left over
 * from a previously longer salt, and store the message length in bits in
 * the last word of the block (word 15). */
static inline void set_onesalt(int index)
{
unsigned int i, idx=index%NBKEYS;
unsigned char *sk = (unsigned char*)&saved_key[index/NBKEYS];
for(i=0;i<saved_salt->len;++i)
sk[GETPOS(i+saved_len[index], idx)] = saved_salt->data.c[i];
sk[GETPOS(i+saved_len[index], idx)] = 0x80;
/* wipe leftover bytes if the previous salt was longer than this one */
while (++i <= last_salt_size)
sk[GETPOS(i+saved_len[index], idx)] = 0;
((unsigned int*)sk)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + idx/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] = (saved_salt->len + saved_len[index])<<3;
}
#endif
/* Hash all 'count' candidates as SHA1(key . salt).  SIMD builds process
 * NBKEYS candidates per SIMDSHA1body() call; scalar builds hash one at a
 * time with a SHA_CTX.  Returns the number of candidates hashed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#ifdef SIMD_COEF_32
int inc = NBKEYS;
#else
int inc = 1;
#endif
#pragma omp parallel for
for (index=0; index < count; index += inc)
#endif
/* NOTE(review): without _OPENMP the loop header above is compiled out and
 * only index 0 is processed — presumably the non-OMP keys-per-crypt
 * constants make that cover all candidates; confirm against init(). */
{
#ifdef SIMD_COEF_32
unsigned int i;
/* salt must be (re)appended per candidate since keys change per crypt */
for(i=0;i<NBKEYS;i++)
set_onesalt(i+index);
SIMDSHA1body(saved_key[index/NBKEYS], crypt_key[index/NBKEYS], NULL, SSEi_MIXED_IN);
#else
SHA_CTX ctx;
SHA1_Init( &ctx );
SHA1_Update( &ctx, (unsigned char *) saved_key[index], strlen( saved_key[index] ) );
SHA1_Update( &ctx, (unsigned char *) saved_salt->data.c, saved_salt->len);
SHA1_Final( (unsigned char *)crypt_key[index], &ctx);
#endif
}
#ifdef SIMD_COEF_32
/* remember how much salt residue needs wiping next time */
last_salt_size = saved_salt->len;
#endif
return count;
}
#ifdef SIMD_COEF_32
/* Offset of candidate 'index''s first digest word inside the interleaved
 * crypt_key layout (5 words per SHA-1 digest). */
#define HASH_OFFSET (index&(SIMD_COEF_32-1))+(((unsigned int)index%NBKEYS)/SIMD_COEF_32)*SIMD_COEF_32*5
/* Each get_hash_N returns the first digest word masked to hash-table size N. */
static int get_hash_0(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & PH_MASK_6; }
#else
/* Scalar layout: digest word 0 of each candidate's contiguous digest. */
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }
#endif
/* Bucket a salt by its first 32-bit word, masked to the salt table size. */
static int salt_hash(void *salt)
{
struct s_salt * mysalt = salt;
return mysalt->data.w32 & (SALT_HASH_SIZE - 1);
}
/* John the Ripper format descriptor: binds the salted-SHA1 routines above
 * into the fmt_main interface (params struct, then methods struct). */
struct fmt_main fmt_saltedsha = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
{ NULL }, /* no tunable-cost names */
salted_sha1_common_tests /* self-test vectors */
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
salted_sha1_common_valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL }, /* no tunable-cost functions */
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
region1.c | #include <omp.h>
#include <assert.h>
#define N 10
/* Small OpenMP exercise: zero the array in parallel, conditionally sum it
 * with a reduction, then bump a counter from every thread.  Several of
 * these constructs are suspect; see the NOTE(review) comments. */
int main (int argc, char * argv[]){
int i;
int a[N];
int sum = 0;
#pragma omp parallel
{
int max = N; /* private per-thread copy */
#pragma omp for
for(i=0; i<max; i++)
a[i] = 0;
max = 0; /* only mutates the private copy; no effect outside the region */
}
/* NOTE(review): 'i' was the iteration variable of the worksharing loop
 * above (implicitly private there), so its value here is unspecified and
 * either branch below may be taken. */
#pragma omp parallel
if (i>0) {
sum = N; /* NOTE(review): unsynchronized write by every thread — data race */
} else {
#pragma omp for reduction(+:sum)
for(i=0; i<N; i++)
sum = sum + a[i];
}
{
int counter = 0;
/* NOTE(review): read-then-increment by all threads with no
 * synchronization — a data race; the assert can fail whenever more
 * than one thread observes counter == 0. */
#pragma omp parallel
if (counter == 0) counter++;
assert(counter == 1);
}
}
|
GB_binop__iseq_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int8)
// A*D function (colscale): GB (_AxD__iseq_int8)
// D*A function (rowscale): GB (_DxB__iseq_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int8)
// C=scalar+B GB (_bind1st__iseq_int8)
// C=scalar+B' GB (_bind1st_tran__iseq_int8)
// C=A+scalar GB (_bind2nd__iseq_int8)
// C=A'+scalar GB (_bind2nd_tran__iseq_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT8 || GxB_NO_ISEQ_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the shared template is
// specialized by the GB_* macros above (cij = (aij == bij) here).
void GB (_Cdense_ewise3_noaccum__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C using the B_ek_slicing task
// partition.  Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB (_Cdense_accumB__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__iseq_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable (the block above already returned); code-generator artifact
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B; also serves eWiseUnion, in which
// case alpha/beta supply values for entries missing from A or B.
GrB_Info GB (_AaddB__iseq_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
// NOTE(review): presumably the template reads alpha/beta only when
// is_eWiseUnion is true, so they stay uninitialized otherwise — confirm
// in GB_add_template.c.
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, C sparse/hypersparse.
GrB_Info GB (_AemultB_08__iseq_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for this operator, so only the
// unflipped arm below is compiled.
GrB_Info GB (_AemultB_02__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper, A and B
// bitmap/full.
GrB_Info GB (_AemultB_04__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__iseq_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every entry present per the bitmap Bb (GBB).
GrB_Info GB (_bind1st__iseq_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ; // skip entries not present in B
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every entry present per the bitmap Ab (GBB).
GrB_Info GB (_bind2nd__iseq_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ; // skip entries not present in A
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x == aij) through the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__iseq_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any later code (code-generator artifact)
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij == y) through the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__iseq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__ainv_uint64_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint64_int64
// op(A') function: GB_tran__ainv_uint64_int64
// C type: uint64_t
// A type: int64_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -((uint64_t) Ax [p]): cast int64 to uint64, then take the
// additive inverse (unsigned negation, wraps mod 2^64).
GrB_Info GB_unop__ainv_uint64_int64
(
uint64_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast to uint64_t, and apply the
// additive-inverse operator (phase 2 of the transpose template).
GrB_Info GB_tran__ainv_uint64_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
combined-1.c | /* { dg-do compile } */
/* { dg-options "-O1 -fopenmp -fdump-tree-optimized" } */
int a[10];
/* GCC dg test body: three runtime-scheduled parallel loops written as a
 * combined "parallel for", a split "parallel" + "for", and a "parallel"
 * with the "for" inside a block — the optimized dump must contain exactly
 * 3 GOMP_parallel_loop_runtime expansions (see dg-final below).
 * NOTE(review): declared int but returns no value; harmless here since the
 * return value is never used and this is a compile-only test. */
int foo (void)
{
int i;
#pragma omp parallel for schedule(runtime)
for (i = 0; i < 10; i++)
a[i] = i;
#pragma omp parallel
#pragma omp for schedule(runtime)
for (i = 0; i < 10; i++)
a[i] = 10 - i;
#pragma omp parallel
{
#pragma omp for schedule(runtime)
for (i = 0; i < 10; i++)
a[i] = i;
}
}
/* { dg-final { scan-tree-dump-times "GOMP_parallel_loop_runtime" 3 "optimized" } } */
|
GB_unaryop__lnot_fp64_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_fp32
// op(A') function: GB_tran__lnot_fp64_fp32
// C type: double
// A type: float
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !(((double) Ax [p]) != 0): cast float to double, then apply
// logical NOT (result is 0.0 or 1.0 stored as double).
GrB_Info GB_unop__lnot_fp64_fp32
(
double *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast float->double, and apply the
// logical-NOT operator (phase 2 of the transpose template).
GrB_Info GB_tran__lnot_fp64_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Mumin pro.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <windows.h>
//This block of codes was made by M. Raihan Azhari//
//------------------------------------------------
struct amalan{
// one day's worship (amalan) record; linked per day
int tahajud; // rakaat of tahajud prayer performed
int dhuha; // rakaat of dhuha prayer performed
int wajib; // obligatory prayers performed (target fixed at 5)
int tilawah; // Quran pages read
int tahfidz; // verses memorized
struct amalan *next; // next day's record (singly linked list)
};
typedef struct User{
// application user data (single user only)
char nama[30]; // user name
int target_status; // set to 1 once daily targets have been entered
int target_tahajud, target_dhuha, target_wajib;
int target_tilawah, target_tahfidz;
int day; // number of recorded days
struct amalan *data; // head of the per-day amalan list
}User;
typedef struct amalan Amalan;
typedef Amalan *Amalanptr;
//end of codes block
//----------------------------------------------------
void input_data(Amalanptr *sptr);
void input_menu();
void printAmalan(Amalanptr current, int day_removed[50], User *userptr);
void print_evaluasi(Amalanptr current, User *userptr , int day_removed[50]);
void removeptr (Amalanptr *startPtr, int day, int day_removed[50]);
int file_user_read (User *userptr, int day_removed[50]);
int file_user_write (User *userptr, int day_removed[50]);
int file_amalan_write (Amalanptr current, int day_removed[50]);
int file_amalan_read (Amalanptr *sptr, int day_removed[50], int i, int *posisi);
void welcome(User *userptr);
void help_mutabaah();
int file_removed_write(int day[50]);
int file_removed_read(int day[50]);
/* Entry point: restores persisted user/amalan data from files, then loops
 * on a text menu (enter -1 to exit); all state is written back to disk on
 * exit.  Uses Windows-only system("cls")/system("pause"). */
int main(){
FILE *fptr; /* NOTE(review): unused in this function */
int menu, day, i, j, id_input,id, mutabaah, status, login, login_status, file_status, posisi;
int day_removed[50] = {}; // flags which day numbers have been removed
// zero the control variables so no garbage values are read
menu = 0;
mutabaah = 0;
id = 0;
// the single user's struct is accessed through *userptr below
User *userptr;
userptr = (User*) calloc(1, 2 * sizeof(User));
/* Originally a multi-user design keyed by id; due to file-handling
difficulties only one user is supported, but the struct-pointer access
pattern was kept. */
Amalanptr startptr = NULL;
// restore the user profile and removed-day flags from their files
file_status = file_user_read(userptr, day_removed);
file_removed_read(day_removed);
posisi = 0;
// rebuild the amalan linked list from file, one record per stored day
if((userptr + id)->day == 0){
file_amalan_read(&startptr, day_removed, 0, &posisi);
}
else{
for(i = 0; i < (userptr + id)->day - 1; i++){
file_amalan_read(&startptr, day_removed, i, &posisi);
}
file_amalan_read(&startptr, day_removed, -1, &posisi);
}
(userptr + id)->data = startptr;
// main menu loop; -1 exits
while (menu != -1){
welcome(userptr);
login_status = 1;
printf("\nMasukan angka: ");
scanf("%d", &menu);
system("cls");
mutabaah = 0;
switch (menu){
case 1:
//---------------------------------------------------
//case 1 (Mutaba'ah Yaumiah) was made by M. Raihan Azhari
while(mutabaah != -1){
help_mutabaah();
printf("\nMasukan pilihan metode mutabaah: ");
scanf("%d", &mutabaah);
system("cls");
switch (mutabaah){
case 1:
// sub-menu 1: set the user's name and daily targets
(userptr + id)->target_status = 1;
// TODO: move this into its own function
//printf("\nuser ID: %d", id);
printf("\nMasukan Nama : ");
/* NOTE(review): '&' on the array gives char(*)[30], not char*, and
 * %s is unbounded — works in practice but worth fixing */
scanf("%s", &(userptr + id)->nama);
printf("\n Masukan Target Rakaat Tahajud: ");
scanf("%d", &(userptr + id)->target_tahajud);
printf("\n Masukan Target Rakaat Dhuha: ");
scanf("%d", &(userptr + id)->target_dhuha);
(userptr + id)->target_wajib = 5; // obligatory prayers: fixed target
printf("\n Masukan Target Halaman Tilawah: ");
scanf("%d", &(userptr + id)->target_tilawah);
printf("\n Masukan Target Ayat Tahfidz: ");
scanf("%d", &(userptr + id)->target_tahfidz);
printf("\n\n");
system("pause");
system("cls");
break;
case 2:
// sub-menu 2: input daily amalan records
// file handling will be wired in here later
if((userptr + id)->target_status != 1){
printf("\nHarap masukan target terlebih dahulu\n\n");
system("pause");
system("cls");
break;
}
printf("\nAssalamualaikum %s", (userptr + id)->nama);
printf("\nMasukan jumlah hari yang akan diinput: ");
scanf("%d", &day);
for(i = 0; i < day; i++){
printf("\n\nAmalan hari ke-%d", (userptr->day) + i + 1);
input_data(&startptr);
}
(userptr + id)->data = startptr;
system("cls");
printf("\n\ninput berhasil !\n");
printAmalan((userptr + id)->data, day_removed, userptr);
printf("\n\n");
system("pause");
system("cls");
break;
case 3:
// sub-menu 3: show the recap and the evaluation report
if((userptr + id)->target_status != 1){
printf("\nHarap masukan target terlebih dahulu\n\n");
system("pause");
system("cls");
break;
}
printAmalan((userptr + id)->data, day_removed, userptr);
printf("\n\n");
system("pause");
system("cls");
print_evaluasi((userptr + id)->data, userptr, day_removed);
printf("\n\n");
system("pause");
system("cls");
break;
case 4:
// sub-menu 4: mark one day's record as removed
if((userptr + id)->target_status != 1){
printf("\nHarap masukan target terlebih dahulu\n\n");
system("pause");
system("cls");
break;
}
printf("Pilih hari yang akan dihapus: ");
scanf("%d", &day);
day--;
removeptr (&startptr, day, day_removed);
day_removed[day] = 1;
printf("\nData hari ke-%d berhasil dihapus\n\n", day+1);
system("pause");
system("cls");
}
}
//end of codes block
//---------------------------------------------------------
/* NOTE(review): no break at the end of case 1 — execution falls through
 * into case 2 below, which is currently empty, so it is harmless today */
case 2:
//zakat
break;
case 3:
//waris
break;
}
}
// persist everything before exiting
file_user_write(userptr, day_removed);
file_amalan_write((userptr + id)->data, day_removed);
file_removed_write(day_removed);
return 0;
}
//--------------------------------------------------------
//This function was made by M. Raihan Azhari
/* Read one day's worship (amalan) values from stdin and append a new node
 * at the tail of the singly linked list *sptr.  The list owns the node;
 * it is freed when the list is torn down.
 * Fix vs. original: the malloc() result was dereferenced without a NULL
 * check; on allocation failure we now report and leave the list unchanged. */
void input_data(Amalanptr *sptr){
    Amalanptr newptr = malloc(sizeof(Amalan));
    if (newptr == NULL){
        /* allocation failed: report and keep the existing list intact */
        printf("\nAlokasi memori gagal\n");
        return;
    }
    printf("\nMasukan amalan");
    printf("\nRakaat Tahajud: ");
    scanf("%d", &newptr->tahajud);
    printf("Rakaat Dhuha: ");
    scanf("%d", &newptr->dhuha);
    printf("Banyak Sholat Wajib yang Dikerjakan: ");
    scanf("%d", &newptr->wajib);
    printf("Jumlah Halaman Tilawah: ");
    scanf("%d", &newptr->tilawah);
    printf("Jumlah Ayat Tahfidz: ");
    scanf("%d", &newptr->tahfidz );
    newptr->next = NULL;
    /* append at the tail (handles the empty-list case) */
    if (*sptr == NULL){
        *sptr = newptr;
    }
    else{
        Amalanptr tail = *sptr;
        while (tail->next != NULL)
            tail = tail->next;
        tail->next = newptr;
    }
}
//end of codes block
//----------------------------------------------------
//---------------------------------------------------
//This function was made by M. Raihan Azhari
/* Print a per-day recap of the amalan list; removed days print a deletion
 * notice instead.  Also refreshes userptr->day with the node count.
 * NOTE(review): the two halves of each recap are printed by thread ids 0
 * and 1 — with a single OpenMP thread the tid==1 lines (Tilawah/Tahfidz)
 * never print, and with several threads the line order between the two
 * printers is not synchronized.  The taskwait pragmas are no-ops here
 * since no tasks are created. */
void printAmalan(Amalanptr current, int day_removed[50], User *userptr){
// (leftover note: still the old coursework version, not yet reworked)
int counter;
counter = 0;
while(current != NULL){
if(day_removed[counter] == 1){
printf("\n\nRekap ibadah hari ke-%d telah dihapus \n", counter + 1);
}
else{
#pragma omp parallel
{
int tid;
tid = omp_get_thread_num();
#pragma omp single
{
printf("\n\nRekap ibadah hari ke-%d:", counter + 1);
}
#pragma omp taskwait
if(tid == 0){
printf("\nTahajud : %d Rakaat",current->tahajud);
printf("\nDhuha : %d Rakaat",current->dhuha);
printf("\nWajib : %d Waktu",current->wajib);
}
if (tid == 1){
printf("\nTilawah : %d Halaman",current->tilawah);
printf("\nTahfidz : %d Ayat",current->tahfidz);
}
#pragma omp taskwait
}
}
current = current->next;
counter++;
}
userptr->day = counter;
}
//end of codes block
//------------------------------------------------------------
//-------------------------------------------------------------
//this function was made by M. Raihan Azhari
/* Print per-activity daily averages and target-achievement percentages
 * over every recorded day, one OpenMP task per activity.  Also refreshes
 * userptr->day with the node count.  Output order between tasks is
 * serialized only per-activity by the critical sections.
 * Fixes vs. original:
 *  - the format strings ended in a lone '%' ("%.2f %"), which is undefined
 *    behavior in printf; now "%.2f %%" so a literal '%' is printed.
 *  - the tahfidz task (tugas == 4) guarded on target_tilawah instead of
 *    target_tahfidz — a copy/paste bug that allowed division by zero.
 *  - tugas/step were listed in private(...), so the master thread iterated
 *    from an uninitialized 'step'; the loop variable is now a local of the
 *    master block (implicitly firstprivate in each task). */
void print_evaluasi(Amalanptr current, User *userptr , int day_removed[50]){
    int counter;
    int jumlah_tahajud, jumlah_dhuha, jumlah_wajib, jumlah_tilawah, jumlah_tahfidz;
    jumlah_tahajud = 0;
    jumlah_dhuha = 0;
    jumlah_wajib = 0;
    jumlah_tilawah = 0;
    jumlah_tahfidz = 0;
    counter = 0;
    /* accumulate totals over the whole list */
    while (current != NULL){
        jumlah_tahajud += current->tahajud;
        jumlah_dhuha += current->dhuha;
        jumlah_wajib += current->wajib;
        jumlah_tilawah += current->tilawah;
        jumlah_tahfidz += current->tahfidz;
        counter ++;
        current = current->next;
    }
    userptr->day = counter;
    if(counter == 0){
        printf("Data masih kosong");
    }
    else{
        printf("\n Evaluasi ibadah harian selama %d hari: ", counter);
        #pragma omp parallel
        {
            #pragma omp master
            {
                int tugas; /* one task per activity: 0..4 */
                for(tugas = 0; tugas < 5; tugas++){
                    #pragma omp task firstprivate(tugas)
                    {
                        if (tugas == 0 && userptr->target_tahajud != 0){
                            float rata_tahajud = jumlah_tahajud / (float)counter;
                            float result_tahajud = rata_tahajud /(float) userptr->target_tahajud;
                            #pragma omp critical
                            {
                                printf("\n\n~~Tahajud~~ ");
                                printf("\nRata-rata Rakaat setiap harinya : %.2f", rata_tahajud);
                                printf("\nPersen ketercapaian target: %.2f %%", result_tahajud * 100);
                            }
                        }
                        if(tugas == 1 && userptr->target_dhuha != 0){
                            float rata_dhuha = jumlah_dhuha / (float)counter;
                            float result_dhuha = rata_dhuha /(float)userptr->target_dhuha;
                            #pragma omp critical
                            {
                                printf("\n\n~~Dhuha~~ ");
                                printf("\nRata-rata Rakaat setiap harinya : %.2f", rata_dhuha);
                                printf("\nPersen ketercapaian target: %.2f %%", result_dhuha * 100);
                            }
                        }
                        if(tugas == 2 && userptr->target_wajib != 0){
                            float rata_wajib = jumlah_wajib / (float)counter;
                            float result_wajib = rata_wajib /(float)userptr->target_wajib;
                            #pragma omp critical
                            {
                                printf("\n\n~~Sholat wajib 5 waktu~~ ");
                                printf("\nRata-rata Rakaat setiap harinya : %.2f", rata_wajib);
                                printf("\nPersen ketercapaian target: %.2f %%", result_wajib * 100);
                            }
                        }
                        if(tugas == 3 && userptr->target_tilawah != 0){
                            float rata_tilawah = jumlah_tilawah / (float)counter;
                            float result_tilawah = rata_tilawah /(float)userptr->target_tilawah;
                            #pragma omp critical
                            {
                                printf("\n\n~~Tilawah~~ ");
                                printf("\nRata-rata Halaman setiap harinya : %.2f", rata_tilawah);
                                printf("\nPersen ketercapaian target: %.2f %%", result_tilawah * 100);
                            }
                        }
                        if(tugas == 4 && userptr->target_tahfidz != 0){
                            float rata_tahfidz = jumlah_tahfidz / (float)counter;
                            float result_tahfidz = rata_tahfidz /(float)userptr->target_tahfidz;
                            #pragma omp critical
                            {
                                printf("\n\n~~Tahfidz~~ ");
                                printf("\nRata-rata Halaman setiap harinya : %.2f", rata_tahfidz);
                                printf("\nPersen ketercapaian target: %.2f %%", result_tahfidz * 100);
                            }
                        }
                    }
                }
            }
            #pragma omp taskwait
        }
    }
}
//-----------------------------------------------------------------------------------
//-----------------------------------------------
//Muhammad Raihan Azhari
/*
 * Apparently intended to unlink and free the node for the given day.
 * NOTE(review): as written this function frees nothing in normal use -
 * see the inline notes. Left untouched pending clarification of the
 * intended day numbering (0- vs 1-based) and of how day_removed relates
 * to physical removal (printAmalan suggests deleted days keep their
 * nodes and are merely flagged in day_removed).
 */
void removeptr (Amalanptr *startPtr, int day, int day_removed[50]){
Amalanptr prevPtr;
Amalanptr tempPtr;
Amalanptr currentPtr;
int i, hari; // NOTE(review): 'hari' is never used
day++; // after this increment ...
if ( day == 0) { // ... this head-removal branch only runs when the caller passed day == -1
tempPtr = *startPtr;
*startPtr = ( *startPtr )->next;
free( tempPtr );
}
else {
prevPtr = *startPtr;
currentPtr = ( *startPtr )->next;
for(i = 1; i < day; i++){
if(day_removed[i]== 1){
continue; // skip days already flagged as removed
}
if (i == day) { // NOTE(review): unreachable - the loop condition is i < day
tempPtr = currentPtr;
prevPtr->next = currentPtr->next;
free( tempPtr );
}
prevPtr = currentPtr;
currentPtr = currentPtr->next;
if(currentPtr == NULL) {
break; // reached end of list without removing anything
}
}
}
}
//------------------------------------------------------
//---------------------------------------------------
//Fikri Afif Musyaffa
/*
 * Load the user profile from "userMuslim.txt", one field per line.
 *
 * Returns 1 when a profile was read, 0 when the file did not exist
 * (in which case an empty file is created so later writes succeed).
 * day_removed is unused; kept for signature compatibility.
 *
 * Fixes over the original: no more fclose(NULL) (undefined behavior)
 * on the missing-file path, and fscanf("%s") now receives the array
 * itself instead of its address (&userptr->nama had the wrong type).
 */
int file_user_read (User *userptr, int day_removed[50]){
    (void)day_removed;
    FILE *fptr = fopen("userMuslim.txt", "r");
    if (fptr == NULL) {
        /* First run: create an empty profile file. */
        fptr = fopen("userMuslim.txt", "w");
        if (fptr != NULL) {
            fclose(fptr);
        }
        return 0;
    }
    /* fopen("r") already positions at the start; no fseek needed. */
    fscanf(fptr, "\n%s", userptr->nama); /* assumes nama is a char array - TODO confirm */
    fscanf(fptr, "\n%d", &userptr->target_status);
    fscanf(fptr, "\n%d", &userptr->target_tahajud);
    fscanf(fptr, "\n%d", &userptr->target_dhuha);
    fscanf(fptr, "\n%d", &userptr->target_wajib);
    fscanf(fptr, "\n%d", &userptr->target_tilawah);
    fscanf(fptr, "\n%d", &userptr->target_tahfidz);
    fscanf(fptr, "\n%d", &userptr->day);
    fclose(fptr);
    return 1;
}
//--------------------------------------------------
//---------------------------------------------------
//Fikri Afif Musyaffa
/*
 * Persist the user profile to "userMuslim.txt", one field per line.
 *
 * Returns 1 on success, 0 if the file could not be opened.
 * (The original was declared int but never returned a value - UB when
 * a caller uses the result - and did not check fopen.)
 * day_removed is unused; kept for signature compatibility.
 */
int file_user_write (User *userptr, int day_removed[50]){
    (void)day_removed;
    FILE *fptr = fopen("userMuslim.txt", "w");
    if (fptr == NULL) {
        return 0;
    }
    fprintf(fptr, "\n%s", userptr->nama);
    fprintf(fptr, "\n%d", userptr->target_status);
    fprintf(fptr, "\n%d", userptr->target_tahajud);
    fprintf(fptr, "\n%d", userptr->target_dhuha);
    fprintf(fptr, "\n%d", userptr->target_wajib);
    fprintf(fptr, "\n%d", userptr->target_tilawah);
    fprintf(fptr, "\n%d", userptr->target_tahfidz);
    fprintf(fptr, "\n%d", userptr->day);
    fclose(fptr);
    return 1;
}
//---------------------------------------------------
//---------------------------------------------------
//M. Raihan Azhari
/*
 * Read one day's Amalan record from "amalan.txt" and append it to the
 * tail of the list.
 *
 * Call protocol (unchanged from the original):
 *   i == 0  : open the file and read the first record
 *   i  > 0  : continue reading the next record from the open file
 *   i == -1 : read one final record, then close the file
 *
 * sptr        - address of the list head pointer
 * day_removed - unused; kept for signature compatibility
 * posisi      - unused; kept for signature compatibility
 *
 * Returns 1 on success, 0 when the file does not exist (an empty
 * "amalan.txt" is created so later writes succeed).
 *
 * Fixes over the original:
 *  - fptr is now static: it was an uninitialized local, so every call
 *    with i != 0 read through an indeterminate pointer (UB)
 *  - the missing-file path no longer calls fclose(NULL) (UB), creates
 *    "amalan.txt" instead of "userMuslim.txt", and no longer leaks the
 *    node that had already been allocated
 *  - malloc is checked before use
 */
int file_amalan_read (Amalanptr *sptr, int day_removed[50], int i, int *posisi){
    static FILE *fptr = NULL;  /* persists between the staged calls */
    Amalanptr currentptr;
    Amalanptr newptr;
    Amalanptr prevptr;
    (void)day_removed;
    (void)posisi;
    if (i == 0) {
        fptr = fopen("amalan.txt", "r");
    }
    if (fptr == NULL) {
        /* First run: create an empty data file for later writes. */
        fptr = fopen("amalan.txt", "w");
        if (fptr != NULL) {
            fclose(fptr);
            fptr = NULL;
        }
        return 0;
    }
    newptr = malloc(sizeof(Amalan));
    if (newptr == NULL) {
        return 0;
    }
    fscanf(fptr, "\n%d", &newptr->tahajud);
    fscanf(fptr, "\n%d", &newptr->dhuha);
    fscanf(fptr, "\n%d", &newptr->wajib);
    fscanf(fptr, "\n%d", &newptr->tilawah);
    fscanf(fptr, "\n%d", &newptr->tahfidz);
    newptr->next = NULL;
    /* Walk to the tail and append the new record. */
    prevptr = NULL;
    currentptr = *sptr;
    while (currentptr != NULL) {
        prevptr = currentptr;
        currentptr = currentptr->next;
    }
    if (prevptr == NULL) {
        *sptr = newptr;       /* list was empty: new node becomes the head */
    } else {
        prevptr->next = newptr;
    }
    if (i == -1) {            /* caller signalled this is the last record */
        fclose(fptr);
        fptr = NULL;
    }
    return 1;
}
//--------------------------------------------------
//---------------------------------------------------
//M. Raihan Azhari
/*
 * Write every Amalan record in the list to "amalan.txt", five integers
 * per record, one per line.
 *
 * Returns 1 on success, 0 if the file could not be opened.
 * (The original was declared int but never returned a value - UB when
 * a caller uses the result - and did not check fopen.)
 * day_removed is unused; kept for signature compatibility.
 */
int file_amalan_write (Amalanptr current, int day_removed[50]){
    (void)day_removed;
    FILE *fptr = fopen("amalan.txt", "w");
    if (fptr == NULL) {
        return 0;
    }
    while (current != NULL) {
        fprintf(fptr, "\n%d", current->tahajud);
        fprintf(fptr, "\n%d", current->dhuha);
        fprintf(fptr, "\n%d", current->wajib);
        fprintf(fptr, "\n%d", current->tilawah);
        fprintf(fptr, "\n%d", current->tahfidz);
        current = current->next;
    }
    fclose(fptr);
    return 1;
}
//---------------------------------------------------
/*
 * Print the application banner, greet the user by name, and show the
 * top-level menu.
 *
 * Fix: the original wrapped the dash loops in orphaned OpenMP
 * for/barrier pragmas (no-ops outside a parallel region) and declared
 * an unused tid variable; both removed, output unchanged.
 */
void welcome(User *userptr){
    int i;
    for (i = 0; i < 60; i++) {
        printf("-");
    }
    printf("\n\t\t\t Mu'min Pro\n");
    for (i = 0; i < 60; i++) {
        printf("-");
    }
    printf("\nAssalamu'alaikum %s", (userptr)->nama);
    printf("\n\nMode Menu Mutabaah: ");
    printf("\n1. Mutaba'ah Yaumiah (Evaluasi Ibadah Harian)");
    printf("\n2. untuk Kalkulator Perhitungan Zakat");
    printf("\n3. untuk Kalkulator Perhitungan Waris\n\n" );
}
/* Show the Mutabaah sub-menu options, one per line. */
void help_mutabaah(){
    static const char *options[] = {
        "\n1. Input Target Mutabaah",
        "\n2. Input Ibadah Harian",
        "\n3. Lihat Evaluasi Ibadah Harian",
        "\n4. Menghapus Ibadah Harian",
        "\n-1 Untuk keluar program",
    };
    const int n_options = (int)(sizeof options / sizeof options[0]);
    int idx;
    for (idx = 0; idx < n_options; idx++) {
        printf("%s", options[idx]);
    }
}
/*
 * Persist the 50-entry removed-day flag array to "removeday.txt",
 * one integer per line.
 *
 * Returns 1 on success, 0 if the file could not be opened.
 * (The original was declared int but never returned a value - UB when
 * a caller uses the result - and did not check fopen.)
 */
int file_removed_write(int day[50]){
    FILE *fptr = fopen("removeday.txt", "w");
    int i;
    if (fptr == NULL) {
        return 0;
    }
    for (i = 0; i < 50; i++) {
        fprintf(fptr, "\n%d", day[i]);
    }
    fclose(fptr);
    return 1;
}
/*
 * Load the 50-entry removed-day flag array from "removeday.txt".
 *
 * Returns 1 when the file was read, 0 when it did not exist (an empty
 * file is created so later writes succeed).
 *
 * Fixes over the original: no more fclose(NULL) (undefined behavior)
 * on the missing-file path, and the handle is now closed on the
 * success path (it leaked before).
 */
int file_removed_read(int day[50]){
    FILE *fptr = fopen("removeday.txt", "r");
    int i;
    if (fptr == NULL) {
        fptr = fopen("removeday.txt", "w");
        if (fptr != NULL) {
            fclose(fptr);
        }
        return 0;
    }
    for (i = 0; i < 50; i++) {
        fscanf(fptr, "\n%d", &day[i]);
    }
    fclose(fptr);
    return 1;
}
|
FullyDistSpVec.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.6 -------------------------------------------------*/
/* date: 6/15/2017 ---------------------------------------------*/
/* authors: Ariful Azad, Aydin Buluc --------------------------*/
/****************************************************************/
/*
Copyright (c) 2010-2017, The Regents of the University of California
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _FULLY_DIST_SP_VEC_H_
#define _FULLY_DIST_SP_VEC_H_
#include <iostream>
#include <vector>
#include <utility>
#include "CommGrid.h"
#include "promote.h"
#include "SpParMat.h"
#include "FullyDist.h"
#include "Exception.h"
#include "OptBuf.h"
#include "CombBLAS.h"
namespace combblas {
template <class IT, class NT, class DER>
class SpParMat;
template <class IT>
class DistEdgeList;
template <class IU, class NU>
class FullyDistVec;
template <class IU, class NU>
class SparseVectorLocalIterator;
/**
* A sparse vector of length n (with nnz <= n of them being nonzeros) is distributed to
* "all the processors" in a way that "respects ordering" of the nonzero indices
* Example: x = [5,1,6,2,9] for nnz(x)=5 and length(x)=12
* we use 4 processors P_00, P_01, P_10, P_11
* Then P_00 owns [1,2] (in the range [0,...,2]), P_01 owns [5] (in the range [3,...,5]), and so on.
* In the case of A(v,w) type sparse matrix indexing, this doesn't matter because n = nnz
* After all, A(v,w) will have dimensions length(v) x length (w)
* v and w will be of numerical type (NT) "int" and their indices (IT) will be consecutive integers
* It is possible that nonzero counts are distributed unevenly
* Example: x=[1,2,3,4,5] and length(x) = 20, then P_00 would own all the nonzeros and the rest will hold empty vectors
* Just like in SpParMat case, indices are local to processors (they belong to range [0,...,length-1] on each processor)
* \warning Always create vectors with the right length, setting elements won't increase its length (similar to operator[] on std::vector)
**/
// Bug fix in Prune() below: the in-place branch swapped ind twice
// (ind.swap(temp.num)) instead of swapping num, leaving the value
// array inconsistent with the index array - and it is ill-formed
// whenever IT != NT since std::vector::swap requires matching types.
template <class IT, class NT>
class FullyDistSpVec: public FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>
{
public:
    FullyDistSpVec ( );
    explicit FullyDistSpVec ( IT glen );
    FullyDistSpVec ( std::shared_ptr<CommGrid> grid);
    FullyDistSpVec ( std::shared_ptr<CommGrid> grid, IT glen);

    template <typename _UnaryOperation>
    FullyDistSpVec (const FullyDistVec<IT,NT> & rhs, _UnaryOperation unop);
    FullyDistSpVec (const FullyDistVec<IT,NT> & rhs); // Conversion copy-constructor
    FullyDistSpVec (IT globalsize, const FullyDistVec<IT,IT> & inds, const FullyDistVec<IT,NT> & vals, bool SumDuplicates = false);
    FullyDistSpVec (std::shared_ptr<CommGrid> grid, IT globallen, const std::vector<IT>& indvec, const std::vector<NT> & numvec, bool SumDuplicates = false, bool sorted=false);

    IT NnzUntil() const;
    FullyDistSpVec<IT,NT> Invert (IT globallen);
    template <typename _BinaryOperationIdx, typename _BinaryOperationVal, typename _BinaryOperationDuplicate>
    FullyDistSpVec<IT,NT> Invert (IT globallen, _BinaryOperationIdx __binopIdx, _BinaryOperationVal __binopVal, _BinaryOperationDuplicate __binopDuplicate);
    template <typename _BinaryOperationIdx, typename _BinaryOperationVal>
    FullyDistSpVec<IT,NT> InvertRMA (IT globallen, _BinaryOperationIdx __binopIdx, _BinaryOperationVal __binopVal);

    template <typename NT1, typename _UnaryOperation>
    void Select (const FullyDistVec<IT,NT1> & denseVec, _UnaryOperation unop);
    template <typename _UnaryOperation>
    void FilterByVal (FullyDistSpVec<IT,IT> Selector, _UnaryOperation __unop, bool filterByIndex);
    template <typename NT1>
    void Setminus (const FullyDistSpVec<IT,NT1> & other);

    //template <typename NT1, typename _UnaryOperation>
    //void Set (FullyDistSpVec<IT,NT1> Selector, _UnaryOperation __unop);

    template <typename NT1, typename _UnaryOperation, typename _BinaryOperation>
    void SelectApply (const FullyDistVec<IT,NT1> & denseVec, _UnaryOperation __unop, _BinaryOperation __binop);

    //! like operator=, but instead of making a deep copy it just steals the contents.
    //! Useful for places where the "victim" will be destroyed immediately after the call.
    void stealFrom(FullyDistSpVec<IT,NT> & victim);
    FullyDistSpVec<IT,NT> & operator=(const FullyDistSpVec< IT,NT > & rhs);
    FullyDistSpVec<IT,NT> & operator=(const FullyDistVec< IT,NT > & rhs); // convert from dense
    FullyDistSpVec<IT,NT> & operator=(NT fixedval) // assign fixed value
    {
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(size_t i=0; i < ind.size(); ++i)
            num[i] = fixedval;
        return *this;
    }
    FullyDistSpVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs);
    FullyDistSpVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs);

    class ScalarReadSaveHandler
    {
    public:
        NT getNoNum(IT index) { return static_cast<NT>(1); }
        template <typename c, typename t>
        NT read(std::basic_istream<c,t>& is, IT index)
        {
            NT v;
            is >> v;
            return v;
        }
        template <typename c, typename t>
        void save(std::basic_ostream<c,t>& os, const NT& v, IT index)
        {
            os << v;
        }
    };

    template <class HANDLER>
    void ParallelWrite(const std::string & filename, bool onebased, HANDLER handler, bool includeindices = true, bool includeheader = false);
    void ParallelWrite(const std::string & filename, bool onebased, bool includeindices = true) { ParallelWrite(filename, onebased, ScalarReadSaveHandler(), includeindices); };

    template <typename _BinaryOperation>
    void ParallelRead (const std::string & filename, bool onebased, _BinaryOperation BinOp);

    //! Totally obsolete version that only accepts an ifstream object and ascii files
    template <class HANDLER>
    std::ifstream& ReadDistribute (std::ifstream& infile, int master, HANDLER handler);
    std::ifstream& ReadDistribute (std::ifstream& infile, int master) { return ReadDistribute(infile, master, ScalarReadSaveHandler()); }

    template <class HANDLER>
    void SaveGathered(std::ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false);
    void SaveGathered(std::ofstream& outfile, int master) { SaveGathered(outfile, master, ScalarReadSaveHandler()); }

    template <typename NNT> operator FullyDistSpVec< IT,NNT > () const //!< Type conversion operator
    {
        FullyDistSpVec<IT,NNT> CVT(commGrid);
        CVT.ind = std::vector<IT>(ind.begin(), ind.end());
        CVT.num = std::vector<NNT>(num.begin(), num.end());
        CVT.glen = glen;
        return CVT;
    }

    bool operator==(const FullyDistSpVec<IT,NT> & rhs) const
    {
        FullyDistVec<IT,NT> v = *this;
        FullyDistVec<IT,NT> w = rhs;
        return (v == w);
    }

    void PrintInfo(std::string vecname) const;
    void iota(IT globalsize, NT first);
    void nziota(NT first);
    FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const;	//!< SpRef (expects ri to be 0-based)
    void SetElement (IT indx, NT numx);	// element-wise assignment
    void DelElement (IT indx); // element-wise deletion
    NT operator[](IT indx);
    bool WasFound() const { return wasFound; }

    //! sort the vector itself, return the permutation vector (0-based)
    FullyDistSpVec<IT, IT> sort();

#if __cplusplus > 199711L
    template <typename _BinaryOperation = minimum<NT> >
    FullyDistSpVec<IT, NT> Uniq(_BinaryOperation __binary_op = _BinaryOperation(), MPI_Op mympiop = MPI_MIN);
#else
    template <typename _BinaryOperation >
    FullyDistSpVec<IT, NT> Uniq(_BinaryOperation __binary_op, MPI_Op mympiop);
#endif

    // Aydin TODO: parallelize with OpenMP
    template <typename _UnaryOperation>
    FullyDistSpVec<IT,NT> Prune(_UnaryOperation __unary_op, bool inPlace = true) //<! Prune any nonzero entries for which the __unary_op evaluates to true (solely based on value)
    {
        FullyDistSpVec<IT,NT> temp(commGrid);
        IT spsize = ind.size();
        for(IT i=0; i< spsize; ++i)
        {
            if(!(__unary_op(num[i]))) // keep this nonzero
            {
                temp.ind.push_back(ind[i]);
                temp.num.push_back(num[i]);
            }
        }
        if (inPlace)
        {
            ind.swap(temp.ind);
            num.swap(temp.num); // bug fix: was ind.swap(temp.num), which swapped the wrong vector
            return FullyDistSpVec<IT,NT>(commGrid); // return blank to match signature
        }
        else
        {
            return temp;
        }
    }

    IT getlocnnz() const
    {
        return ind.size();
    }
    IT getnnz() const
    {
        IT totnnz = 0;
        IT locnnz = ind.size();
        MPI_Allreduce( &locnnz, &totnnz, 1, MPIType<IT>(), MPI_SUM, commGrid->GetWorld());
        return totnnz;
    }

    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::LengthUntil;
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::MyLocLength;
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::MyRowLength;
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::TotalLength;
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::Owner;
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::RowLenUntil;

    void setNumToInd()
    {
        IT offset = LengthUntil();
        IT spsize = ind.size();
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(IT i=0; i< spsize; ++i)
            num[i] = ind[i] + offset;
    }

    template <typename _Predicate>
    IT Count(_Predicate pred) const;	//!< Return the number of elements for which pred is true

    template <typename _UnaryOperation>
    void Apply(_UnaryOperation __unary_op)
    {
        //transform(num.begin(), num.end(), num.begin(), __unary_op);
        IT spsize = num.size();
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(IT i=0; i < spsize; ++i)
            num[i] = __unary_op(num[i]);
    }

    template <typename _BinaryOperation>
    void ApplyInd(_BinaryOperation __binary_op)
    {
        IT offset = LengthUntil();
        IT spsize = ind.size();
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(IT i=0; i < spsize; ++i)
            num[i] = __binary_op(num[i], ind[i] + offset);
    }

    template <typename _BinaryOperation>
    NT Reduce(_BinaryOperation __binary_op, NT init) const;
    template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
    OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op) const;

    void DebugPrint();
    std::shared_ptr<CommGrid> getcommgrid() const { return commGrid; }
    void Reset();
    NT GetLocalElement(IT indx);
    void BulkSet(IT inds[], int count);
    std::vector<IT> GetLocalInd (){std::vector<IT> rind = ind; return rind;};
    std::vector<NT> GetLocalNum (){std::vector<NT> rnum = num; return rnum;};

    template <typename _Predicate>
    FullyDistVec<IT,IT> FindInds(_Predicate pred) const;
    template <typename _Predicate>
    FullyDistVec<IT,NT> FindVals(_Predicate pred) const;

protected:
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::glen;
    using FullyDist<IT,NT,typename combblas::disable_if< combblas::is_boolean<NT>::value, NT >::type>::commGrid;

private:
    std::vector< IT > ind;	// ind.size() give the number of nonzeros
    std::vector< NT > num;
    bool wasFound; // true if the last GetElement operation returned an actual value

    template <typename _BinaryOperation>
    void SparseCommon(std::vector< std::vector < std::pair<IT,NT> > > & data, _BinaryOperation BinOp);

#if __cplusplus > 199711L
    template <typename _BinaryOperation = minimum<NT> >
    FullyDistSpVec<IT, NT> UniqAll2All(_BinaryOperation __binary_op = _BinaryOperation(), MPI_Op mympiop = MPI_MIN);
#else
    template <typename _BinaryOperation >
    FullyDistSpVec<IT, NT> UniqAll2All(_BinaryOperation __binary_op, MPI_Op mympiop);
#endif

    template <class IU, class NU>
    friend class FullyDistSpVec;
    template <class IU, class NU>
    friend class FullyDistVec;
    template <class IU, class NU, class UDER>
    friend class SpParMat;
    template <class IU, class NU>
    friend class SparseVectorLocalIterator;

    template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
    friend FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote>
    SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x );
    template <typename SR, typename IU, typename NUM, typename UDER>
    friend FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote>
    SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue);
    template <typename VT, typename IU, typename UDER>	// NoSR version (in BFSFriends.h)
    friend FullyDistSpVec<IU,VT>  SpMV (const SpParMat<IU,bool,UDER> & A, const FullyDistSpVec<IU,VT> & x, OptBuf<int32_t, VT > & optbuf);
    template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
    friend void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,bool indexisvalue, OptBuf<int32_t, OVT > & optbuf);
    template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
    friend void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y,bool indexisvalue, OptBuf<int32_t, OVT > & optbuf, PreAllocatedSPA<OVT> & SPA);

    template <typename IU, typename NU1, typename NU2>
    friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
    EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero);
    template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
    friend FullyDistSpVec<IU,RET>
    EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
    template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
    friend FullyDistSpVec<IU,RET>
    EWiseApply_threaded (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
    template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
    friend FullyDistSpVec<IU,RET>
    EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp);

    template <typename IU>
    friend void RandPerm(FullyDistSpVec<IU,IU> & V); 	// called on an existing object, randomly permutes it
    template <typename IU>
    friend void RenameVertices(DistEdgeList<IU> & DEL);

    //! Helper functions for sparse matrix X sparse vector
    // Ariful: I made this an internal function in ParFriends.h
    //template <typename SR, typename IU, typename OVT>
    //friend void MergeContributions(FullyDistSpVec<IU,OVT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, OVT * & recvnumbuf, int rowneighs);
    template <typename IU, typename VT>
    friend void MergeContributions(FullyDistSpVec<IU,VT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, VT * & recvnumbuf, int rowneighs);

    template<typename IU, typename NV>
    friend void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue);

    template <class IU, class NU, class DER, typename _UnaryOperation>
    friend SpParMat<IU, bool, DER> PermMat1 (const FullyDistSpVec<IU,NU> & ri, const IU ncol, _UnaryOperation __unop);
};
}
#include "FullyDistSpVec.cpp"
#endif
|
kvstore_dist_server.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file mxnet_node.h
* \brief implement mxnet nodes
*/
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include <chrono>
#include <set>
#include <fstream>
#include <iostream>
#include <sstream>
#include <unordered_map>
#include <utility>
#include <unistd.h>
#include <ctime>
#include "ps/ps.h"
#include "mxnet/kvstore.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
namespace mxnet {
namespace kvstore {
// Control-plane commands exchanged between nodes via ps-lite SimpleApp
// messages; dispatched in RequestCommandHandle.
enum class CommandType {
kController, kStopServer, kSyncMode, kSetGradientCompression, kGetParaInfo, kSetParams, kStartTraining, kRequestParaInfo, kGetStragInfo,kRedistribute
};  // further define kGetParaInfo
// Kinds of key/value traffic; dispatched in DataHandleEx based on the
// request's cmd field.
enum class DataHandleType {
kDefaultPushPull, kCompressedPushPull, kRowSparsePushPull, kMoveParams
};
// Snapshot of a server's progress and parameter placement, serialized by
// ParamInfoToString / parsed by StringToParamInfo.
struct ParamInfo {
double speed;  // observed processing speed (units not shown here - TODO confirm against producer)
int step;      // current training iteration
// server_id:<key,size><key,size>
std::unordered_map<int, std::vector<std::pair<int, int>>> ps_kvs;  // per-server list of (key, size) pairs
};
/*
 * Split `strToSplit` into tokens on `delimiter` (std::getline
 * semantics: a trailing delimiter does not yield a trailing empty
 * token).
 *
 * Fixes: `inline` added - a non-inline free function defined in a
 * header violates the ODR when the header is included from more than
 * one translation unit; the string is now taken by const reference to
 * avoid a copy.
 */
inline std::vector<std::string> SplitStr(const std::string &strToSplit, char delimiter)
{
    std::stringstream tokenStream(strToSplit);
    std::string token;
    std::vector<std::string> tokens;
    while (std::getline(tokenStream, token, delimiter))
    {
        tokens.push_back(token);
    }
    return tokens;
}
// Serialize a ParamInfo as:
//   "speed step server_id,key:size,key:size, server_id,key:size,key:size, "
// (top-level fields space-separated, per-server lists comma-separated;
// the inverse of StringToParamInfo).
// `inline` added: a non-inline free-function definition in a header
// violates the ODR when included from multiple translation units.
inline std::string ParamInfoToString(const ParamInfo& param_info){
    std::string body;
    body.append(std::to_string(param_info.speed)).append(" ");
    body.append(std::to_string(param_info.step)).append(" ");
    LOG(INFO) << "Converting ParamInfo to string";
    for (const auto& server_kvs : param_info.ps_kvs) {
        body.append(std::to_string(server_kvs.first)).append(",");
        for (const auto& elem : server_kvs.second) {
            // elem.first is the (decoded) key, elem.second its size
            body.append(std::to_string(elem.first))
                .append(":")
                .append(std::to_string(elem.second))
                .append(",");
        }
        body.append(" ");
    }
    return body;
}
/*ParamInfo StringToParamInfo_Scaling(const std::string& body){
ParamInfo param_info;
std::vector<std::string> ps_metas = SplitStr(body,' ');
param_info.speed = std::stod(ps_metas.at(1));
param_info.step = std::stoi(ps_metas.at(2));
std::vector<std::string> key_map_metas = SplitStr(ps_metas.at(0),'_');
for(int i=0;i<key_map_metas.size();i++){
auto& keymap = key_map_metas.at(i);
if(keymap.size()){
std::vector<std::string> keymap_strs = SplitStr(keymap,',');
key_map_[std::stoi(keymap_strs.at(0))] = std::stoi(keymap_strs.at(1));
}
}
for(size_t i=3; i<ps_metas.size(); i++){
auto& ps_meta = ps_metas.at(i);
if (ps_meta.size()){ // the last elem is empty str
std::vector<std::string> key_size_strs = SplitStr(ps_meta, ',');
int server_id = std::stoi(key_size_strs.at(0));
for (size_t j=1; j<key_size_strs.size(); j++){
auto& key_size_str = key_size_strs.at(j);
if(key_size_str.size()){ // the last elem is empty str ,
std::vector<std::string> key_size = SplitStr(key_size_str, ':');
int key = std::stoi(key_size.at(0));
int size = std::stoi(key_size.at(1));
param_info.ps_kvs[server_id].push_back(std::make_pair(key,size));
}
}
}
}
return param_info;
}*/
// Parse a string produced by ParamInfoToString back into a ParamInfo.
// Layout: "speed step server_id,key:size,... server_id,key:size,... "
// `inline` added: a non-inline free-function definition in a header
// violates the ODR when included from multiple translation units.
inline ParamInfo StringToParamInfo(const std::string& body){
    ParamInfo param_info;
    std::vector<std::string> ps_metas = SplitStr(body, ' ');
    param_info.speed = std::stod(ps_metas.at(0));
    param_info.step = std::stoi(ps_metas.at(1));
    LOG(INFO) << "Converting string to ParamInfo";
    for (size_t i = 2; i < ps_metas.size(); i++) {
        const auto& ps_meta = ps_metas.at(i);
        if (ps_meta.empty()) {
            continue;  // trailing separator yields an empty field
        }
        std::vector<std::string> key_size_strs = SplitStr(ps_meta, ',');
        int server_id = std::stoi(key_size_strs.at(0));
        for (size_t j = 1; j < key_size_strs.size(); j++) {
            const auto& key_size_str = key_size_strs.at(j);
            if (key_size_str.empty()) {
                continue;  // trailing ','
            }
            std::vector<std::string> key_size = SplitStr(key_size_str, ':');
            param_info.ps_kvs[server_id].emplace_back(std::stoi(key_size.at(0)),
                                                      std::stoi(key_size.at(1)));
        }
    }
    return param_info;
}
// Update ps-lite's Postoffice node bookkeeping after elastic scaling:
// a positive scaling_server_id means that server joined, a negative one
// means |scaling_server_id| left. Zero is invalid.
// `inline` added: a non-inline free-function definition in a header
// violates the ODR when included from multiple translation units.
inline void UpdateMetas(int scaling_server_id){
    CHECK(scaling_server_id);  // 0 is not a valid scaling id
    LOG(INFO) << "Updating Postoffice::node_ids_ information";
    std::cerr<<"Node_id: "<<ps::MyID()<<" num_servers before update: "<<
        ps::NumServers()<<" scaling_server_id"<< scaling_server_id<<std::endl;
    if (scaling_server_id > 0){
        ps::Postoffice::Get()->UpdateNodeIDs(scaling_server_id, true);    // server added
    } else {
        ps::Postoffice::Get()->UpdateNodeIDs(-scaling_server_id, false);  // server removed
    }
    std::cerr<<"num_servers after: "<<ps::NumServers()<<std::endl;
}
/**
 * \brief executor runs a function using the thread called \ref Start
 *
 * Producer threads enqueue work via Exec() and block until it has run;
 * the single consumer thread sits in Start() and executes items in FIFO
 * order. A Block holding an empty std::function is the shutdown
 * sentinel pushed by Stop().
 */
class Executor {
public:
/**
* \brief start the executor: runs queued functions on the calling
* thread until the empty-function sentinel from Stop() is dequeued
*/
void Start() {
std::unique_lock<std::mutex> lk(mu_);
while (true) {
cond_.wait(lk, [this]{return !queue_.empty();});
Block blk = std::move(queue_.front());
queue_.pop();
lk.unlock();  // run user code outside the lock so Exec() can enqueue meanwhile
if (blk.f) {
blk.f(); blk.p->set_value();  // fulfilling the promise wakes the Exec() caller
} else {
blk.p->set_value(); break;  // empty function == stop request
}
lk.lock();
}
}
/**
* \brief function
*/
typedef std::function<void()> Func;
/**
* \brief let the thread called \ref Start to exec a function. threadsafe
*/
void Exec(const Func& func) {
Block blk(func);
auto fut = blk.p->get_future();
{
std::lock_guard<std::mutex> lk(mu_);
queue_.push(std::move(blk));
cond_.notify_one();
}
fut.wait();  // block until the executor thread has finished running func
}
/**
* \brief stop the thread, threadsafe
*/
void Stop() {
Exec(Func());  // an empty Func is the shutdown sentinel recognized in Start()
}
private:
// A queued work item: the function plus a promise used to signal completion.
struct Block {
explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
Func f;
std::shared_ptr<std::promise<void>> p;
};
std::queue<Block> queue_;  // pending work items, FIFO
std::mutex mu_;  // guards queue_
std::condition_variable cond_;  // signaled when queue_ becomes non-empty
};
class KVStoreDistServer {
public:
// Create the ps-lite server endpoint and register the two request
// handlers: SimpleApp commands go to RequestCommandHandle, key/value
// push/pull traffic goes to DataHandleEx.
KVStoreDistServer() {
using namespace std::placeholders;
resp_scaling.sender = 0;
LOG(INFO) << "CREATE SERVER, RESP_SCALING.SENDER "<<resp_scaling.sender;
ps_server_ = new ps::KVServer<float>(0);
static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
std::bind(&KVStoreDistServer::RequestCommandHandle, this, _1, _2));
ps_server_->set_request_handle(
std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
// sync_mode_ = false;
sync_mode_ = true;  // synchronous training by default
gradient_compression_ = std::make_shared<GradientCompression>();
log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
}
// Tear down the ps-lite server endpoint created in the constructor.
~KVStoreDistServer() {
delete ps_server_;
}
// Register the callback that executes frontend commands (invoked from
// RequestCommandHandle's fallback branch); must be non-empty.
void set_controller(const KVStore::Controller& controller) {
CHECK(controller);
controller_ = controller;
}
// Register the optimizer update callback used by ApplyUpdates; must be
// non-empty (without it, merged values are copied to the store verbatim).
void set_updater(const KVStore::Updater& updater)  {
CHECK(updater);
updater_ = updater;
}
/**
* \brief run the executor loop on the calling thread; blocks until a
* \a kStopServer command calls exec_.Stop() (NOTE(review): the old
* comment said \a kSyncMode, but RequestCommandHandle only stops the
* executor on kStopServer)
*/
void Run() {
exec_.Start();
}
private:
// Aggregation buffer used in synchronous mode: the worker requests that
// are still awaiting a response for a key, plus the merged array of the
// values they pushed (merge code not shown in this chunk).
struct MergeBuf {
std::vector<ps::KVMeta> request;  // pending requests to answer once all workers pushed
NDArray array;  // merged values from the workers' pushes
};
// Handle a control command (SimpleApp message). Most commands are
// acknowledged with the generic response at the end; commands that send
// their own response (stop / get-info / set-params) clear respHere.
void RequestCommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
LOG(INFO) << "Server " << ps::MyID() << " gets command " << recved.head;
bool respHere = true;  // whether to send the generic ack below
CommandType recved_type = static_cast<CommandType>(recved.head);
if (recved_type == CommandType::kStopServer) {
LOG(INFO) << "Server " << ps::MyID() << " is exiting";
app->Response(recved);
respHere = false;
exec_.Stop();  // unblocks Run()
} else if (recved_type == CommandType::kSyncMode) {
LOG(INFO) << "Server " << ps::MyID() << " is set to synchronous training";
sync_mode_ = true;
//app->Response(recved);
//std::cerr << "Send response to command type2 " << recved.head;
} else if (recved_type == CommandType::kSetGradientCompression) {
gradient_compression_->DecodeParams(recved.body);
} else if (recved_type == CommandType::kGetParaInfo) { // get parameters information, e.g., size, iteration
GetParaInfo(recved, app);
respHere = false;
} else if (recved_type == CommandType::kSetParams) {
SetParams(recved, app);
respHere = false;
} else {
// this uses value 0 for message id from frontend
// let the main thread to execute ctrl, which is necessary for python
LOG(INFO) << "Server " << ps::MyID() << " is set optimizer";
exec_.Exec([this, recved]() {
CHECK(controller_);
controller_(recved.head, recved.body);
});
}
if (respHere) {
app->Response(recved);
std::cerr << "Send response to command type " << recved.head << std::endl;
}
}
/**
 * \brief Entry point for all KV data traffic. Routes a request to the
 * handler matching the command encoded in req_meta.cmd.
 */
void DataHandleEx(const ps::KVMeta& req_meta,
                  const ps::KVPairs<real_t>& req_data,
                  ps::KVServer<real_t>* server) {
  switch (static_cast<DataHandleType>(req_meta.cmd)) {
    case DataHandleType::kRowSparsePushPull:
      DataHandleRowSparse(req_meta, req_data, server);
      break;
    case DataHandleType::kCompressedPushPull:
      DataHandleCompressed(req_meta, req_data, server);
      break;
    case DataHandleType::kMoveParams:
      // parameter migration between servers during scaling
      DataHandleMoveParams(req_meta, req_data, server);
      break;
    default:
      // dense push/pull, possibly with multiple keys per request
      AdvancedDataHandleDefault(req_meta, req_data, server);
      break;
  }
}
/**
 * \brief In sync mode: once every worker has pushed for `key`, apply the
 * merged gradient to the stored weight, reply to all buffered requests,
 * and advance the per-key step counter. Otherwise just keep the partial
 * sum readable.
 *
 * \param key     decoded parameter key
 * \param merged  aggregation buffer holding the summed gradient and the
 *                pending push requests for this key
 * \param stored  the server-side weight to update
 * \param server  used to send responses to the buffered requests
 */
inline void ApplyUpdates(const int key, MergeBuf *merged, NDArray *stored,
                         ps::KVServer<real_t>* server) {
  // no problem with this if condition when scaling workers
  if (merged->request.size() == (size_t) ps::NumWorkers()) {
    // let the main thread to execute updater_, which is necessary for python
    if (updater_) {
      exec_.Exec([this, key, merged, stored](){
        CHECK(updater_);
        updater_(key, merged->array, stored);
      });
    } else {
      // if no updater, just copy
      CopyFromTo(merged->array, stored);
    }
    if (log_verbose_) {
      LOG(INFO) << "sync response to " << merged->request.size() << " workers";
    }
    // acknowledge every worker whose push was merged this round
    for (const auto& req : merged->request) {
      server->Response(req);
    }
    merged->request.clear();
    //Waits until all previous write operations on the array are finished.
    //This method guarantees that all previous write operations
    //that pushed into the backend engine for execution are actually finished.
    stored->WaitToRead();
    // add a counter here to calculate the iteration for sync updates
    // e.g., counter[key] += 1
    steps_[key] += 1;
    SpeedoMeter(steps_[key]);
    // when the scaling step is reached, start counting pulls for this key
    if (steps_[key] == start_scaling_step){
      num_pull_key_[key] = 0;
    }
    //CheckIsScaling(key, server);
    std::cerr << "************************************";
    std::cerr << "Update parameters on server " << ps::MyID() << std::endl;
  } else {
    // not all workers have pushed yet: wait so the partial sum stays valid
    // after the caller's stack-allocated inputs go away
    merged->array.WaitToRead();
  }
}
/**
 * \brief Extract row ids from a row_sparse key array.
 *
 * keys[0] is the master key; keys[1..num_rows] encode master_key + row_id.
 * Writes exactly num_rows row ids into `indices`.
 *
 * Fix: the original unconditionally wrote indices[0] = 0 before the loop.
 * That store was overwritten by the first iteration whenever num_rows > 0,
 * and was an out-of-bounds write when num_rows == 0.
 */
void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
                  const int64_t master_key, const int64_t num_rows) {
  for (int64_t i = 1; i <= num_rows; i++) {
    const int key = DecodeKey(keys[i]);
    indices[i - 1] = key - master_key;
  }
}
/**
 * \brief Push/pull handler for row_sparse parameters.
 *
 * Key layout: req_data.keys[0] is the master (parameter) key with lens[0]==0;
 * keys[1..n] encode master_key + row_id, each row having lens[i] == unit_len.
 *
 * Push paths: first push initializes the full dense-row storage; sync pushes
 * are accumulated in merge_buf_ and applied via ApplyUpdates; async pushes
 * call updater_ directly. Pull returns only the requested rows.
 */
void DataHandleRowSparse(const ps::KVMeta& req_meta,
                         const ps::KVPairs<real_t>& req_data,
                         ps::KVServer<real_t>* server) {
  int master_key = DecodeKey(req_data.keys[0]);
  // number of rows carried by this request (keys[0] is the master key)
  auto num_rows = req_data.keys.size() - 1;
  auto& stored = store_[master_key];
  if (req_meta.push) {
    CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
    CHECK_EQ(req_data.lens[0], 0);
    real_t* data = req_data.vals.data();
    if (stored.is_none()) {
      if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
      // initialization
      CHECK_GT(num_rows, 0) << "init with empty data is not supported";
      auto unit_len = req_data.lens[1];
      CHECK_GT(unit_len, 0);
      size_t ds[] = {num_rows, (size_t) unit_len};
      TShape dshape(ds, ds + 2);
      CHECK_EQ(req_data.vals.size(), num_rows * unit_len);
      TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
      NDArray recved = NDArray(recv_blob, 0);
      stored = NDArray(kRowSparseStorage, dshape, Context());
      // populate the full row index set, then copy the dense payload in
      Engine::Get()->PushAsync(
        [recved, stored](RunContext ctx, Engine::CallbackOnComplete on_complete) {
          NDArray rsp = stored;
          stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
          mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
          op::PopulateFullIdxRspImpl(s, &rsp);
          mshadow::Copy(rsp.data().FlatTo1D<cpu, float>(),
                        recved.data().FlatTo1D<cpu, float>(), s);
          on_complete();
        }, recved.ctx(), {recved.var()}, {stored.var()},
        FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
      // block until the engine op finished: recved aliases req_data's memory,
      // which is only valid while this function runs
      stored.WaitToRead();
      server->Response(req_meta);
      return;
    }
    // synced push
    if (sync_mode_) {
      if (log_verbose_) LOG(INFO) << "sync push: " << master_key << " " << req_data.keys;
      auto& merged = merge_buf_[master_key];
      if (merged.array.is_none()) {
        merged.array = NDArray(kRowSparseStorage, stored.shape(), Context());
      }
      if (num_rows == 0) {
        // reset to zeros
        if (merged.request.size() == 0) {
          // first contributor this round pushed nothing: start from zeros
          merged.array = NDArray(kRowSparseStorage, stored.shape(), Context());
        } else {
          // nothing to aggregate
        }
        merged.request.push_back(req_meta);
        ApplyUpdates(master_key, &merged, &stored, server);
        return;
      }
      auto unit_len = req_data.lens[1];
      CHECK_GT(unit_len, 0);
      // indices
      std::vector<int64_t> indices(num_rows);
      DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
      // data
      TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
      size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
      TShape dshape(ds, ds + 2);
      TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
      // row_sparse NDArray
      NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
      if (merged.request.size() == 0) {
        CopyFromTo(recved, &merged.array, 0);
      } else {
        NDArray out(kRowSparseStorage, stored.shape(), Context());
        std::vector<Engine::VarHandle> const_vars;
        const_vars.push_back(recved.var());
        const_vars.push_back(merged.array.var());
        // accumulate row_sparse gradients
        // TODO(haibin) override + operator for row_sparse NDArray
        // instead of calling BinaryComputeRspRsp directly
        using namespace mshadow;
        Engine::Get()->PushAsync(
          [recved, merged, out](RunContext ctx, Engine::CallbackOnComplete on_complete) {
            op::ElemwiseBinaryOp::ComputeEx<cpu, mshadow::op::plus>(
              {}, {}, {recved, merged.array}, {kWriteTo}, {out});
            on_complete();
          }, recved.ctx(), const_vars, {out.var()},
          FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
        CopyFromTo(out, &merged.array, 0);
      }
      merged.request.push_back(req_meta);
      ApplyUpdates(master_key, &merged, &stored, server);
    } else {
      // async push
      if (log_verbose_) LOG(INFO) << "async push: " << master_key;
      if (num_rows == 0) {
        server->Response(req_meta);
        return;
      }
      auto unit_len = req_data.lens[1];
      CHECK_GT(unit_len, 0);
      // indices
      std::vector<int64_t> indices(num_rows);
      DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
      TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
      size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
      TShape dshape(ds, ds + 2);
      TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
      NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
      // NOTE(review): the lambda captures local `recved` by reference; this is
      // safe only if exec_.Exec runs synchronously before WaitToRead below —
      // confirm Executor semantics.
      exec_.Exec([this, master_key, &recved, &stored](){
        CHECK(updater_);
        updater_(master_key, recved, &stored);
      });
      server->Response(req_meta);
      stored.WaitToRead();
    }
  } else {
    // pull
    if (log_verbose_) LOG(INFO) << "pull: " << master_key;
    ps::KVPairs<real_t> response;
    if (num_rows == 0) {
      // no rows requested: echo the keys with zero lengths
      std::vector<int> lens(req_data.keys.size(), 0);
      response.keys = req_data.keys;
      response.lens.CopyFrom(lens.begin(), lens.end());
      server->Response(req_meta, response);
      return;
    }
    CHECK(!stored.is_none()) << "init " << master_key << " first";
    auto shape = stored.shape();
    auto unit_len = shape.ProdShape(1, shape.ndim());
    const float* data = stored.data().dptr<float>();
    auto len = unit_len * num_rows;
    // concat values
    response.vals.resize(len);
    #pragma omp parallel for
    for (size_t i = 1; i <= num_rows; i++) {
      // each requested key maps to one stored row; copy it into the response
      int key = DecodeKey(req_data.keys[i]);
      int64_t row_id = key - master_key;
      const auto src = data + row_id * unit_len;
      auto begin = (i - 1) * unit_len;
      auto end = i * unit_len;
      response.vals.segment(begin, end).CopyFrom(src, unit_len);
    }
    // setup response
    response.keys = req_data.keys;
    std::vector<int> lens(req_data.keys.size(), unit_len);
    lens[0] = 0;
    response.lens.CopyFrom(lens.begin(), lens.end());
    server->Response(req_meta, response);
  }
}
/**
 * \brief Answer a dense single-key pull with the stored value plus its
 * version (step). Also drives the scaling handshake: once every worker has
 * pulled every key at start_scaling_step, parameter migration starts.
 */
void DefaultStorageResponse(int key, const NDArray& stored,
                            const ps::KVMeta& req_meta,
                            const ps::KVPairs<real_t> &req_data,
                            ps::KVServer<real_t>* server) {
  // worker 0: push, pull then start training
  // other worker: pull then start training
  // vers on servers: not count the first from worker 0
  ps::KVPairs<real_t> response;
  CHECK(!stored.is_none()) << "init " << key << " first";
  auto len = stored.shape().Size();
  response.keys = req_data.keys;
  response.lens = {len};
  // TODO(mli) try to remove this CopyFrom
  response.vals.CopyFrom(static_cast<const float*>(stored.data().dptr_), len);
  // report the parameter's version so workers can detect staleness
  auto step = steps_[key];
  response.vers = {step};
  server->Response(req_meta, response);
  //LOG(INFO) << "Server " << ps::MyID() << " sends back parameters with key " << key << " vers: " << step;
  if (step == start_scaling_step){
    // count pulls for this key; when all workers pulled all keys, migrate
    num_pull_key_[key] ++;
    if (num_pull_key_[key] == ps::NumWorkers()){
      end_pulls_++;
      if (end_pulls_ == store_.size()) { // pull for all keys is finished
        num_pull_key_.clear();
        end_pulls_ = 0;
        MoveParams(server);
      }
    }
  }
}
/**
 * \brief Push/pull handler for gradient-compressed values.
 *
 * Push layout: keys[0] is a dummy key encoding the ORIGINAL (uncompressed)
 * array size with lens[0]==0; keys[1] is the real key and lens[1] the
 * compressed payload length. Pull is answered from the uncompressed store.
 *
 * Fix: `decomp_buf` must be a reference into decomp_buf_. The original took
 * a by-value copy, so the lazily created buffer was never cached in the map
 * (a fresh NDArray was allocated on every push) and the async branch
 * captured a reference to a local that dies when this function returns.
 */
void DataHandleCompressed(const ps::KVMeta& req_meta,
                          const ps::KVPairs<real_t> &req_data,
                          ps::KVServer<real_t>* server) {
  if (req_meta.push) {
    // there used several WaitToRead, this is because \a recved's memory
    // could be deallocated when this function returns. so we need to make sure
    // the operators with \a NDArray are actually finished
    // first for dummy key which represents original size of array, whose len is 0
    CHECK_EQ(req_data.keys.size(), (size_t)2);
    CHECK_EQ(req_data.lens.size(), (size_t)2);
    CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]);
    int original_size = DecodeKey(req_data.keys[0]);
    int key = DecodeKey(req_data.keys[1]);
    auto& stored = store_[key];
    size_t ds[] = {(size_t)req_data.lens[1]};
    TShape dshape(ds, ds + 1);
    TBlob recv_blob((real_t*) req_data.vals.data(), // NOLINT(*)
                    dshape, cpu::kDevMask);
    NDArray recved = NDArray(recv_blob, 0);
    // reference (not a copy) so the lazily allocated buffer is cached per key
    // and stays alive for the async updater below
    auto& decomp_buf = decomp_buf_[key];
    dshape = TShape{(int64_t) original_size};
    if (decomp_buf.is_none()) {
      decomp_buf = NDArray(dshape, Context());
    }
    if (stored.is_none()) {
      // initialization: decompress straight into the fresh store
      stored = NDArray(dshape, Context());
      gradient_compression_->Dequantize(recved, &stored, 0);
      server->Response(req_meta);
      stored.WaitToRead();
    } else if (sync_mode_) {
      // synced push: accumulate the decompressed gradient per worker
      auto& merged = merge_buf_[key];
      if (merged.array.is_none()) {
        merged.array = NDArray(dshape, Context());
      }
      if (merged.request.size() == 0) {
        gradient_compression_->Dequantize(recved, &merged.array, 0);
      } else {
        gradient_compression_->Dequantize(recved, &decomp_buf, 0);
        merged.array += decomp_buf;
      }
      merged.request.push_back(req_meta);
      ApplyUpdates(key, &merged, &stored, server);
    } else {
      // async push
      gradient_compression_->Dequantize(recved, &decomp_buf, 0);
      exec_.Exec([this, key, &decomp_buf, &stored]() {
        CHECK(updater_);
        updater_(key, decomp_buf, &stored);
      });
      server->Response(req_meta);
      stored.WaitToRead();
    }
  } else {  // pull
    CHECK_EQ(req_data.keys.size(), (size_t)1);
    CHECK_EQ(req_data.lens.size(), (size_t)0);
    int key = DecodeKey(req_data.keys[0]);
    DefaultStorageResponse(key, store_[key], req_meta, req_data, server);
  }
}
// can only handle request with 1 key
/**
 * \brief Dense push/pull handler for exactly one key per request.
 * First push initializes the store; sync pushes aggregate via merge_buf_ /
 * ApplyUpdates; async pushes call updater_ directly; pulls are answered by
 * DefaultStorageResponse.
 *
 * Fix: the original CHECK_EQ failure message indexed req_data.keys[1] and
 * req_data.lens[1], which is out of bounds exactly when the check fires
 * with fewer than two entries, and read lens[0] on pulls where lens is
 * empty. The message now reports only sizes that always exist.
 */
void DataHandleDefault(const ps::KVMeta& req_meta,
                       const ps::KVPairs<real_t> &req_data,
                       ps::KVServer<real_t>* server) {
  CHECK_EQ(req_meta.cmd, static_cast<int>(DataHandleType::kDefaultPushPull));
  // do some check
  CHECK_EQ(req_data.keys.size(), (size_t)1) << "ERROR: Node " << ps::MyID()
    << " expected exactly 1 key but got " << req_data.keys.size()
    << " keys, " << req_data.lens.size() << " lens, total value size: "
    << req_data.vals.size();
  if (req_meta.push) {
    CHECK_EQ(req_data.lens.size(), (size_t)1);
    CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
  }
  int key = DecodeKey(req_data.keys[0]);
  auto& stored = store_[key];
  // there used several WaitToRead, this is because \a recved's memory
  // could be deallocated when this function returns. so we need to make sure
  // the operators with \a NDArray are actually finished
  if (req_meta.push) {
    LOG(INFO) << "Server " << ps::MyID() << " received gradients with key " << key;
    size_t ds[] = {(size_t)req_data.lens[0]};
    TShape dshape(ds, ds + 1);
    TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*)
                    dshape, cpu::kDevMask); //cpu
    NDArray recved = NDArray(recv_blob, 0); // create NDArray that shares data with TBlob, 0 is dev id
    if (stored.is_none()) {
      // initialization
      stored = NDArray(dshape, Context());
      CopyFromTo(recved, &stored, 0);
      server->Response(req_meta);
      stored.WaitToRead();
    } else if (sync_mode_) {
      // synced push
      auto& merged = merge_buf_[key];
      if (merged.array.is_none()) {
        merged.array = NDArray(dshape, Context());
      }
      if (merged.request.size() == 0) {
        CopyFromTo(recved, &merged.array, 0);
      } else {
        merged.array += recved;
      }
      merged.request.push_back(req_meta);
      ApplyUpdates(key, &merged, &stored, server);
    } else {
      // async push
      exec_.Exec([this, key, &recved, &stored](){
        CHECK(updater_);
        updater_(key, recved, &stored);
      });
      server->Response(req_meta);
      stored.WaitToRead();
      // add a counter here to calculate the iteration for async updates
      // e.g., counter[key] += 1
    }
  } else {
    DefaultStorageResponse(key, stored, req_meta, req_data, server);
  }
}
/**
 * \brief Merge/update handler for a multi-key push request that was sliced
 * into per-key KVPairs by AdvancedDataHandleDefault.
 *
 * In sync mode each key's gradient is accumulated in merge_buf_; once the
 * last worker's push arrives, the update is applied for every key and a
 * single response acknowledges the whole request. Initialization and async
 * pushes are applied immediately.
 */
void PushHandle(const ps::KVMeta& req_meta,
                const std::vector<ps::KVPairs<real_t>>& kvpairs,
                ps::KVServer<real_t>* server){
  // merge/update
  std::vector<int> decoded_keys;
  // apply_updates: this push completed a sync round; no_reply: still waiting
  bool apply_updates = false;
  bool no_reply = false;
  for(const ps::KVPairs<real_t> &kvpair : kvpairs){
    // NOTE(review): `steps` is unused and indexes steps_ by the UNdecoded
    // key (kvpair.keys[0]) rather than the decoded one — confirm intent.
    auto steps = steps_[kvpair.keys[0]];
    int key = 0;
    key = DecodeKey(kvpair.keys[0]);
    decoded_keys.push_back(key);
    auto& stored = store_[key];
    size_t ds[] = {(size_t)kvpair.lens[0]};
    TShape dshape(ds, ds + 1);
    TBlob recv_blob((real_t*)kvpair.vals.data(), dshape, cpu::kDevMask); //cpu
    // create NDArray that shares data with TBlob, 0 is dev id
    NDArray recved = NDArray(recv_blob, 0);
    if (stored.is_none()) {
      // initialization
      stored = NDArray(dshape, Context());
      CopyFromTo(recved, &stored, 0);
    } else if (sync_mode_) {
      // synced push
      auto& merged = merge_buf_[key];
      if (merged.array.is_none()) {
        merged.array = NDArray(dshape, Context());
      }
      if (merged.request.size() == 0) {
        // diagnostic only: shape mismatch would corrupt the merge
        if(recved.shape()!=merged.array.shape()){
          std::cerr<<"MERGE ERROR"<<ps::MyID()
                   <<" key:"<<key<<" recved "<<recved.shape()
                   <<" kv.lens:"<<kvpair.lens[0]
                   <<" merged"<<merged.array.shape()<<std::endl;
        }
        CopyFromTo(recved, &merged.array, 0);
      } else {
        merged.array += recved;
      }
      merged.request.push_back(req_meta); // add for each key
      if (merged.request.size() == (size_t) ps::NumWorkers()) {
        if (updater_) {
          exec_.Exec([this, key, &merged, &stored](){
            CHECK(updater_); updater_(key, merged.array, &stored);});
        } else {
          // if no updater, just copy
          CopyFromTo(merged.array, stored);
        }
        apply_updates = true;
      } else {
        no_reply = true;
      }
    } else {
      // async push
      exec_.Exec([this, key, &recved, &stored](){
        CHECK(updater_); updater_(key, recved, &stored);
      });
    }
  }
  // wait to read and send response
  if (apply_updates && !no_reply){ // apply gradients
    bool first_time = true;
    for(const int key : decoded_keys){
      auto& merged = merge_buf_[key];
      if(first_time){ // reply once
        // all keys buffered the same requests; acknowledging the first
        // key's list answers every worker exactly once
        for (const auto& req : merged.request) {
          server->Response(req);
        }
        first_time = false;
      }
      merged.request.clear(); // clear for each key
      store_[key].WaitToRead();
      steps_[key] += 1;
      SpeedoMeter(steps_[key]);
      // arm the per-key pull counter when the scaling step is reached
      if (steps_[key] == start_scaling_step){
        num_pull_key_[key] = 0;
      }
    }
  } else if (!apply_updates && !no_reply){ // initialization or async
    server->Response(req_meta);
    for(const int key: decoded_keys){
      store_[key].WaitToRead();
    }
  } else if (!apply_updates && no_reply){ // merge with no update
    // keep partial sums valid after the request buffers are freed
    for(const int key: decoded_keys){
      merge_buf_[key].array.WaitToRead();
    }
  }
}
/**
 * \brief Answer a multi-key pull: concatenate the stored values for all
 * requested keys into one response, attach per-key versions, and drive the
 * scaling handshake (parameter migration once every worker has pulled every
 * key at start_scaling_step).
 */
void PullHandle(const ps::KVMeta& req_meta,
                const std::vector<ps::KVPairs<real_t>>& kvpairs,
                ps::KVServer<real_t>* server){
  std::vector<int> decoded_keys;
  ps::KVPairs<real_t> response;
  int tot_size = 0; // count total value size
  // first pass: collect keys and size the response buffer
  for(const ps::KVPairs<real_t> &kvpair : kvpairs){
    response.keys.push_back(kvpair.keys[0]);
    int key = DecodeKey(kvpair.keys[0]);
    decoded_keys.push_back(key);
    auto len = store_[key].shape().Size();
    tot_size += len;
  }
  ps::SArray<real_t> vals(tot_size);
  response.vals = vals;
  real_t* p_vals = response.vals.data();
  for(const int key: decoded_keys){ // copy data for response
    auto& stored = store_[key];
    CHECK(!stored.is_none()) << "init " << key << " first";
    auto len = stored.shape().Size();
    memcpy(p_vals, static_cast<const float*>(stored.data().dptr_), len*sizeof(real_t));
    p_vals += len;
    response.lens.push_back(len);
    // std::cerr<<"Server "<<ps::MyID()<<" receive pull for key "<<key<<" size "<<len<<std::endl;
    // dead experiment hook: key is never -1 after DecodeKey's bound check
    if(key==-1){
      time_t now = time(0);
      char* dt = ctime(&now);
      std::cerr<<"Assume occuring straggler, sleeping 1s, time:"<<dt<<std::endl;
      sleep(1);
    }
    auto step = steps_[key];
    response.vers.push_back(step);
    // disabled debug trace (note the `false &&`)
    if(false&&start_scaling_step>0){
      std::cerr<<"Receiving Pull:";
      std::cerr<<"Server "<<ps::MyID()<<" step["<<key<<"] "<<step<<
        " start_scaling_step "<<start_scaling_step<<" num_pull_key_ "
        << num_pull_key_[key]<<" from "<<req_meta.sender<<std::endl;
    }
    if (step == start_scaling_step){ // move parameter when scaling
      num_pull_key_[key] ++;
      // std::cerr<<"Server "<<ps::MyID()<<" step["<<key<<"] "<<step<<
      //   " start_scaling_step "<<start_scaling_step<<" num_pull_key_ "
      //   << num_pull_key_[key]<<" from "<<req_meta.sender<<std::endl;
      if (num_pull_key_[key] == ps::NumWorkers()){
        end_pulls_++;
        // std::cerr<<" end_pull:"<<end_pulls_<<" pull_size:"<<pull_size<<std::endl;
        if (end_pulls_ == pull_size) { // pull for all keys is finished
          num_pull_key_.clear();
          end_pulls_ = 0;
          // std::cerr<<"endl_pulls_["<<end_pulls_<<"] = store_.size[" <<store_.size()<<"]"<<std::endl;
          finish_pull = true;
          MoveParams(server);
        }
      }
    }
  }
  // one-time bookkeeping: record this server's total parameter size once it
  // stops growing (all keys initialized)
  if(is_init == false){
    int total_size = 0;
    for (const auto& elem: store_){
      int key = elem.first; // the key after decoding
      int size = elem.second.shape().Size();
      total_size += size;
    }
    if(total_size>overal_size){
      overal_size = total_size;
    } else if (total_size == overal_size){
      is_init=true;
      std::ofstream myfile;
      char buf[80];
      getcwd(buf,80);
      //std::cerr<<"Buf of getcwd is:"<<buf<<std::endl;
      std::string wd;
      wd = std::string(buf) + std::string("/results/");
      std::string file_name = wd+std::string("keyTimeRecorder_server")+".csv";
      myfile.open(file_name,std::ios_base::app);
      myfile<<"Server,"<<ps::MyID()<<", total_size,"<<total_size<<std::endl;
      myfile.close();
    }
  }
  server->Response(req_meta, response);
}
// handle push/pull with multiple keys
/**
 * \brief Split a multi-key dense request into one KVPairs per key, then
 * dispatch to PushHandle or PullHandle.
 */
void AdvancedDataHandleDefault(const ps::KVMeta& req_meta,
                               const ps::KVPairs<real_t> &req_data,
                               ps::KVServer<real_t>* server) {
  CHECK_EQ(req_meta.cmd, static_cast<int>(DataHandleType::kDefaultPushPull));
  // do some check
  CHECK_GE(req_data.keys.size(), (size_t)1); // handle multiple keys
  if (req_meta.push) CHECK_EQ(req_data.lens.size(), req_data.keys.size());
  // slice the KVPairs into multiple one
  std::vector<ps::KVPairs<real_t>> kvpairs;
  int begin = 0;  // running offset into req_data.vals
  for(size_t i=0; i<req_data.keys.size(); i++){
    //LOG(INFO) << "server: " << ps::MyID() << " is_push: " << req_meta.push
    //<< " key: " << req_data.keys[i]<<" sender "<<req_meta.sender;
    // sender_temp feeds DecodeKey's error diagnostics only; the decoded key
    // itself is unused here (the call is kept for its bound-check logging)
    sender_temp = req_meta.sender;
    int key = DecodeKey(req_data.keys[i]);
    sender_temp = 0;
    ps::KVPairs<real_t> kvpair;
    kvpair.keys.push_back(req_data.keys[i]);
    if (req_meta.push)
    {
      // carve this key's value slice out of the concatenated payload
      kvpair.lens.push_back(req_data.lens[i]);
      kvpair.vals = req_data.vals.segment(begin, begin+req_data.lens[i]);
      begin += req_data.lens[i];
    }
    kvpairs.push_back(kvpair);
  }
  if (req_meta.push) {
    PushHandle(req_meta, kvpairs, server);
  } else {
    PullHandle(req_meta, kvpairs, server);
  }
}
/**
 * \brief Map a global ps::Key to this server's local key index by
 * subtracting the start of this server's key range.
 *
 * The magic bound 100000 is a sanity limit on local key indices; a value
 * outside [0, 100000] means the key was sharded under a different (pre- or
 * post-scaling) key-range layout, so decoding falls back to
 * DecodeKey_Scaling.
 */
int DecodeKey(ps::Key key) {
  auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MapServerIDToRank(ps::MyID())];
  if(key-kr.begin()>100000||key-kr.begin()<0){
    std::cerr<<"Decode ERROR origin: "<<key<<" kr: "<<
      kr.begin()<<" after: "<<key-kr.begin()
      <<" by "<<ps::MyID()<<" num_servers: "<<ps::NumServers()
      <<" from "<<sender_temp<<std::endl;
    return DecodeKey_Scaling(key);
  }
  return key - kr.begin();
}
/**
 * \brief Fallback key decoding used while the cluster is scaling: try the
 * key ranges of the previous/next cluster size until the offset lands in
 * the sane range [0, 100000].
 *
 * `add` selects the layout to try: -1 after a server left
 * (previous_scaling_server > 0), +1 otherwise.
 *
 * Fix: removed the unused per-iteration locals `id` and `rank` (pure
 * lookups whose results were never read).
 *
 * NOTE(review): if ps::Key is unsigned, `key_temp >= 0` is always true and
 * out-of-range offsets are rejected only via the wrap-around exceeding
 * 100000 — confirm ps::Key's signedness.
 */
int DecodeKey_Scaling(ps::Key key){
  int add = 0;
  if(previous_scaling_server>0){
    add=-1;
  }else{add=1;}
  auto kr_scaling = ps::Postoffice::Get()->GetServerKeyRanges_Scaling(add);
  auto key_temp = key;
  for(int i = 0; i< ps::NumServers()+1; i++){
    auto temp = kr_scaling[i];
    key_temp = key - temp.begin();
    if(key_temp>=0 &&key_temp<=100000){break;}
  }
  if(key_temp>=0 && key_temp<=100000){
    return key_temp;
  }else{
    LOG(ERROR)<<"DECODE FAIL! "<<add;
    return 0;
  }
}
// send parameter information on this server to the scheduler
/**
 * \brief Reply to a kGetParaInfo command with this server's parameter
 * layout: update speed, current step, and every (key, size) pair it holds.
 */
void GetParaInfo(const ps::SimpleData& recved, ps::SimpleApp* app){
  // response message format: "speed step key:size key:size key:size"
  // improve this code using self-defined data structure and serialization
  CHECK(!recved.body.size()) << "Request body of kGetParaInfo should be empty";
  if (!store_.size()){
    app->Response(recved); // no parameters on this server
    LOG(INFO) << "No parameter info on Server " << ps::MyID();
    return;
  }
  ParamInfo param_info;
  param_info.speed = update_speed_;
  param_info.step = last_step_;
  const int server_id = ps::MyID();
  auto& kv_list = param_info.ps_kvs[server_id];
  for (const auto& entry : store_){
    // entry.first is the decoded key, entry.second the stored NDArray
    const int param_key = entry.first;
    const int param_size = entry.second.shape().Size();
    kv_list.push_back(std::make_pair(param_key, param_size));
  }
  std::string body = ParamInfoToString(param_info);
  LOG(INFO) << "Parameter info on Server " << ps::MyID() << ": " << body;
  // respond with the serialized info; the caller sends no further response
  app->Response(recved, body);
}
// update parameters on this server based on the assignment from server
/**
 * \brief Receive a new parameter-to-server assignment (kSetParams) from the
 * scheduler and record which keys must move out of / into this server at
 * the specified step. The actual movement happens later in MoveParams();
 * the scheduler is acknowledged from FinishParamMove() via resp_scaling.
 */
void SetParams(const ps::SimpleData& recved, ps::SimpleApp* app){
  LOG(INFO) << "Server " << ps::MyID() << " received new parameter assignment.";
  ParamInfo param_info = StringToParamInfo(recved.body);
  // the new server will not get the new assignment
  // at the specified step, each server sends parameters to others and acknowledges params from others
  // then response to the scheduler
  start_scaling_step = param_info.step;
  end_scaling_step = start_scaling_step;
  // NOTE(review): the scaling server id is smuggled through the `speed`
  // field of ParamInfo — confirm this protocol with the scheduler side.
  scaling_server_id_ = int(param_info.speed);
  previous_scaling_server = scaling_server_id_;
  pull_size = store_.size();
  // no need mutex lock since DataHandleEx and RequestCommandHandle is called by same thread
  move_out_key_dests.clear();
  move_out_keys.clear();
  move_in_key_dests.clear();
  // figure out the keys sent to others
  for(const auto server_kvs : param_info.ps_kvs){
    int server_id = server_kvs.first;
    for (const auto kv_pair : server_kvs.second) {
      int key = kv_pair.first;
      int size = kv_pair.second;
      if(store_.count(key)){ // the key is on this server
        if (server_id != ps::MyID()) {
          // need to sent out the key to other servers
          std::cerr<<"Server "<<ps::MyID()<<" send key "<<key<<" to server "<<server_id<<" sotre_.count "<<store_.count(key)<<std::endl;
          move_out_key_dests[key] = server_id;
          move_out_keys.insert(key);
        }
      } else if (server_id == ps::MyID()) {
        // key assigned to this server but not present yet: expect it to move in
        std::cerr<<"Server "<<server_id<<" Move In Key Assignment "<< key <<std::endl;
        move_in_key_dests[key] = server_id;
      }
    }
  }
  // save recved to resp_scaling so FinishParamMove() can ack the scheduler
  resp_scaling.head = recved.head;
  resp_scaling.sender = recved.sender;
  resp_scaling.timestamp = recved.timestamp;
  resp_scaling.body = "";
}
// send the key to other servers if necessary
/**
 * \brief Push every key in move_out_key_dests (value, length and step) to
 * its destination server, erase it locally, then check whether the whole
 * migration is already complete.
 */
void MoveParams(ps::KVServer<real_t>* server){
  // NOTE(review): hard-coded experiment log path — breaks on any other
  // machine; should come from configuration.
  struct timeval tv;
  gettimeofday(&tv,NULL);
  time_t interval = 1000000*tv.tv_sec+tv.tv_usec;
  std::ofstream myfile;
  myfile.open("/home/net/test/overhead.txt",std::ofstream::out|std::ofstream::app);
  myfile<<"stage 3, start: "<<interval<<", ";
  myfile.close();
  // first update node_ids_ to enable communication between servers
  LOG(INFO) << "Server " << ps::MyID() << " starts moving parameters out...";
  if(scaling_server_id_!=-2){
    std::cerr<<"Call for UpdateMetas in Server:"<<ps::MyID()<<std::endl;
    UpdateMetas(scaling_server_id_);
  }
  for (auto& key_dest : move_out_key_dests){
    int key = key_dest.first;
    int dest = key_dest.second;
    ps::KVPairs<real_t> kvs;
    auto& stored = store_[key];
    auto len = stored.shape().Size();
    kvs.keys.push_back(key);
    kvs.lens.push_back(len);
    // carry the step counter along so the receiver resumes at the right version
    kvs.vers.push_back(steps_[key]);
    kvs.vals.CopyFrom(static_cast<const float*>(stored.data().dptr_), len);
    LOG(INFO) << "Server " << ps::MyID() << " is moving the parameters with key "
              << key << " to Server " << dest << "steps_"<<steps_[key];
    server->Send(static_cast<int>(DataHandleType::kMoveParams), kvs, dest);
    // delete from store_
    store_.erase(key);
    if (sync_mode_) merge_buf_.erase(key);
  }
  // handles the case where nothing needed to move
  FinishParamMove();
}
// handle parameter movement request and response
/**
 * \brief Handle kMoveParams traffic between servers.
 *
 * Request: another server pushes a migrated parameter (value + version);
 * store it, record its step, and acknowledge with the key.
 * Response: a destination server confirmed receipt; drop the key from the
 * outgoing set. Either way, re-check whether the migration is complete.
 */
void DataHandleMoveParams(const ps::KVMeta& req_meta,
                          const ps::KVPairs<real_t> &req_data,
                          ps::KVServer<real_t>* server) {
  // check if request or response
  if (req_meta.request){
    // push parameter here
    CHECK_EQ(req_data.keys.size(), (size_t)1);
    CHECK_EQ(req_data.lens.size(), (size_t)1);
    CHECK_EQ(req_data.vers.size(), (size_t)1);
    CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
    // NOTE(review): the key arrives already decoded (MoveParams sends the
    // decoded key), hence no DecodeKey here — confirm.
    int key = req_data.keys[0];
    int ver = req_data.vers[0];
    LOG(INFO) << "Server " << ps::MyID() << " received parameters with key " << key << " vers: " << ver;
    move_in_key_dests.erase(key);
    auto& stored = store_[key];
    // copy parameters to store_
    size_t ds[] = {(size_t)req_data.lens[0]};
    TShape dshape(ds, ds + 1);
    TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*)
                    dshape, cpu::kDevMask); //cpu
    NDArray recved = NDArray(recv_blob, 0); // create NDArray that shares data with TBlob, 0 is dev id
    CHECK(stored.is_none()) << "Parameters with key " << key
                            << " are already existed on server " << ps::MyID();
    stored = NDArray(dshape, Context());
    CopyFromTo(recved, &stored, 0);
    stored.WaitToRead();
    // resume the parameter's version where the previous owner left off
    steps_[key] = ver;
    // send response back
    ps::KVPairs<real_t> kvs;
    kvs.keys.push_back(key);
    server->Response(req_meta, kvs);
  } else {
    // get a response showing the parameter finished movement
    CHECK_EQ(req_data.keys.size(), (size_t)1);
    int key = req_data.keys[0];
    LOG(INFO) << "Server " << ps::MyID() << " finished parameter move for key " << key;
    CHECK_EQ(req_meta.sender, move_out_key_dests[key]);
    move_out_key_dests.erase(key);
  }
  FinishParamMove();
}
/**
 * \brief Check whether parameter migration is complete (all pulls done,
 * no keys pending in either direction). If so, acknowledge the scheduler
 * via the saved resp_scaling, reset the scaling state, and write a
 * SCALING.txt marker. Always appends the current total parameter size to
 * the per-server CSV log.
 */
void FinishParamMove(){
  std::cerr << "In FinishParamMove function, server "<< ps::MyID()
            << " resp_scaling: sender "<<resp_scaling.sender<<" head "<<
            resp_scaling.head<<" out_size "<<move_out_key_dests.size()
            <<" in_size "<<move_in_key_dests.size()<<std::endl;
  //Check param movement
  // NOTE(review): param_info/body are built here but never used after
  // serialization except for this dead diagnostic — confirm whether the
  // scheduler is supposed to receive it.
  ParamInfo param_info;
  param_info.speed = update_speed_;
  param_info.step = last_step_;
  int server_id = ps::MyID();
  for (const auto& elem: store_){
    int key = elem.first; // the key after decoding
    int size = elem.second.shape().Size();
    param_info.ps_kvs[server_id].push_back(std::make_pair(key,size));
  }
  std::string body = ParamInfoToString(param_info);
  //LOG(INFO) << "Parameter info on Server " << ps::MyID() << ": " << body;
  // for the new node, do not send ack.
  if (finish_pull&&(resp_scaling.sender > 0) && !(move_out_key_dests.size()) && !(move_in_key_dests.size())){
    LOG(INFO) << "Server " << ps::MyID()<< " finished all parameter move";
    LOG(INFO) << "resp_scaling: sender-"<<resp_scaling.sender
              << " head-"<<resp_scaling.head;
    // use call back here for further optimization
    static_cast<ps::SimpleApp*>(ps_server_)->Response(resp_scaling);
    LOG(INFO) << "Server " << ps::MyID() << " sent acknowledgment to the scheduler";
    // reset scaling state so this block fires exactly once per scaling event
    finish_pull = false;
    resp_scaling.sender = 0;
    start_scaling_step = -1;
    scaling_server_id_ = 0;
    // write finish
    const char* workdir = std::getenv("WORK_DIR");
    if (workdir == NULL){
      LOG(ERROR) << "Environment variable WORK_DIR is not set.";
    } else {
      std::string fn = std::string(workdir)+"SCALING.txt";
      std::ofstream file;
      file.open(fn);
      file << "FINISH\n";
      file.close();
    }
    // the first worker sends stop command to this server
    //if (-scaling_server_id_ == ps::MyID()){
    //  exec_.Stop();
    //  exit(0);
    //  how to stop postoffice: scheduler send a barrier done?
    //}
  }
  // append the post-migration parameter footprint to the CSV log
  int total_size = 0;
  for (const auto& elem: store_){
    int key = elem.first; // the key after decoding
    int size = elem.second.shape().Size();
    total_size += size;
  }
  std::ofstream myfile;
  char buf[80];
  getcwd(buf,80);
  //std::cerr<<"Buf of getcwd is:"<<buf<<std::endl;
  std::string wd;
  wd = std::string(buf) + std::string("/results/");
  std::string file_name = wd+std::string("keyTimeRecorder_server")+".csv";
  myfile.open(file_name,std::ios_base::app);
  myfile<<"Server,"<<ps::MyID()<<", total_size,"<<total_size<<std::endl;
  myfile.close();
}
/*
 * Measure the parameter-update speed in iterations per second. Only the
 * first key to reach a new step advances the meter; the speed is logged
 * and recomputed every disp_freq_ iterations.
 */
void SpeedoMeter(int step){
  CHECK_GT(step, 0);
  if (step <= last_step_) {
    return;  // another key already reported this iteration
  }
  last_step_ = step;
  if (step == 1) {
    // first measurement: start the clock
    start_time = std::chrono::system_clock::now();
  }
  if (step % disp_freq_ == 0){
    const auto now = std::chrono::system_clock::now();
    const std::chrono::duration<double> elapsed = now - start_time;
    update_speed_ = disp_freq_ / elapsed.count();
    start_time = std::chrono::system_clock::now();
    LOG(INFO) << "Server: " << ps::MyID() << " Speed: " << update_speed_ << " batches/sec";
  }
}
/*
 * Temp server key range when just finishing DEC_SERVER
 */
/**
 * variables for speedometer
 */
const int disp_freq_ = 10; // report update speed every disp_freq_ (10) iterations
int last_step_ = 0;        // highest iteration observed so far
double update_speed_;      // measured speed in iterations (batches) per second
// start scaling at this iteration (-1 = no scaling scheduled)
int start_scaling_step = -1;
int end_scaling_step = -1;
// start of the current speed-measurement window
std::chrono::time_point<std::chrono::system_clock> start_time;
//key:dest_server
std::unordered_map<int,int> move_out_key_dests;
std::unordered_map<int,int> move_in_key_dests;
std::set<int> move_out_keys;
ps::SimpleData resp_scaling; // no effective if sender==0
//resp_scaling.sender = 0;
int scaling_server_id_;       // id of the joining/leaving server (from kSetParams)
int previous_scaling_server;  // sign selects the layout in DecodeKey_Scaling
// counter for the pull times for each key
std::unordered_map<int, int> num_pull_key_;
size_t end_pulls_ = 0;     // number of keys fully pulled at the scaling step
bool finish_pull = false;  // all keys pulled; migration may complete
int pull_size = 0;         // number of keys expected to be pulled when scaling
int sender_temp = 0;       // sender id, stashed for DecodeKey diagnostics only
bool is_init = false;      // total parameter size has stabilized (see PullHandle)
int overal_size = 0;       // largest total parameter size observed so far
/**
 * \brief user defined mode for push
 */
bool sync_mode_;
KVStore::Controller controller_;
KVStore::Updater updater_;
/**
 * \brief store_ contains the value at kvstore for each key
 */
std::unordered_map<int, NDArray> store_;
/**
 * \brief merge_buf_ is a buffer used if sync_mode is true. It represents
 * values from different workers being merged. The store will be updated
 * to this value when values from all workers are pushed into this buffer.
 */
std::unordered_map<int, MergeBuf> merge_buf_;
/**
 * \brief decomp_buf_ is a buffer into which compressed values are
 * decompressed before merging to the store. used when compress_!='none'
 */
std::unordered_map<int, NDArray> decomp_buf_;
Executor exec_;
ps::KVServer<float>* ps_server_;
// whether to LOG verbose information
bool log_verbose_;
/**
 * \brief gradient compression object.
 * starts with none, used after SetGradientCompression sets the type
 * currently there is no support for unsetting gradient compression
 */
std::shared_ptr<kvstore::GradientCompression> gradient_compression_;
/**
 * track the update times (i.e., steps) of each key, this counter also migrates with the key together
 */
std::unordered_map<int, int> steps_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
|
Ellpack_impl.h | #pragma once
#include <Benchmarks/SpMV/ReferenceFormats/Legacy/Ellpack.h>
#include <TNL/Containers/Vector.h>
#include <TNL/Math.h>
#include <TNL/Exceptions/NotImplementedError.h>
namespace TNL {
namespace Benchmarks {
namespace SpMV {
namespace ReferenceFormats {
namespace Legacy {
/**
 * \brief Default constructor: an empty Ellpack matrix with zero row length
 * and no aligned rows.
 *
 * Fix: removed the stray semicolon after the function body (triggers an
 * "extra ';'" warning under -Wpedantic at namespace scope).
 */
template< typename Real,
          typename Device,
          typename Index >
Ellpack< Real, Device, Index > :: Ellpack()
: rowLengths( 0 ), alignedRows( 0 )
{
}
/**
 * \brief Serialization type string for this matrix format. The device is
 * intentionally erased ("[any device]") so data saved on one device type
 * can be loaded on another.
 *
 * Fix: qualified getType< Index >() with TNL:: for consistency with the
 * adjacent TNL::getType< Real >() call (both resolve to the same function).
 */
template< typename Real,
          typename Device,
          typename Index >
std::string Ellpack< Real, Device, Index >::getSerializationType()
{
   return "Matrices::Ellpack< " +
          TNL::getType< Real >() +
          ", [any device], " +
          TNL::getType< Index >() +
          " >";
}
/**
 * \brief Virtual-dispatch wrapper returning the static serialization-type
 * string of the concrete instantiation.
 */
template< typename Real,
          typename Device,
          typename Index >
std::string Ellpack< Real, Device, Index >::getSerializationTypeVirtual() const
{
   return getSerializationType();
}
/**
 * \brief Set the matrix dimensions and, on CUDA, compute the padded
 * (aligned) number of rows used by the coalesced storage layout. Reallocates
 * the element arrays when a row length was already set.
 */
template< typename Real,
          typename Device,
          typename Index >
void Ellpack< Real, Device, Index >::setDimensions( const IndexType rows,
                                                    const IndexType columns )
{
   TNL_ASSERT( rows > 0 && columns > 0,
               std::cerr << "rows = " << rows
                         << " columns = " << columns << std::endl );
   this->rows = rows;
   this->columns = columns;
   if( std::is_same< Device, Devices::Cuda >::value )
   {
      // NOTE(review): alignedRows is seeded from `columns`, not `rows`,
      // before being topped up to cover the remaining rows — looks like it
      // should round `rows` to a warp multiple; confirm against the legacy
      // layout before changing.
      this->alignedRows = roundToMultiple( columns, Cuda::getWarpSize() );
      if( this->rows - this->alignedRows > 0 )
      {
         IndexType missingRows = this->rows - this->alignedRows;
         missingRows = roundToMultiple( missingRows, Cuda::getWarpSize() );
         this->alignedRows += missingRows;
      }
   }
   else this->alignedRows = rows;
   // reallocate only if a row capacity has already been chosen
   if( this->rowLengths != 0 )
      allocateElements();
}
template< typename Real,
          typename Device,
          typename Index >
// Set per-row capacities. ELLPACK stores the same number of slots in every
// row, so only the maximum of the requested capacities is kept.
void Ellpack< Real, Device, Index >::setCompressedRowLengths( ConstRowsCapacitiesTypeView rowLengths )
{
   // Dimensions must already be set and the capacities vector must match.
   TNL_ASSERT_GT( this->getRows(), 0, "cannot set row lengths of an empty matrix" );
   TNL_ASSERT_GT( this->getColumns(), 0, "cannot set row lengths of an empty matrix" );
   TNL_ASSERT_EQ( this->getRows(), rowLengths.getSize(), "wrong size of the rowLengths vector" );
   const IndexType maxLength = max( rowLengths );
   this->maxRowLength = maxLength;
   this->rowLengths = maxLength;
   allocateElements();
}
template< typename Real,
          typename Device,
          typename Index >
// Alias required by the common matrix interface; identical to
// setCompressedRowLengths().
void Ellpack< Real, Device, Index >::setRowCapacities( ConstRowsCapacitiesTypeView rowLengths )
{
   this->setCompressedRowLengths( rowLengths );
}
template< typename Real,
          typename Device,
          typename Index >
// Fill the output vector with the capacity of each row. Every row reports the
// same (uniform) ELLPACK capacity.
void Ellpack< Real, Device, Index >::getCompressedRowLengths( RowsCapacitiesTypeView rowLengths ) const
{
   TNL_ASSERT_EQ( rowLengths.getSize(), this->getRows(), "invalid size of the rowLengths vector" );
   for( IndexType i = 0; i < this->getRows(); i++ )
      rowLengths.setElement( i, this->getRowLength( i ) );
}
template< typename Real,
typename Device,
typename Index >
// Set the same capacity for every row. Storage is allocated immediately when
// the dimensions are already known; otherwise allocation is deferred until
// setDimensions() is called.
void Ellpack< Real, Device, Index >::setConstantCompressedRowLengths( const IndexType& rowLengths )
{
TNL_ASSERT( rowLengths > 0,
std::cerr << " rowLengths = " << rowLengths );
this->rowLengths = rowLengths;
if( this->rows > 0 )
allocateElements();
}
template< typename Real,
          typename Device,
          typename Index >
// All rows share the same capacity in the ELLPACK format, so the row index
// is ignored.
Index Ellpack< Real, Device, Index >::getRowLength( const IndexType row ) const
{
   return rowLengths;
}
template< typename Real,
          typename Device,
          typename Index >
__cuda_callable__
// Device-callable variant of getRowLength(); the row index is ignored because
// every row has the same uniform capacity.
Index Ellpack< Real, Device, Index >::getRowLengthFast( const IndexType row ) const
{
   return rowLengths;
}
template< typename Real,
typename Device,
typename Index >
// Count the stored (structural) non-zero entries in the given row, i.e. the
// entries preceding the first padding index.
Index Ellpack< Real, Device, Index >::getNonZeroRowLength( const IndexType row ) const
{
ConstMatrixRow matrixRow = getRow( row );
// NOTE(review): getType< Device >() passes a device-type string; presumably
// it selects a host/device code path inside getNonZeroElementsCount — verify.
return matrixRow.getNonZeroElementsCount( getType< Device >() );
}
template< typename Real,
          typename Device,
          typename Index >
template< typename Real2,
          typename Device2,
          typename Index2 >
// Adopt the dimensions and the uniform row capacity of another Ellpack matrix
// (possibly on a different device / with different value and index types).
void Ellpack< Real, Device, Index >::setLike( const Ellpack< Real2, Device2, Index2 >& matrix )
{
   Sparse< Real, Device, Index >::setLike( matrix );
   this->rowLengths = matrix.rowLengths;
   // On CUDA the rows are padded to a warp-size multiple for coalesced access.
   if( std::is_same< Device, Devices::Cuda >::value )
      this->alignedRows = roundToMultiple( this->getRows(), Cuda::getWarpSize() );
   else
      this->alignedRows = this->getRows();
}
template< typename Real,
          typename Device,
          typename Index >
// Return the matrix to the default-constructed (empty) state.
void Ellpack< Real, Device, Index > :: reset()
{
   Sparse< Real, Device, Index >::reset();
   rowLengths = 0;
   alignedRows = 0;
}
template< typename Real,
typename Device,
typename Index >
template< typename Real2,
typename Device2,
typename Index2 >
// Element-wise comparison with another Ellpack matrix.
// NOTE(review): the comparison itself is not implemented — for matrices of
// matching dimensions this always reports inequality (returns false).
bool Ellpack< Real, Device, Index >::operator == ( const Ellpack< Real2, Device2, Index2 >& matrix ) const
{
TNL_ASSERT( this->getRows() == matrix.getRows() &&
this->getColumns() == matrix.getColumns(),
std::cerr << "this->getRows() = " << this->getRows()
<< " matrix.getRows() = " << matrix.getRows()
<< " this->getColumns() = " << this->getColumns()
<< " matrix.getColumns() = " << matrix.getColumns() );
// TODO: implement this
return false;
}
template< typename Real,
          typename Device,
          typename Index >
template< typename Real2,
          typename Device2,
          typename Index2 >
// Defined as the negation of operator==.
bool Ellpack< Real, Device, Index >::operator != ( const Ellpack< Real2, Device2, Index2 >& matrix ) const
{
   return !( *this == matrix );
}
template< typename Real,
          typename Device,
          typename Index >
__cuda_callable__
// Overwrite a single element (device-callable). Implemented as addElementFast
// with a zero multiplicator on the previous value.
bool Ellpack< Real, Device, Index > :: setElementFast( const IndexType row,
                                                       const IndexType column,
                                                       const Real& value )
{
   return addElementFast( row, column, value, 0.0 );
}
template< typename Real,
          typename Device,
          typename Index >
// Overwrite a single element (host variant). Implemented as addElement with a
// zero multiplicator on the previous value.
bool Ellpack< Real, Device, Index > :: setElement( const IndexType row,
                                                   const IndexType column,
                                                   const Real& value )
{
   return addElement( row, column, value, 0.0 );
}
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
// Insert or accumulate a single element, device-side variant (direct array
// access, no host-only asserts). Row entries are kept sorted by column index
// and terminated by the padding index. Returns false when the row is full up
// to rowEnd and the column was not found.
bool Ellpack< Real, Device, Index > :: addElementFast( const IndexType row,
const IndexType column,
const RealType& value,
const RealType& thisElementMultiplicator )
{
// TODO: return this back when CUDA kernels support std::cerr
/*TNL_ASSERT( row >= 0 && row < this->rows &&
column >= 0 && column <= this->rows,
std::cerr << " row = " << row
<< " column = " << column
<< " this->rows = " << this->rows
<< " this->columns = " << this-> columns );*/
typedef EllpackDeviceDependentCode< DeviceType > DDCType;
IndexType i = DDCType::getRowBegin( *this, row );
const IndexType rowEnd = DDCType::getRowEnd( *this, row );
const IndexType step = DDCType::getElementStep( *this );
// Skip entries with a smaller column index (row is sorted, padded at the end).
while( i < rowEnd &&
this->columnIndexes[ i ] < column &&
this->columnIndexes[ i ] != this->getPaddingIndex() ) i += step;
if( i == rowEnd )
return false;
if( this->columnIndexes[ i ] == column )
{
// Element already present: combine old and new value.
this->values[ i ] = thisElementMultiplicator * this->values[ i ] + value;
return true;
}
else
if( this->columnIndexes[ i ] == this->getPaddingIndex() ) // artificial zero
{
this->columnIndexes[ i ] = column;
this->values[ i ] = value;
}
else
{
// Shift the tail of the row one slot right to make room for the new entry.
// NOTE(review): when the row is completely full, the last entry is dropped
// here — verify this overflow behavior is intended.
Index j = rowEnd - step;
while( j > i )
{
this->columnIndexes[ j ] = this->columnIndexes[ j - step ];
this->values[ j ] = this->values[ j - step ];
j -= step;
}
this->columnIndexes[ i ] = column;
this->values[ i ] = value;
}
return true;
}
template< typename Real,
typename Device,
typename Index >
// Host variant of addElementFast: identical insertion logic, but all storage
// accesses go through getElement/setElement so it also works when the data
// lives on the device. Returns false when the row is full and the column was
// not found.
bool Ellpack< Real, Device, Index > :: addElement( const IndexType row,
const IndexType column,
const RealType& value,
const RealType& thisElementMultiplicator )
{
typedef EllpackDeviceDependentCode< DeviceType > DDCType;
IndexType i = DDCType::getRowBegin( *this, row );
const IndexType rowEnd = DDCType::getRowEnd( *this, row );
const IndexType step = DDCType::getElementStep( *this );
// Skip entries with a smaller column index (row is sorted, padded at the end).
while( i < rowEnd &&
this->columnIndexes.getElement( i ) < column &&
this->columnIndexes.getElement( i ) != this->getPaddingIndex() ) i += step;
if( i == rowEnd )
return false;
if( this->columnIndexes.getElement( i ) == column )
{
// Element already present: combine old and new value.
this->values.setElement( i, thisElementMultiplicator * this->values.getElement( i ) + value );
return true;
}
else
if( this->columnIndexes.getElement( i ) == this->getPaddingIndex() )
{
this->columnIndexes.setElement( i, column );
this->values.setElement( i, value );
}
else
{
// Shift the tail of the row one slot right to make room for the new entry.
IndexType j = rowEnd - step;
while( j > i )
{
this->columnIndexes.setElement( j, this->columnIndexes.getElement( j - step ) );
this->values.setElement( j, this->values.getElement( j - step ) );
j -= step;
}
this->columnIndexes.setElement( i, column );
this->values.setElement( i, value );
}
return true;
}
template< typename Real,
          typename Device,
          typename Index >
__cuda_callable__
// Overwrite a whole row (device-callable). The given column indexes must
// already be sorted; remaining slots are filled with the padding index.
// Returns false if more elements are given than the row capacity, or if a
// column index is out of range.
bool Ellpack< Real, Device, Index > :: setRowFast( const IndexType row,
                                                   const IndexType* columnIndexes,
                                                   const RealType* values,
                                                   const IndexType elements )
{
   typedef EllpackDeviceDependentCode< DeviceType > DDCType;
   IndexType elementPointer = DDCType::getRowBegin( *this, row );
   const IndexType step = DDCType::getElementStep( *this );
   if( elements > this->rowLengths )
      return false;
   // Use IndexType consistently (the original mixed Index and IndexType) and
   // drop the unused rowEnd local that only produced a compiler warning.
   for( IndexType i = 0; i < elements; i++ )
   {
      const IndexType column = columnIndexes[ i ];
      if( column < 0 || column >= this->getColumns() )
         return false;
      this->columnIndexes[ elementPointer ] = column;
      this->values[ elementPointer ] = values[ i ];
      elementPointer += step;
   }
   // Mark the unused tail of the row with the padding index.
   for( IndexType i = elements; i < this->rowLengths; i++ )
   {
      this->columnIndexes[ elementPointer ] = this->getPaddingIndex();
      elementPointer += step;
   }
   return true;
}
template< typename Real,
typename Device,
typename Index >
// Host variant of setRowFast: overwrite a whole row via the element accessors
// (works for device-resident data too). Column indexes must be sorted; unused
// slots are filled with the padding index. Returns false on capacity overflow
// or an out-of-range column.
bool Ellpack< Real, Device, Index > :: setRow( const IndexType row,
const IndexType* columnIndexes,
const RealType* values,
const IndexType elements )
{
typedef EllpackDeviceDependentCode< DeviceType > DDCType;
IndexType elementPointer = DDCType::getRowBegin( *this, row );
//const IndexType rowEnd = DDCType::getRowEnd( *this, row );
const IndexType step = DDCType::getElementStep( *this );
if( elements > this->rowLengths )
return false;
for( IndexType i = 0; i < elements; i++ )
{
const IndexType column = columnIndexes[ i ];
if( column < 0 || column >= this->getColumns() )
return false;
this->columnIndexes.setElement( elementPointer, column );
this->values.setElement( elementPointer, values[ i ] );
elementPointer += step;
}
// Mark the unused tail of the row with the padding index.
for( IndexType i = elements; i < this->rowLengths; i++ )
{
this->columnIndexes.setElement( elementPointer, this->getPaddingIndex() );
elementPointer += step;
}
return true;
}
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
// Accumulate a whole row at once.
// NOTE(review): not implemented — always reports failure.
bool Ellpack< Real, Device, Index > :: addRowFast( const IndexType row,
const IndexType* columns,
const RealType* values,
const IndexType numberOfElements,
const RealType& thisElementMultiplicator )
{
// TODO: implement
return false;
}
template< typename Real,
          typename Device,
          typename Index >
// Host wrapper for addRowFast (which is currently a stub returning false).
bool Ellpack< Real, Device, Index > :: addRow( const IndexType row,
                                               const IndexType* columns,
                                               const RealType* values,
                                               const IndexType numberOfElements,
                                               const RealType& thisElementMultiplicator )
{
   // Forward ALL arguments: the original call silently dropped
   // thisElementMultiplicator, so callers' multiplicator would have been
   // replaced by the default once addRowFast is implemented.
   return this->addRowFast( row, columns, values, numberOfElements, thisElementMultiplicator );
}
template< typename Real,
          typename Device,
          typename Index >
__cuda_callable__
// Read a single element (device-callable). Performs a linear search within
// the row; entries are sorted by column index and terminated by the padding
// index. Returns 0.0 for structural zeros.
Real Ellpack< Real, Device, Index >::getElementFast( const IndexType row,
                                                     const IndexType column ) const
{
   typedef EllpackDeviceDependentCode< DeviceType > DDCType;
   const IndexType end = DDCType::getRowEnd( *this, row );
   const IndexType stride = DDCType::getElementStep( *this );
   IndexType pos = DDCType::getRowBegin( *this, row );
   while( pos < end &&
          this->columnIndexes[ pos ] < column &&
          this->columnIndexes[ pos ] != this->getPaddingIndex() )
      pos += stride;
   if( pos < end && this->columnIndexes[ pos ] == column )
      return this->values[ pos ];
   return 0.0;
}
template< typename Real,
          typename Device,
          typename Index >
// Host variant of getElementFast: same linear search, but uses the element
// accessors so it also works on device-resident data. Returns 0.0 for
// structural zeros.
Real Ellpack< Real, Device, Index >::getElement( const IndexType row,
                                                 const IndexType column ) const
{
   typedef EllpackDeviceDependentCode< DeviceType > DDCType;
   const IndexType end = DDCType::getRowEnd( *this, row );
   const IndexType stride = DDCType::getElementStep( *this );
   IndexType pos = DDCType::getRowBegin( *this, row );
   while( pos < end &&
          this->columnIndexes.getElement( pos ) < column &&
          this->columnIndexes.getElement( pos ) != this->getPaddingIndex() )
      pos += stride;
   if( pos < end && this->columnIndexes.getElement( pos ) == column )
      return this->values.getElement( pos );
   return 0.0;
}
template< typename Real,
          typename Device,
          typename Index >
__cuda_callable__
// Copy all rowLengths slots of the given row (including padding entries) into
// the caller-provided arrays, which must hold at least rowLengths elements.
void Ellpack< Real, Device, Index >::getRowFast( const IndexType row,
                                                 IndexType* columns,
                                                 RealType* values ) const
{
   IndexType elementPtr = DeviceDependentCode::getRowBegin( *this, row );
   // Removed the unused rowEnd local (the loop is bounded by rowLengths).
   const IndexType step = DeviceDependentCode::getElementStep( *this );
   for( IndexType i = 0; i < this->rowLengths; i++ )
   {
      columns[ i ] = this->columnIndexes[ elementPtr ];
      values[ i ] = this->values[ elementPtr ];
      elementPtr += step;
   }
}
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
// Mutable view of one matrix row: raw pointers into the column-index and
// value storage, plus the uniform row capacity and the device-specific
// element stride. The view is invalidated by any reallocation.
typename Ellpack< Real, Device, Index >::MatrixRow
Ellpack< Real, Device, Index >::
getRow( const IndexType rowIndex )
{
//printf( "this->rowLengths = %d this = %p \n", this->rowLengths, this );
IndexType rowBegin = DeviceDependentCode::getRowBegin( *this, rowIndex );
return MatrixRow( &this->columnIndexes[ rowBegin ],
&this->values[ rowBegin ],
this->rowLengths,
DeviceDependentCode::getElementStep( *this ) );
}
template< typename Real,
typename Device,
typename Index >
__cuda_callable__
// Read-only view of one matrix row; see the non-const overload above for the
// layout details.
typename Ellpack< Real, Device, Index >::ConstMatrixRow
Ellpack< Real, Device, Index >::
getRow( const IndexType rowIndex ) const
{
//printf( "this->rowLengths = %d this = %p \n", this->rowLengths, this );
IndexType rowBegin = DeviceDependentCode::getRowBegin( *this, rowIndex );
return ConstMatrixRow( &this->columnIndexes[ rowBegin ],
&this->values[ rowBegin ],
this->rowLengths,
DeviceDependentCode::getElementStep( *this ) );
}
template< typename Real,
          typename Device,
          typename Index >
template< typename Vector >
__cuda_callable__
// Dot product of one matrix row with a dense vector; stops at the first
// padding index (end of the stored entries).
typename Vector::RealType Ellpack< Real, Device, Index >::rowVectorProduct( const IndexType row,
                                                                            const Vector& vector ) const
{
   const IndexType end = DeviceDependentCode::getRowEnd( *this, row );
   const IndexType stride = DeviceDependentCode::getElementStep( *this );
   Real sum = 0.0;
   for( IndexType pos = DeviceDependentCode::getRowBegin( *this, row ); pos < end; pos += stride )
   {
      const Index col = this->columnIndexes[ pos ];
      if( col == this->getPaddingIndex() )
         break;
      sum += this->values[ pos ] * vector[ col ];
   }
   return sum;
}
template< typename Real,
          typename Device,
          typename Index >
template< typename InVector,
          typename OutVector >
// Sparse matrix-vector product: outVector = multiplicator * (*this) * inVector.
// Delegates to the device-specific implementation (sequential/OpenMP loop on
// the host, CUDA kernel on the device).
void Ellpack< Real, Device, Index >::vectorProduct( const InVector& inVector,
                                                    OutVector& outVector,
                                                    RealType multiplicator ) const
{
   DeviceDependentCode::vectorProduct( *this, inVector, outVector, multiplicator );
}
template< typename Real,
typename Device,
typename Index >
template< typename Real2,
typename Index2 >
// Matrix addition: *this = thisMatrixMultiplicator * (*this)
//                        + matrixMultiplicator * matrix.
// NOTE(review): not implemented — always throws.
void Ellpack< Real, Device, Index > :: addMatrix( const Ellpack< Real2, Device, Index2 >& matrix,
const RealType& matrixMultiplicator,
const RealType& thisMatrixMultiplicator )
{
throw Exceptions::NotImplementedError( "Ellpack::addMatrix is not implemented." );
// TODO: implement
}
template< typename Real,
typename Device,
typename Index >
template< typename Real2,
typename Index2 >
// Build this matrix as the (scaled) transposition of another Ellpack matrix.
// NOTE(review): not implemented — always throws.
void Ellpack< Real, Device, Index >::getTransposition( const Ellpack< Real2, Device, Index2 >& matrix,
const RealType& matrixMultiplicator )
{
throw Exceptions::NotImplementedError( "Ellpack::getTransposition is not implemented." );
// TODO: implement
}
template< typename Real,
typename Device,
typename Index >
template< typename Vector1, typename Vector2 >
// One SOR (successive over-relaxation) update of x[row] for the system
// (*this) * x = b with relaxation parameter omega. Returns false (and leaves
// x untouched) when the diagonal entry of the row is zero.
// NOTE(review): the loop advances by i++ instead of the device element step
// (which is commented out below) — presumably this routine is host-only where
// the step is 1; verify before calling on a CUDA-laid-out matrix.
bool Ellpack< Real, Device, Index > :: performSORIteration( const Vector1& b,
const IndexType row,
Vector2& x,
const RealType& omega ) const
{
TNL_ASSERT( row >=0 && row < this->getRows(),
std::cerr << "row = " << row
<< " this->getRows() = " << this->getRows() << std::endl );
RealType diagonalValue( 0.0 );
RealType sum( 0.0 );
IndexType i = DeviceDependentCode::getRowBegin( *this, row );
const IndexType rowEnd = DeviceDependentCode::getRowEnd( *this, row );
//const IndexType step = DeviceDependentCode::getElementStep( *this );
IndexType column;
// Accumulate the off-diagonal contributions and capture the diagonal entry.
while( i < rowEnd && ( column = this->columnIndexes[ i ] ) < this->columns )
{
if( column == row )
diagonalValue = this->values[ i ];
else
sum += this->values[ i ] * x[ column ];
i++;
}
if( diagonalValue == ( Real ) 0.0 )
{
std::cerr << "There is zero on the diagonal in " << row << "-th row of a matrix. I cannot perform SOR iteration." << std::endl;
return false;
}
x[ row ] = ( 1.0 - omega ) * x[ row ] + omega / diagonalValue * ( b[ row ] - sum );
return true;
}
template< typename Real,
typename Device,
typename Index >
template< typename Vector >
// One (damped) Jacobi update of x[row]: reads only old_x, writes only x[row].
// Returns false when the diagonal entry of the row is zero.
// NOTE(review): like performSORIteration above, the loop advances by i++
// instead of the device element step — presumably host-only; verify.
bool
Ellpack< Real, Device, Index >::
performJacobiIteration( const Vector& b,
const IndexType row,
const Vector& old_x,
Vector& x,
const RealType& omega ) const
{
TNL_ASSERT( row >=0 && row < this->getRows(),
std::cerr << "row = " << row << " this->getRows() = " << this->getRows() << std::endl );
RealType diagonalValue( 0.0 );
RealType sum( 0.0 );
IndexType i = DeviceDependentCode::getRowBegin( *this, row );
const IndexType rowEnd = DeviceDependentCode::getRowEnd( *this, row );
//const IndexType step = DeviceDependentCode::getElementStep( *this );
IndexType column;
// Accumulate the off-diagonal contributions and capture the diagonal entry.
while( i < rowEnd && ( column = this->columnIndexes[ i ] ) < this->columns )
{
if( column == row )
diagonalValue = this->values[ i ];
else
sum += this->values[ i ] * old_x[ column ];
i++;
}
if( diagonalValue == ( Real ) 0.0 )
{
std::cerr << "There is zero on the diagonal in " << row << "-th row of the matrix. I cannot perform the Jacobi iteration." << std::endl;
return false;
}
x[ row ] = ( 1.0 - omega ) * old_x[ row ] + omega / diagonalValue * ( b[ row ] - sum );
return true;
}
// Same-type copy assignment: replicate the layout, then copy the payload.
template< typename Real,
          typename Device,
          typename Index >
Ellpack< Real, Device, Index >&
Ellpack< Real, Device, Index >::operator=( const Ellpack& matrix )
{
   this->setLike( matrix );
   this->columnIndexes = matrix.columnIndexes;
   this->values = matrix.values;
   return *this;
}
// Cross-device copy assignment. The host stores rows contiguously
// (row * rowLengths + slot) while CUDA stores slot columns contiguously with
// row padding (slot * alignedRows + row), so the data must be transposed
// block by block during the copy.
template< typename Real,
          typename Device,
          typename Index >
template< typename Real2, typename Device2, typename Index2, typename >
Ellpack< Real, Device, Index >&
Ellpack< Real, Device, Index >::operator=( const Ellpack< Real2, Device2, Index2 >& matrix )
{
   static_assert( std::is_same< Device, Devices::Host >::value || std::is_same< Device, Devices::Cuda >::value,
                  "unknown device" );
   static_assert( std::is_same< Device2, Devices::Host >::value || std::is_same< Device2, Devices::Cuda >::value,
                  "unknown device" );
   // setLike does not work here due to different alignment on Cuda and Host
   this->rowLengths = matrix.rowLengths;
   this->setDimensions( matrix.getRows(), matrix.getColumns() );
   // Process rows in blocks of 32 so the transposition stays cache-friendly.
   const int blockSize = 32;
   const int blocks = roundUpDivision( this->getRows(), blockSize );
   // host -> cuda
   if( std::is_same< Device, Devices::Cuda >::value ) {
      // Stage the transposed data in sequential host vectors, then upload
      // both arrays with a single device assignment each.
      typename ValuesVector::template Self< typename ValuesVector::ValueType, Devices::Sequential > tmpValues;
      typename ColumnIndexesVector::template Self< typename ColumnIndexesVector::ValueType, Devices::Sequential > tmpColumnIndexes;
      tmpValues.setLike( this->values );
      tmpColumnIndexes.setLike( this->columnIndexes );
#ifdef HAVE_OPENMP
#pragma omp parallel for if( Devices::Host::isOMPEnabled() )
#endif
      for( Index b = 0; b < blocks; b++ ) {
         const Index offset = b * blockSize;
         for( Index j = 0; j < rowLengths; j++ )
            for( Index i = 0; i < blockSize && offset + i < this->getRows(); i++ ) {
               // element (row = offset + i, slot = j)
               tmpValues[ offset + j * alignedRows + i ] = matrix.values[ ( offset + i ) * rowLengths + j ];
               tmpColumnIndexes[ offset + j * alignedRows + i ] = matrix.columnIndexes[ ( offset + i ) * rowLengths + j ];
            }
      }
      this->values = tmpValues;
      this->columnIndexes = tmpColumnIndexes;
   }
   // cuda -> host
   if( std::is_same< Device, Devices::Host >::value ) {
      // Download the device arrays first, then transpose locally.
      ValuesVector tmpValues;
      ColumnIndexesVector tmpColumnIndexes;
      tmpValues.setLike( matrix.values );
      tmpColumnIndexes.setLike( matrix.columnIndexes );
      tmpValues = matrix.values;
      tmpColumnIndexes = matrix.columnIndexes;
#ifdef HAVE_OPENMP
#pragma omp parallel for if( Devices::Host::isOMPEnabled() )
#endif
      for( Index b = 0; b < blocks; b++ ) {
         // BUG FIX: the host-side offset of the first row in this block is
         // ( b * blockSize ) * rowLengths. The original used b * rowLengths,
         // which placed every block after the first at wrong positions.
         const Index offset = ( b * blockSize ) * rowLengths;
         for( Index i = 0; i < blockSize && b * blockSize + i < this->getRows(); i++ )
            for( Index j = 0; j < rowLengths; j++ ) {
               // element (row = b * blockSize + i, slot = j)
               this->values[ offset + i * rowLengths + j ] = tmpValues[ b * blockSize + j * matrix.alignedRows + i ];
               this->columnIndexes[ offset + i * rowLengths + j ] = tmpColumnIndexes[ b * blockSize + j * matrix.alignedRows + i ];
            }
      }
   }
   return *this;
}
template< typename Real,
          typename Device,
          typename Index >
// Persist the base sparse data followed by the uniform row capacity.
void Ellpack< Real, Device, Index >::save( File& file ) const
{
   Sparse< Real, Device, Index >::save( file );
   file.save( &this->rowLengths );
}
template< typename Real,
          typename Device,
          typename Index >
// Restore the base sparse data followed by the uniform row capacity,
// mirroring save( File& ).
void Ellpack< Real, Device, Index >::load( File& file )
{
   Sparse< Real, Device, Index >::load( file );
   file.load( &this->rowLengths );
}
template< typename Real,
          typename Device,
          typename Index >
// Convenience overload: Object::save opens the file and calls save( File& ).
void Ellpack< Real, Device, Index >::save( const String& fileName ) const
{
   Object::save( fileName );
}
template< typename Real,
          typename Device,
          typename Index >
// Convenience overload: Object::load opens the file and calls load( File& ).
void Ellpack< Real, Device, Index >::load( const String& fileName )
{
   Object::load( fileName );
}
template< typename Real,
typename Device,
typename Index >
// Print the matrix row by row in a human-readable form, listing only the
// stored (non-padding, in-range) entries of each row.
void Ellpack< Real, Device, Index >::print( std::ostream& str ) const
{
for( IndexType row = 0; row < this->getRows(); row++ )
{
str <<"Row: " << row << " -> ";
IndexType i = DeviceDependentCode::getRowBegin( *this, row );
const IndexType rowEnd = DeviceDependentCode::getRowEnd( *this, row );
const IndexType step = DeviceDependentCode::getElementStep( *this );
// Stop at the first padding index or out-of-range column.
while( i < rowEnd &&
this->columnIndexes.getElement( i ) < this->columns &&
this->columnIndexes.getElement( i ) != this->getPaddingIndex() )
{
const Index column = this->columnIndexes.getElement( i );
str << " Col:" << column << "->" << this->values.getElement( i ) << "\t";
i += step;
}
str << std::endl;
}
}
template< typename Real,
typename Device,
typename Index >
// Allocate alignedRows * rowLengths element slots in the base Sparse storage.
void Ellpack< Real, Device, Index >::allocateElements()
{
const IndexType numMtxElmnts = this->alignedRows * this->rowLengths;
// Guard against IndexType overflow in the product above: dividing back must
// reproduce the other factor exactly.
if( this->alignedRows != 0 )
{
TNL_ASSERT_EQ( numMtxElmnts / this->alignedRows, this->rowLengths,
"Ellpack cannot store this matrix. The number of matrix elements has overflown the value that IndexType is capable of storing" );
}
Sparse< Real, Device, Index >::allocateMatrixElements( numMtxElmnts );
}
template<>
class EllpackDeviceDependentCode< Devices::Host >
{
public:
typedef Devices::Host Device;
// Host layout is row-major: the entries of a row are stored contiguously,
// so a row begins at row * rowLengths and elements are visited with step 1.
template< typename Real,
typename Index >
__cuda_callable__
static Index getRowBegin( const Ellpack< Real, Device, Index >& matrix,
const Index row )
{
return row * matrix.rowLengths;
}
template< typename Real,
typename Index >
__cuda_callable__
static Index getRowEnd( const Ellpack< Real, Device, Index >& matrix,
const Index row )
{
return ( row + 1 ) * matrix.rowLengths;
}
template< typename Real,
typename Index >
__cuda_callable__
static Index getElementStep( const Ellpack< Real, Device, Index >& matrix )
{
return 1;
}
// Row-parallel SpMV: each row's dot product is independent, so the loop can
// be distributed over OpenMP threads without synchronization.
template< typename Real,
typename Index,
typename InVector,
typename OutVector >
static void vectorProduct( const Ellpack< Real, Device, Index >& matrix,
const InVector& inVector,
OutVector& outVector,
Real multiplicator )
{
#ifdef HAVE_OPENMP
#pragma omp parallel for if( Devices::Host::isOMPEnabled() )
#endif
for( Index row = 0; row < matrix.getRows(); row ++ )
outVector[ row ] = matrix.rowVectorProduct( row, inVector ) * multiplicator;
/*Index col;
for( Index row = 0; row < matrix.getRows(); row ++ )
{
outVector[ row ] = 0.0;
const Index rowEnd = ( row + 1 ) * matrix.rowLengths;
for( Index i = row * matrix.rowLengths; i < rowEnd; i++ )
if( ( col = matrix.columnIndexes[ i ] ) < matrix.columns )
outVector[ row ] += matrix.values[ i ] * inVector[ col ];
}*/
}
};
#ifdef HAVE_CUDA
template<
typename Real,
typename Index >
// SpMV kernel: one thread per matrix row. The CUDA ELLPACK layout is
// column-major with row padding, so consecutive threads read consecutive
// addresses (stride alignedRows between a row's successive slots). The walk
// stops at the first padding or out-of-range column index.
__global__ void EllpackVectorProductCudaKernel(
const Index rows,
const Index columns,
const Index compressedRowLengths,
const Index alignedRows,
const Index paddingIndex,
const Index* columnIndexes,
const Real* values,
const Real* inVector,
Real* outVector,
Real multiplicator,
const Index gridIdx )
{
// gridIdx offsets the block index when the row count exceeds one grid.
const Index rowIdx = ( gridIdx * Cuda::getMaxGridSize() + blockIdx.x ) * blockDim.x + threadIdx.x;
if( rowIdx >= rows )
return;
Index i = rowIdx;
Index el( 0 );
Real result( 0.0 );
Index columnIndex;
while( el++ < compressedRowLengths &&
( columnIndex = columnIndexes[ i ] ) < columns &&
columnIndex != paddingIndex )
{
result += values[ i ] * inVector[ columnIndex ];
i += alignedRows;
}
outVector[ rowIdx ] = result * multiplicator;
}
#endif
template<>
class EllpackDeviceDependentCode< Devices::Cuda >
{
public:
typedef Devices::Cuda Device;
// CUDA layout is column-major with row padding: a row begins at its own
// index and successive slots of the row are alignedRows apart, which gives
// coalesced access when one thread handles one row.
template< typename Real,
typename Index >
__cuda_callable__
static Index getRowBegin( const Ellpack< Real, Device, Index >& matrix,
const Index row )
{
return row;
}
template< typename Real,
typename Index >
__cuda_callable__
static Index getRowEnd( const Ellpack< Real, Device, Index >& matrix,
const Index row )
{
return row + getElementStep( matrix ) * matrix.rowLengths;
}
template< typename Real,
typename Index >
__cuda_callable__
static Index getElementStep( const Ellpack< Real, Device, Index >& matrix )
{
return matrix.alignedRows;
}
// SpMV driver: launches one grid per getMaxGridSize() chunk of blocks.
template< typename Real,
typename Index,
typename InVector,
typename OutVector >
static void vectorProduct( const Ellpack< Real, Device, Index >& matrix,
const InVector& inVector,
OutVector& outVector,
Real multiplicator )
{
//MatrixVectorProductCuda( matrix, inVector, outVector );
#ifdef HAVE_CUDA
typedef Ellpack< Real, Device, Index > Matrix;
typedef typename Matrix::IndexType IndexType;
//Matrix* kernel_this = Cuda::passToDevice( matrix );
//InVector* kernel_inVector = Cuda::passToDevice( inVector );
//OutVector* kernel_outVector = Cuda::passToDevice( outVector );
dim3 cudaBlockSize( 256 ), cudaGridSize( Cuda::getMaxGridSize() );
const IndexType cudaBlocks = roundUpDivision( matrix.getRows(), cudaBlockSize.x );
const IndexType cudaGrids = roundUpDivision( cudaBlocks, Cuda::getMaxGridSize() );
for( IndexType gridIdx = 0; gridIdx < cudaGrids; gridIdx++ )
{
// NOTE(review): when cudaBlocks is an exact multiple of getMaxGridSize(),
// this modulo yields 0 for the last grid — verify that case is handled
// (or cannot occur) before relying on it.
if( gridIdx == cudaGrids - 1 )
cudaGridSize.x = cudaBlocks % Cuda::getMaxGridSize();
EllpackVectorProductCudaKernel
< Real, Index >
<<< cudaGridSize, cudaBlockSize >>>
( matrix.getRows(),
matrix.getColumns(),
matrix.rowLengths,
matrix.alignedRows,
matrix.getPaddingIndex(),
matrix.columnIndexes.getData(),
matrix.values.getData(),
inVector.getData(),
outVector.getData(),
multiplicator,
gridIdx );
TNL_CHECK_CUDA_DEVICE;
}
//Cuda::freeFromDevice( kernel_this );
//Cuda::freeFromDevice( kernel_inVector );
//Cuda::freeFromDevice( kernel_outVector );
TNL_CHECK_CUDA_DEVICE;
cudaDeviceSynchronize();
#endif
}
};
} //namespace Legacy
} //namespace ReferenceFormats
} //namespace SpMV
} //namespace Benchmarks
} // namespace TNL
|
GB_binop__bset_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int16)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int16)
// C=scalar+B GB (_bind1st__bset_int16)
// C=scalar+B' GB (_bind1st_tran__bset_int16)
// C=A+scalar GB (_bind2nd__bset_int16)
// C=A'+scalar GB (_bind2nd_tran__bset_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = GB_BITSET (aij, bij, int16_t, 16)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, int16_t, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT16 || GxB_NO_BSET_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// All three matrices are dense, so no mask or pattern handling is needed; the
// loop body comes from the shared template, specialized by the GB_* macros
// defined above for the BITSET/int16 operator.
void GB (_Cdense_ewise3_noaccum__bset_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bset_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
// When this operator is disabled at compile time (see GB_DISABLE above),
// report GrB_NO_VALUE so the caller falls back to the generic kernel.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bset_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
// Disabled operators fall back to the generic kernel via GrB_NO_VALUE.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the block above already returns; harmless
// artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bset_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
// Disabled operators fall back to the generic kernel via GrB_NO_VALUE.
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int16_t alpha_scalar ;
int16_t beta_scalar ;
// The alpha/beta fill scalars are only meaningful for eWiseUnion; for plain
// eWiseAdd they are left uninitialized and unused by the template.
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C is sparse/hyper; full intersection logic is in
// GB_emult_08_meta.c.
GrB_Info GB (_AemultB_08__bset_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): A sparse/hyper, B bitmap/full.  GB_BINOP_FLIP
// selects at compile time whether a flipped-argument pass is required.
GrB_Info GB (_AemultB_02__bset_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,                  // apply the operator as f(y,x)
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    { 
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    { 
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): M sparse/hyper, A and B bitmap/full; the mask
// drives the iteration (see GB_emult_04_template.c).
GrB_Info GB (_AemultB_04__bset_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult where the result C is held as a bitmap; all masking variants
// are handled inside GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bset_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply z = bitset(x, bij) across all entries of B, with the scalar x bound
// to the first operand.  Cx and Bx may alias; Bb is the optional bitmap.
GrB_Info GB (_bind1st__bset_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Bx = (int16_t *) Bx_input ;
    const int16_t xval = (*((int16_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    { 
        // only entries present in the bitmap are computed
        if (GBB (Bb, k))
        { 
            int16_t bval = GBX (Bx, k, false) ;
            Cx [k] = GB_BITSET (xval, bval, int16_t, 16) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply z = bitset(aij, y) across all entries of A, with the scalar y bound
// to the second operand.  Cx and Ax may alias; Ab is the optional bitmap.
GrB_Info GB (_bind2nd__bset_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    const int16_t yval = (*((int16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    { 
        // only entries present in the bitmap are computed
        if (GBB (Ab, k))
        { 
            int16_t aval = GBX (Ax, k, false) ;
            Cx [k] = GB_BITSET (aval, yval, int16_t, 16) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c to produce C = op (x, A').
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_BITSET (x, aij, int16_t, 16) ; \
}
GrB_Info GB (_bind1st_tran__bset_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code generated after this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c to produce C = op (A', y).
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_BITSET (aij, y, int16_t, 16) ; \
}
GrB_Info GB (_bind2nd_tran__bset_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
knn.c | #if (defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER))
#pragma GCC push_options
#pragma GCC optimize ("unroll-loops")
#endif
#include <R.h>
#include <Rinternals.h>
#include "safeomp.h"
#include "types.h"
#define MIN(a,b) ((a)<(b)?(a):(b))
// free(NULL) is already a no-op, so the old NULL guard was redundant; the
// do/while(0) wrapper fixes the dangling-else hazard of the previous bare-if
// form (an unbraced `if (x) FREE(p); else ...` would have bound the else to
// the macro's if).
#define FREE(ptr) do { free(ptr); } while (0)
// The k current nearest-neighbor candidates for one test observation.
typedef struct
{
  int k;                   // number of neighbors
  double *restrict dists;  // k candidate distances, kept largest-first
  int *restrict labels;    // class labels parallel to dists
} voters_t;
// Compute the squared Euclidean distance from each of the m rows of x
// (m x n, column-major) to the single test observation test_obs (length n),
// writing the m results into dists.
// BUG FIX: the previous version parallelized over columns j while every
// thread accumulated into the same dists[i] slots — an unsynchronized data
// race producing nondeterministic distances.  Parallelizing over rows gives
// each thread exclusive ownership of its dists[i], and the per-row
// accumulator also removes the need for the up-front memset.
static inline void classify_get_dists(cint m, cint n, cdbl_r x, cdbl_r test_obs, dbl_r dists)
{
  #pragma omp parallel for if(m*n>OMP_MIN_SIZE)
  for (int i=0; i<m; i++)
  {
    double acc = 0.0;
    for (int j=0; j<n; j++)
    {
      const double tmp = x[i + m*j] - test_obs[j];
      acc += tmp*tmp;
    }
    dists[i] = acc;
  }
}
// 1-NN special case: return the label of the single closest training row.
// Ties keep the earliest row, matching the strict `<` comparison.
static inline int classify_single1(cint m, cint n, cdbl_r x, cint_r y, cdbl_r test_obs, dbl_r dists)
{
  classify_get_dists(m, n, x, test_obs, dists);
  int best = 0;
  for (int i=1; i<m; i++)
  {
    if (dists[i] < dists[best])
      best = i;
  }
  return y[best];
}
// Re-establish descending order after a replacement at x[0].
// Precondition: x[1..len-1] is sorted largest to smallest; x[0] holds a newly
// inserted value.  The new value is sifted right to its position, shifting the
// larger elements left; y is permuted in lockstep with x.
static inline void max2min_sort(const int len, double *const restrict x, int *const restrict y)
{
  const double xnew = x[0];
  const int ynew = y[0];
  // find the first slot whose value is strictly smaller than the new one
  int pos = 1;
  while (pos < len && x[pos] >= xnew)
    pos++;
  // shift the larger prefix of the sorted tail one slot left ...
  for (int j = 1; j < pos; j++)
  {
    x[j-1] = x[j];
    y[j-1] = y[j];
  }
  // ... and drop the new element into the gap
  x[pos-1] = xnew;
  y[pos-1] = ynew;
}
// Majority vote over the k candidate labels; returns the winning 1-based
// label.  TODO for now, ties are resolved by the smallest group number.
// NOTE(review): tally reuses the k-element dists buffer, so this implicitly
// assumes every label in votes[] lies in 1..k — labels outside that range
// would index out of bounds.  Verify against callers.
static inline int vote(voters_t *const restrict voters)
{
  const int k = voters->k;
  double *const restrict tally = voters->dists;
  int *const restrict votes = voters->labels;
  int group;
  // count one vote per neighbor (labels are 1-based, hence the -1)
  memset(tally, 0, k*sizeof(*tally));
  SAFE_FOR_SIMD
  for (int i=0; i<k; i++)
    tally[votes[i]-1] += 1.0;
  // pick the label with the most votes; first (smallest) label wins ties
  group = 0;
  for (int i=1; i<k; i++)
  {
    if (tally[i] > tally[group])
      group = i;
  }
  return group+1;
}
// k-NN classification of one test observation: maintain the k smallest
// distances (largest-first so slot 0 is the current worst candidate), then
// take a majority vote over their labels.
// BUG FIX: the previous version seeded the candidate set with the first k
// distances WITHOUT sorting them, yet the scan loop assumes
// voters->dists[0] is the current maximum (max2min_sort's invariant).  With
// an unsorted seed, a true neighbor could be rejected because slot 0 held a
// small distance — e.g. dists = {1, 10, 5}, k = 2 kept {1, 10} instead of
// {1, 5}.  The seed is now sorted descending before scanning.
static inline int classify_single(voters_t *const restrict voters, cint m, cint n, cdbl_r x, cint_r y, cdbl_r test_obs, dbl_r dists)
{
  const int k = voters->k;
  classify_get_dists(m, n, x, test_obs, dists);
  // seed the candidate set with the first k observations
  for (int i=0; i<k; i++)
  {
    voters->dists[i] = dists[i];
    voters->labels[i] = y[i];
  }
  // insertion-sort the seed largest-first so slot 0 is the worst candidate
  for (int i=1; i<k; i++)
  {
    const double dtmp = voters->dists[i];
    const int ltmp = voters->labels[i];
    int j = i;
    while (j > 0 && voters->dists[j-1] < dtmp)
    {
      voters->dists[j] = voters->dists[j-1];
      voters->labels[j] = voters->labels[j-1];
      j--;
    }
    voters->dists[j] = dtmp;
    voters->labels[j] = ltmp;
  }
  // scan the remaining rows, replacing the worst candidate when beaten
  for (int i=k; i<m; i++)
  {
    if (dists[i] < voters->dists[0])
    {
      voters->dists[0] = dists[i];
      voters->labels[0] = y[i];
      max2min_sort(k, voters->dists, voters->labels);
    }
  }
  return vote(voters);
}
// R entry point: k-nearest-neighbors classification.
//   x_    : m x n training matrix (REALSXP, column-major)
//   y_    : length-m integer class labels (assumed 1-based; see vote())
//   test_ : mtest x n matrix of observations to classify
//   k_    : number of neighbors (scalar integer)
// Returns an INTSXP vector of length mtest holding the predicted labels.
// NOTE: error() longjmps back to R, so all heap buffers are freed before
// each error() call; the PROTECTed ret is reclaimed by R's error unwind.
SEXP R_knn(SEXP x_, SEXP y_, SEXP test_, SEXP k_)
{
  SEXP ret;
  cdbl_r x = REAL(x_);
  cint_r y = INTEGER(y_);
  cdbl_r test = REAL(test_);
  cint m = nrows(x_);
  cint n = ncols(x_);
  cint mtest = nrows(test_);
  cint k = INTEGER(k_)[0];
  PROTECT(ret = allocVector(INTSXP, mtest));
  int_r ret_pt = INTEGER(ret);
  // scratch: all m distances, plus one contiguously packed test row
  double *dists = malloc(m * sizeof(*dists));
  double *test_obs = malloc(n * sizeof(*test_obs));
  if (dists == NULL || test_obs == NULL)
  {
    FREE(dists);
    FREE(test_obs);
    error("OOM");
  }
  if (k == 1)
  {
    // 1-NN fast path: no voting structures needed
    for (int b=0; b<mtest; b++)
    {
      // gather row b of the column-major test matrix: test[b + j*mtest]
      for (int j=0; j<n; j++)
        test_obs[j] = (test+b)[j*mtest];
      ret_pt[b] = classify_single1(m, n, x, y, test_obs, dists);
    }
  }
  else
  {
    voters_t voters;
    // k best distances and their labels, maintained largest-first
    double *mindists = malloc(k * sizeof(*mindists));
    int *mindists_labels = malloc(k * sizeof(*mindists_labels));
    if (mindists == NULL || mindists_labels == NULL)
    {
      FREE(mindists);
      FREE(mindists_labels);
      FREE(dists);
      FREE(test_obs);
      error("OOM");
    }
    voters.k = k;
    voters.dists = mindists;
    voters.labels = mindists_labels;
    for (int b=0; b<mtest; b++)
    {
      // gather row b of the column-major test matrix
      for (int j=0; j<n; j++)
        test_obs[j] = (test+b)[j*mtest];
      ret_pt[b] = classify_single(&voters, m, n, x, y, test_obs, dists);
    }
    free(mindists);
    free(mindists_labels);
  }
  free(dists);
  free(test_obs);
  UNPROTECT(1);
  return ret;
}
|
gqsort_kernel.h | #pragma omp target teams num_teams(blocks_size) thread_limit(GQSORT_LOCAL_WORKGROUP_SIZE)
{
uint lt[GQSORT_LOCAL_WORKGROUP_SIZE+1];
uint gt[GQSORT_LOCAL_WORKGROUP_SIZE+1];
uint ltsum, gtsum, lbeg, gbeg;
#pragma omp parallel
{
const uint blockid = omp_get_team_num();
const uint localid = omp_get_thread_num();
uint i, lfrom, gfrom, ltp = 0, gtp = 0;
T lpivot, gpivot, tmp;
T *s, *sn;
// Get the sequence block assigned to this work group
block_record<T> block = blocksb[blockid];
uint start = block.start, end = block.end, pivot = block.pivot, direction = block.direction;
parent_record* pparent = parentsb + block.parent;
uint* psstart, *psend, *poldstart, *poldend, *pblockcount;
// GPU-Quicksort cannot sort in place, as the regular quicksort algorithm can.
// It therefore needs two arrays to sort things out. We start sorting in the
// direction of d -> dn and then change direction after each run of gqsort_kernel.
// Which direction we are sorting: d -> dn or dn -> d?
if (direction == 1) {
s = d;
sn = dn;
} else {
s = dn;
sn = d;
}
// Set thread __shared__ counters to zero
lt[localid] = gt[localid] = 0;
#pragma omp barrier
// Align thread accesses for coalesced reads.
// Go through data...
for(i = start + localid; i < end; i += GQSORT_LOCAL_WORKGROUP_SIZE) {
tmp = s[i];
// counting elements that are smaller ...
if (tmp < pivot)
ltp++;
// or larger compared to the pivot.
if (tmp > pivot)
gtp++;
}
lt[localid] = ltp;
gt[localid] = gtp;
#pragma omp barrier
// calculate cumulative sums
uint n;
for(i = 1; i < GQSORT_LOCAL_WORKGROUP_SIZE; i <<= 1) {
n = 2*i - 1;
if ((localid & n) == n) {
lt[localid] += lt[localid-i];
gt[localid] += gt[localid-i];
}
#pragma omp barrier
}
if ((localid & n) == n) {
lt[GQSORT_LOCAL_WORKGROUP_SIZE] = ltsum = lt[localid];
gt[GQSORT_LOCAL_WORKGROUP_SIZE] = gtsum = gt[localid];
lt[localid] = 0;
gt[localid] = 0;
}
for(i = GQSORT_LOCAL_WORKGROUP_SIZE/2; i >= 1; i >>= 1) {
n = 2*i - 1;
if ((localid & n) == n) {
plus_prescan(<[localid - i], <[localid]);
plus_prescan(>[localid - i], >[localid]);
}
#pragma omp barrier
}
// Allocate memory in the sequence this block is a part of
if (localid == 0) {
// get shared variables
psstart = &pparent->sstart;
psend = &pparent->send;
poldstart = &pparent->oldstart;
poldend = &pparent->oldend;
pblockcount = &pparent->blockcount;
// Atomic increment allocates memory to write to.
#pragma omp atomic capture
{
lbeg = *psstart;
*psstart += ltsum;
}
#pragma omp atomic capture
{
gbeg = *psend;
*psend -= gtsum;
}
gbeg -= gtsum;
//lbeg = atomicAdd(psstart, ltsum);
// Atomic is necessary since multiple blocks access this
//gbeg = atomicSub(psend, gtsum) - gtsum;
}
#pragma omp barrier
// Allocate locations for work items
lfrom = lbeg + lt[localid];
gfrom = gbeg + gt[localid];
// go thru data again writing elements to their correct position
for(i = start + localid; i < end; i += GQSORT_LOCAL_WORKGROUP_SIZE) {
tmp = s[i];
// increment counts
if (tmp < pivot)
sn[lfrom++] = tmp;
if (tmp > pivot)
sn[gfrom++] = tmp;
}
#pragma omp barrier
if (localid == 0) {
uint old_blockcount;
#pragma omp atomic capture
{
old_blockcount = *pblockcount;
(*pblockcount)--;
}
if (old_blockcount == 0) { //if (atomicSub(pblockcount, 1) == 0)
uint sstart = *psstart;
uint send = *psend;
uint oldstart = *poldstart;
uint oldend = *poldend;
// Store the pivot value between the new sequences
for(i = sstart; i < send; i ++) {
d[i] = pivot;
}
lpivot = sn[oldstart];
gpivot = sn[oldend-1];
if (oldstart < sstart) {
lpivot = median(lpivot,sn[(oldstart+sstart) >> 1], sn[sstart-1]);
}
if (send < oldend) {
gpivot = median(sn[send],sn[(oldend+send) >> 1], gpivot);
}
work_record<T>* result1 = result + 2*blockid;
work_record<T>* result2 = result1 + 1;
// change the direction of the sort.
direction ^= 1;
work_record<T> r1 = {oldstart, sstart, lpivot, direction};
*result1 = r1;
work_record<T> r2 = {send, oldend, gpivot, direction};
*result2 = r2;
}
}
}
}
|
conversion.h | /*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cusp/ell_matrix.h>
#include <cusp/exception.h>
#include <cusp/detail/host/conversion_utils.h>
#include <thrust/fill.h>
#include <thrust/extrema.h>
#include <thrust/count.h>
namespace cusp
{
namespace detail
{
namespace host
{
/////////////////////
// COO Conversions //
/////////////////////
// Convert COO to CSR by a stable counting sort on the row index:
// (1) histogram entries per row, (2) exclusive-scan into row_offsets,
// (3) scatter entries while bumping the offsets, (4) shift offsets back.
// Duplicate (i,j) entries are preserved, not summed.
template <typename Matrix1, typename Matrix2>
void coo_to_csr(const Matrix1& src, Matrix2& dst)
{
    typedef typename Matrix2::index_type IndexType;
    typedef typename Matrix2::value_type ValueType;
    dst.resize(src.num_rows, src.num_cols, src.num_entries);
    // compute number of non-zero entries per row of A
    thrust::fill(dst.row_offsets.begin(), dst.row_offsets.end(), IndexType(0));
    for (size_t n = 0; n < src.num_entries; n++)
        dst.row_offsets[src.row_indices[n]]++;
    // cumsum the num_entries per row to get dst.row_offsets[]
    IndexType cumsum = 0;
    for(size_t i = 0; i < src.num_rows; i++)
    {
        IndexType temp = dst.row_offsets[i];
        dst.row_offsets[i] = cumsum;
        cumsum += temp;
    }
    dst.row_offsets[src.num_rows] = cumsum;
    // write Aj,Ax into dst.column_indices,dst.values
    // (row_offsets[row] is advanced past each entry written)
    for(size_t n = 0; n < src.num_entries; n++)
    {
        IndexType row  = src.row_indices[n];
        IndexType dest = dst.row_offsets[row];
        dst.column_indices[dest] = src.column_indices[n];
        dst.values[dest]         = src.values[n];
        dst.row_offsets[row]++;
    }
    // undo the advancement: shift offsets right by one row
    IndexType last = 0;
    for(size_t i = 0; i <= src.num_rows; i++)
    {
        IndexType temp = dst.row_offsets[i];
        dst.row_offsets[i]  = last;
        last = temp;
    }
    //csr may contain duplicates
}
template <typename Matrix1, typename Matrix2>
void coo_to_array2d(const Matrix1& src, Matrix2& dst)
{
typedef typename Matrix2::index_type IndexType;
typedef typename Matrix2::value_type ValueType;
dst.resize(src.num_rows, src.num_cols);
thrust::fill(dst.values.begin(), dst.values.end(), ValueType(0));
for(size_t n = 0; n < src.num_entries; n++)
dst(src.row_indices[n], src.column_indices[n]) += src.values[n]; //sum duplicates
}
/////////////////////
// CSR Conversions //
/////////////////////
template <typename Matrix1, typename Matrix2>
void csr_to_coo(const Matrix1& src, Matrix2& dst)
{
typedef typename Matrix2::index_type IndexType;
typedef typename Matrix2::value_type ValueType;
dst.resize(src.num_rows, src.num_cols, src.num_entries);
// TODO replace with offsets_to_indices
for(size_t i = 0; i < src.num_rows; i++)
for(IndexType jj = src.row_offsets[i]; jj < src.row_offsets[i + 1]; jj++)
dst.row_indices[jj] = i;
cusp::copy(src.column_indices, dst.column_indices);
cusp::copy(src.values, dst.values);
}
// Convert CSR to DIA: discover which diagonals are occupied, number them,
// then scatter values into the (row, diagonal) layout.  Diagonal offset of
// entry (i,j) is j - i; the map is shifted by num_rows so it is nonnegative.
template <typename Matrix1, typename Matrix2>
void csr_to_dia(const Matrix1& src, Matrix2& dst,
                const size_t alignment = 32)
{
    typedef typename Matrix2::index_type IndexType;
    typedef typename Matrix2::value_type ValueType;
    // compute number of occupied diagonals and enumerate them
    size_t num_diagonals = 0;
    cusp::array1d<IndexType,cusp::host_memory> diag_map(src.num_rows + src.num_cols, 0);
    for(size_t i = 0; i < src.num_rows; i++)
    {
        for(IndexType jj = src.row_offsets[i]; jj < src.row_offsets[i+1]; jj++)
        {
            size_t j = src.column_indices[jj];
            size_t map_index = (src.num_rows - i) + j; //offset shifted by + num_rows
            if(diag_map[map_index] == 0)
            {
                diag_map[map_index] = 1;
                num_diagonals++;
            }
        }
    }
    // allocate DIA structure
    dst.resize(src.num_rows, src.num_cols, src.num_entries, num_diagonals, alignment);
    // fill in diagonal_offsets array; diag_map now stores each diagonal's
    // index into dst.values instead of a 0/1 occupancy flag
    for(size_t n = 0, diag = 0; n < src.num_rows + src.num_cols; n++)
    {
        if(diag_map[n] == 1)
        {
            diag_map[n] = diag;
            dst.diagonal_offsets[diag] = (IndexType) n - (IndexType) src.num_rows;
            diag++;
        }
    }
    // fill in values array (unused slots stay zero)
    thrust::fill(dst.values.values.begin(), dst.values.values.end(), ValueType(0));
    for(size_t i = 0; i < src.num_rows; i++)
    {
        for(IndexType jj = src.row_offsets[i]; jj < src.row_offsets[i+1]; jj++)
        {
            size_t j = src.column_indices[jj];
            size_t map_index = (src.num_rows - i) + j; //offset shifted by + num_rows
            size_t diag = diag_map[map_index];
            dst.values(i, diag) = src.values[jj];
        }
    }
}
// Convert CSR to HYB: the first num_entries_per_row entries of each row go
// into the ELL part; any overflow spills into the COO part (which therefore
// ends up sorted by row).
template <typename Matrix1, typename Matrix2>
void csr_to_hyb(const Matrix1& src, Matrix2& dst,
                const size_t num_entries_per_row,
                const size_t alignment = 32)
{
    typedef typename Matrix2::index_type IndexType;
    typedef typename Matrix2::value_type ValueType;
    // The ELL portion of the HYB matrix will have 'num_entries_per_row' columns.
    // Nonzero values that do not fit within the ELL structure are placed in the
    // COO format portion of the HYB matrix.
    // compute number of nonzeros in the ELL and COO portions
    size_t num_ell_entries = 0;
    for(size_t i = 0; i < src.num_rows; i++)
        num_ell_entries += thrust::min<size_t>(num_entries_per_row, src.row_offsets[i+1] - src.row_offsets[i]);
    IndexType num_coo_entries = src.num_entries - num_ell_entries;
    dst.resize(src.num_rows, src.num_cols,
               num_ell_entries, num_coo_entries,
               num_entries_per_row, alignment);
    const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index;
    // pad out ELL format with zeros
    thrust::fill(dst.ell.column_indices.values.begin(), dst.ell.column_indices.values.end(), invalid_index);
    thrust::fill(dst.ell.values.values.begin(),         dst.ell.values.values.end(),         ValueType(0));
    for(size_t i = 0, coo_nnz = 0; i < src.num_rows; i++)
    {
        size_t n = 0;
        IndexType jj = src.row_offsets[i];
        // copy up to num_cols_per_row values of row i into the ELL
        while(jj < src.row_offsets[i+1] && n < num_entries_per_row)
        {
            dst.ell.column_indices(i,n) = src.column_indices[jj];
            dst.ell.values(i,n)         = src.values[jj];
            jj++, n++;
        }
        // copy any remaining values in row i into the COO
        while(jj < src.row_offsets[i+1])
        {
            dst.coo.row_indices[coo_nnz]    = i;
            dst.coo.column_indices[coo_nnz] = src.column_indices[jj];
            dst.coo.values[coo_nnz]         = src.values[jj];
            jj++; coo_nnz++;
        }
    }
}
// Convert CSR to ELL, keeping at most num_entries_per_row entries per row;
// entries beyond that are silently dropped (use csr_to_hyb to retain them).
// NOTE(review): the OpenMP loops iterate over size_t; some OpenMP 2.x
// compilers (e.g. MSVC) require signed loop variables — confirm targets.
template <typename Matrix1, typename Matrix2>
void csr_to_ell(const Matrix1& src, Matrix2& dst,
                const size_t num_entries_per_row, const size_t alignment = 32)
{
    typedef typename Matrix2::index_type IndexType;
    typedef typename Matrix2::value_type ValueType;
    // compute number of nonzeros that fit in the ELL structure
    size_t num_entries = 0;
    #pragma omp parallel for reduction( +: num_entries)
    for(size_t i = 0; i < src.num_rows; i++)
        num_entries += thrust::min<size_t>(num_entries_per_row, src.row_offsets[i+1] - src.row_offsets[i]);
    dst.resize(src.num_rows, src.num_cols, num_entries, num_entries_per_row, alignment);
    const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index;
    // pad out ELL format with zeros
    thrust::fill(dst.column_indices.values.begin(), dst.column_indices.values.end(), invalid_index);
    thrust::fill(dst.values.values.begin(),         dst.values.values.end(),         ValueType(0));
    // rows are independent, so the fill loop parallelizes cleanly
    #pragma omp parallel for
    for(size_t i = 0; i < src.num_rows; i++)
    {
        size_t n = 0;
        IndexType jj = src.row_offsets[i];
        // copy up to num_cols_per_row values of row i into the ELL
        while(jj < src.row_offsets[i+1] && n < num_entries_per_row)
        {
            dst.column_indices(i,n) = src.column_indices[jj];
            dst.values(i,n)         = src.values[jj];
            jj++, n++;
        }
    }
}
template <typename Matrix1, typename Matrix2>
void csr_to_array2d(const Matrix1& src, Matrix2& dst)
{
typedef typename Matrix2::index_type IndexType;
typedef typename Matrix2::value_type ValueType;
dst.resize(src.num_rows, src.num_cols);
thrust::fill(dst.values.begin(), dst.values.end(), ValueType(0));
for(size_t i = 0; i < src.num_rows; i++)
for(IndexType jj = src.row_offsets[i]; jj < src.row_offsets[i+1]; jj++)
dst(i, src.column_indices[jj]) += src.values[jj]; //sum duplicates
}
/////////////////////
// DIA Conversions //
/////////////////////
// Convert DIA to CSR: first count the structurally valid, nonzero entries,
// then emit them row by row (columns appear in diagonal order within a row).
// NOTE(review): the j >= 0 test assumes IndexType is signed — an unsigned
// IndexType would make the bound check vacuous; confirm instantiations.
template <typename Matrix1, typename Matrix2>
void dia_to_csr(const Matrix1& src, Matrix2& dst)
{
    typedef typename Matrix2::index_type IndexType;
    typedef typename Matrix2::value_type ValueType;
    size_t num_entries = 0;
    size_t num_diagonals = src.diagonal_offsets.size();
    // count nonzero entries (explicit zeros in the DIA bands are dropped)
    for(size_t i = 0; i < src.num_rows; i++)
    {
        for(size_t n = 0; n < num_diagonals; n++)
        {
            const IndexType j = i + src.diagonal_offsets[n];
            if(j >= 0 && static_cast<size_t>(j) < src.num_cols && src.values(i,n) != ValueType(0))
                num_entries++;
        }
    }
    dst.resize(src.num_rows, src.num_cols, num_entries);
    num_entries = 0;
    dst.row_offsets[0] = 0;
    // copy nonzero entries to CSR structure
    for(size_t i = 0; i < src.num_rows; i++)
    {
        for(size_t n = 0; n < num_diagonals; n++)
        {
            const IndexType j = i + src.diagonal_offsets[n];
            if(j >= 0 && static_cast<size_t>(j) < src.num_cols)
            {
                const ValueType value = src.values(i, n);
                if (value != ValueType(0))
                {
                    dst.column_indices[num_entries] = j;
                    dst.values[num_entries] = value;
                    num_entries++;
                }
            }
        }
        dst.row_offsets[i + 1] = num_entries;
    }
}
/////////////////////
// ELL Conversions //
/////////////////////
template <typename Matrix1, typename Matrix2>
void ell_to_coo(const Matrix1& src, Matrix2& dst)
{
typedef typename Matrix2::index_type IndexType;
typedef typename Matrix2::value_type ValueType;
const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index;
dst.resize(src.num_rows, src.num_cols, src.num_entries);
size_t num_entries = 0;
const size_t num_entries_per_row = src.column_indices.num_cols;
for(size_t i = 0; i < src.num_rows; i++)
{
for(size_t n = 0; n < num_entries_per_row; n++)
{
const IndexType j = src.column_indices(i,n);
const ValueType v = src.values(i,n);
if(j != invalid_index)
{
dst.row_indices[num_entries] = i;
dst.column_indices[num_entries] = j;
dst.values[num_entries] = v;
num_entries++;
}
}
}
}
template <typename Matrix1, typename Matrix2>
void ell_to_csr(const Matrix1& src, Matrix2& dst)
{
typedef typename Matrix2::index_type IndexType;
typedef typename Matrix2::value_type ValueType;
const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index;
dst.resize(src.num_rows, src.num_cols, src.num_entries);
size_t num_entries = 0;
dst.row_offsets[0] = 0;
const size_t num_entries_per_row = src.column_indices.num_cols;
for(size_t i = 0; i < src.num_rows; i++)
{
for(size_t n = 0; n < num_entries_per_row; n++)
{
const IndexType j = src.column_indices(i,n);
const ValueType v = src.values(i,n);
if(j != invalid_index)
{
dst.column_indices[num_entries] = j;
dst.values[num_entries] = v;
num_entries++;
}
}
dst.row_offsets[i + 1] = num_entries;
}
}
/////////////////////
// HYB Conversions //
/////////////////////
// Convert HYB to COO by merging, for each row, the ELL entries with the
// COO-part entries for that row.  Assumes the HYB's COO part is sorted by
// row (the invariant csr_to_hyb establishes).
template <typename Matrix1, typename Matrix2>
void hyb_to_coo(const Matrix1& src, Matrix2& dst)
{
    typedef typename Matrix2::index_type IndexType;
    typedef typename Matrix2::value_type ValueType;
    dst.resize(src.num_rows, src.num_cols, src.num_entries);
    const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index;
    const size_t num_entries_per_row = src.ell.column_indices.num_cols;
    size_t num_entries = 0;
    size_t coo_progress = 0;  // cursor into the (row-sorted) COO part
    // merge each row of the ELL and COO parts into a single COO row
    for(size_t i = 0; i < src.num_rows; i++)
    {
        // append the i-th row from the ELL part
        for(size_t n = 0; n < num_entries_per_row; n++)
        {
            const IndexType j = src.ell.column_indices(i,n);
            const ValueType v = src.ell.values(i,n);
            if(j != invalid_index)
            {
                dst.row_indices[num_entries]    = i;
                dst.column_indices[num_entries] = j;
                dst.values[num_entries]         = v;
                num_entries++;
            }
        }
        // append the i-th row from the COO part
        while (coo_progress < src.coo.num_entries && static_cast<size_t>(src.coo.row_indices[coo_progress]) == i)
        {
            dst.row_indices[num_entries]    = i;
            dst.column_indices[num_entries] = src.coo.column_indices[coo_progress];
            dst.values[num_entries]         = src.coo.values[coo_progress];
            num_entries++;
            coo_progress++;
        }
    }
}
// Convert HYB to CSR by merging, for each row, the ELL entries with the
// COO-part entries for that row, recording per-row offsets.  Assumes the
// HYB's COO part is sorted by row (the invariant csr_to_hyb establishes).
template <typename Matrix1, typename Matrix2>
void hyb_to_csr(const Matrix1& src, Matrix2& dst)
{
    typedef typename Matrix2::index_type IndexType;
    typedef typename Matrix2::value_type ValueType;
    dst.resize(src.num_rows, src.num_cols, src.num_entries);
    const IndexType invalid_index = cusp::ell_matrix<IndexType, ValueType, cusp::host_memory>::invalid_index;
    const size_t num_entries_per_row = src.ell.column_indices.num_cols;
    size_t num_entries = 0;
    dst.row_offsets[0] = 0;
    size_t coo_progress = 0;  // cursor into the (row-sorted) COO part
    // merge each row of the ELL and COO parts into a single CSR row
    for(size_t i = 0; i < src.num_rows; i++)
    {
        // append the i-th row from the ELL part
        for(size_t n = 0; n < num_entries_per_row; n++)
        {
            const IndexType j = src.ell.column_indices(i,n);
            const ValueType v = src.ell.values(i,n);
            if(j != invalid_index)
            {
                dst.column_indices[num_entries] = j;
                dst.values[num_entries]         = v;
                num_entries++;
            }
        }
        // append the i-th row from the COO part
        while (coo_progress < src.coo.num_entries && static_cast<size_t>(src.coo.row_indices[coo_progress]) == i)
        {
            dst.column_indices[num_entries] = src.coo.column_indices[coo_progress];
            dst.values[num_entries]         = src.coo.values[coo_progress];
            num_entries++;
            coo_progress++;
        }
        dst.row_offsets[i + 1] = num_entries;
    }
}
/////////////////////////
// Array1d Conversions //
/////////////////////////
template <typename Matrix1, typename Matrix2>
void array2d_to_array1d(const Matrix1& src, Matrix2& dst)
{
if (src.num_rows == 0 && src.num_cols == 0)
{
dst.resize(0);
}
else if (src.num_cols == 1)
{
dst.resize(src.num_rows);
for (size_t i = 0; i < src.num_rows; i++)
dst[i] = src(i,0);
}
else if (src.num_rows == 1)
{
dst.resize(src.num_cols);
for (size_t j = 0; j < src.num_cols; j++)
dst[j] = src(0,j);
}
else
{
throw cusp::format_conversion_exception("array2d to array1d conversion is only defined for row or column vectors");
}
}
/////////////////////////
// Array2d Conversions //
/////////////////////////
template <typename Matrix1, typename Matrix2>
void array1d_to_array2d(const Matrix1& src, Matrix2& dst)
{
dst.resize(src.size(),1);
for (size_t i = 0; i < src.size(); i++)
dst(i,0) = src[i];
}
template <typename Matrix1, typename Matrix2>
void array2d_to_coo(const Matrix1& src, Matrix2& dst)
{
typedef typename Matrix2::index_type IndexType;
typedef typename Matrix2::value_type ValueType;
// count number of nonzero entries in array
size_t nnz = 0;
for(size_t i = 0; i < src.num_rows; i++)
{
for(size_t j = 0; j < src.num_cols; j++)
{
if (src(i,j) != ValueType(0))
nnz++;
}
}
dst.resize(src.num_rows, src.num_cols, nnz);
nnz = 0;
for(size_t i = 0; i < src.num_rows; i++)
{
for(size_t j = 0; j < src.num_cols; j++)
{
if (src(i,j) != ValueType(0))
{
dst.row_indices[nnz] = i;
dst.column_indices[nnz] = j;
dst.values[nnz] = src(i,j);
nnz++;
}
}
}
}
// Convert a dense array2d to CSR.  The nonzero count is obtained by
// subtracting the number of stored zeros (thrust::count over the dense
// value array) from the total entry count.
template <typename Matrix1, typename Matrix2>
void array2d_to_csr(const Matrix1& src, Matrix2& dst)
{
    typedef typename Matrix2::index_type IndexType;
    typedef typename Matrix2::value_type ValueType;
    IndexType nnz = src.num_entries - thrust::count(src.values.begin(), src.values.end(), ValueType(0));
    dst.resize(src.num_rows, src.num_cols, nnz);
    IndexType num_entries = 0;
    for(size_t i = 0; i < src.num_rows; i++)
    {
        // offset of row i is the number of entries emitted so far
        dst.row_offsets[i] = num_entries;
        for(size_t j = 0; j < src.num_cols; j++)
        {
            if (src(i,j) != ValueType(0))
            {
                dst.column_indices[num_entries] = j;
                dst.values[num_entries] = src(i,j);
                num_entries++;
            }
        }
    }
    dst.row_offsets[src.num_rows] = num_entries;
}
} // end namespace host
} // end namespace detail
} // end namespace cusp
|
MatrixFreeSolver.h | #ifndef __MatrixFreeSolver_H__
#define __MatrixFreeSolver_H__
#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Sparse>
using SystemMatrixType = Eigen::SparseMatrix<Real>;
namespace SPH
{
class MatrixReplacement;
}
namespace Eigen
{
namespace internal
{
template<> struct traits<SPH::MatrixReplacement> : public Eigen::internal::traits<SystemMatrixType> {};
}
}
namespace SPH
{
/** Replacement of the matrix in the linear system which is required for a
* matrix-free solver. */
	/** Replacement of the matrix in the linear system which is required for a
	* matrix-free solver.  Follows Eigen's matrix-free wrapper pattern: the
	* actual product y = A*x is delegated to a user-supplied callback, so no
	* matrix is ever stored. */
	class MatrixReplacement : public Eigen::EigenBase<MatrixReplacement>
	{
	public:
		// Required typedefs, constants, and method:
		typedef Real Scalar;
		typedef Real RealScalar;
		typedef int StorageIndex;
		/** callback signature: (input vector, output vector, user data) */
		typedef void(*MatrixVecProdFct) (const Real*, Real*, void *);
		enum
		{
			ColsAtCompileTime = Eigen::Dynamic,
			MaxColsAtCompileTime = Eigen::Dynamic,
			IsRowMajor = false
		};
		// the implicit operator is square: dim x dim
		Index rows() const { return m_dim; }
		Index cols() const { return m_dim; }
		/** Lazy product expression; evaluation invokes the callback via
		* Eigen's generic_product_impl specialization (defined elsewhere). */
		template<typename Rhs>
		Eigen::Product<MatrixReplacement, Rhs, Eigen::AliasFreeProduct> operator*(const Eigen::MatrixBase<Rhs>& x) const
		{
			return Eigen::Product<MatrixReplacement, Rhs, Eigen::AliasFreeProduct>(*this, x.derived());
		}
		MatrixReplacement(const unsigned int dim, MatrixVecProdFct fct, void *userData) : m_dim(dim), m_matrixVecProdFct(fct), m_userData(userData) {}
		void * getUserData() { return m_userData; }
		MatrixVecProdFct getMatrixVecProdFct() { return m_matrixVecProdFct; }
	protected:
		unsigned int m_dim;          // system dimension
		void *m_userData;            // opaque pointer handed to the callback
		/** matrix vector product callback */
		MatrixVecProdFct m_matrixVecProdFct;
	};
/** Matrix-free Jacobi preconditioner */
	/** Matrix-free Jacobi preconditioner for scalar (1 DoF per row) systems.
	* The diagonal of the system matrix is obtained through a callback, so
	* the preconditioner never touches the (non-existent) matrix itself. */
	class JacobiPreconditioner1D
	{
	public:
		typedef typename SystemMatrixType::StorageIndex StorageIndex;
		/** callback signature: (row index, out diagonal element, user data) */
		typedef void(*DiagonalMatrixElementFct) (const unsigned int, Real&, void *);
		enum {
			ColsAtCompileTime = Eigen::Dynamic,
			MaxColsAtCompileTime = Eigen::Dynamic
		};
		JacobiPreconditioner1D() {}
		/** Store the problem size and the diagonal-element callback. */
		void init(const unsigned int dim, DiagonalMatrixElementFct fct, void *userData)
		{
			m_dim = dim; m_diagonalElementFct = fct; m_userData = userData;
		}
		Eigen::Index rows() const { return m_dim; }
		Eigen::Index cols() const { return m_dim; }
		Eigen::ComputationInfo info() { return Eigen::Success; }
		// analyzePattern/factorize are no-ops: there is no stored matrix
		template<typename MatType>
		JacobiPreconditioner1D& analyzePattern(const MatType&) { return *this; }
		template<typename MatType>
		JacobiPreconditioner1D& factorize(const MatType& mat) { return *this; }
		/** Query each diagonal element via the callback (in parallel) and
		* cache its reciprocal; the matrix argument is unused. */
		template<typename MatType>
		JacobiPreconditioner1D& compute(const MatType& mat)
		{
			m_invDiag.resize(m_dim);
			#pragma omp parallel default(shared)
			{
				#pragma omp for schedule(static)
				for (int i = 0; i < (int)m_dim; i++)
				{
					Real res;
					m_diagonalElementFct(i, res, m_userData);
					m_invDiag[i] = static_cast<Real>(1.0) / res;
				}
			}
			return *this;
		}
		/** Apply the preconditioner: x = D^{-1} b (element-wise scaling). */
		template<typename Rhs, typename Dest>
		void _solve_impl(const Rhs& b, Dest& x) const
		{
			x = m_invDiag.array() * b.array();
		}
		template<typename Rhs>
		inline const Eigen::Solve<JacobiPreconditioner1D, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const
		{
			return Eigen::Solve<JacobiPreconditioner1D, Rhs>(*this, b.derived());
		}
	protected:
		unsigned int m_dim;
		/** diagonal matrix element callback */
		DiagonalMatrixElementFct m_diagonalElementFct;
		void *m_userData;
		/** cached reciprocal of the diagonal, used by _solve_impl */
		VectorXr m_invDiag;
	};
/** Matrix-free Jacobi (diagonal) preconditioner for systems with three
* unknowns per node. The callback delivers the three diagonal entries of
* node i as a Vector3r; the system matrix is never assembled.
* Satisfies Eigen's preconditioner interface (analyzePattern / factorize /
* compute / solve). */
class JacobiPreconditioner3D
{
public:
	typedef typename SystemMatrixType::StorageIndex StorageIndex;
	/** Callback writing the three diagonal elements of node i into its Vector3r& argument. */
	typedef void(*DiagonalMatrixElementFct) (const unsigned int, Vector3r&, void *);
	enum {
		ColsAtCompileTime = Eigen::Dynamic,
		MaxColsAtCompileTime = Eigen::Dynamic
	};
	// Fix: initialize all members. The previous default constructor left
	// m_dim, the callback, and the user-data pointer uninitialized, making
	// any compute() call before init() undefined behavior.
	JacobiPreconditioner3D() : m_dim(0), m_diagonalElementFct(nullptr), m_userData(nullptr) {}
	/** Set number of nodes, diagonal callback and opaque user data.
	* Must be called before compute(). */
	void init(const unsigned int dim, DiagonalMatrixElementFct fct, void *userData)
	{
		m_dim = dim; m_diagonalElementFct = fct; m_userData = userData;
	}
	// System size is 3 scalar unknowns per node.
	Eigen::Index rows() const { return 3*m_dim; }
	Eigen::Index cols() const { return 3*m_dim; }
	Eigen::ComputationInfo info() { return Eigen::Success; }
	// Nothing to analyze/factorize for a diagonal preconditioner.
	template<typename MatType>
	JacobiPreconditioner3D& analyzePattern(const MatType&) { return *this; }
	template<typename MatType>
	JacobiPreconditioner3D& factorize(const MatType&) { return *this; }
	/** Query the diagonal of every node (in parallel) and cache the
	* component-wise inverses in a flat 3*m_dim vector. */
	template<typename MatType>
	JacobiPreconditioner3D& compute(const MatType&)
	{
		m_invDiag.resize(m_dim*3);
		#pragma omp parallel default(shared)
		{
			#pragma omp for schedule(static)
			for (int i = 0; i < (int)m_dim; i++)
			{
				Vector3r res;
				m_diagonalElementFct(i, res, m_userData);
				m_invDiag[3*i]   = static_cast<Real>(1.0) / res[0];
				m_invDiag[3*i+1] = static_cast<Real>(1.0) / res[1];
				m_invDiag[3*i+2] = static_cast<Real>(1.0) / res[2];
			}
		}
		return *this;
	}
	/** Apply the preconditioner: x = D^{-1} b, component-wise. */
	template<typename Rhs, typename Dest>
	void _solve_impl(const Rhs& b, Dest& x) const
	{
		x = m_invDiag.array() * b.array();
	}
	template<typename Rhs>
	inline const Eigen::Solve<JacobiPreconditioner3D, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const
	{
		return Eigen::Solve<JacobiPreconditioner3D, Rhs>(*this, b.derived());
	}
protected:
	unsigned int m_dim;                 // number of nodes (3 unknowns each)
	/** diagonal matrix element callback */
	DiagonalMatrixElementFct m_diagonalElementFct;
	void *m_userData;                   // opaque pointer forwarded to the callback
	VectorXr m_invDiag;                 // cached inverse diagonal, length 3*m_dim
};
/** Matrix-free 3x3 block Jacobi preconditioner.
* The callback delivers the 3x3 diagonal block of node i; compute() caches
* the explicit inverse of each block, and solve applies those inverses
* block-wise. Satisfies Eigen's preconditioner interface. */
class BlockJacobiPreconditioner3D
{
public:
	typedef typename SystemMatrixType::StorageIndex StorageIndex;
	/** Callback writing the 3x3 diagonal block of node i into its Matrix3r& argument. */
	typedef void(*DiagonalMatrixElementFct) (const unsigned int, Matrix3r&, void *);
	enum {
		ColsAtCompileTime = Eigen::Dynamic,
		MaxColsAtCompileTime = Eigen::Dynamic
	};
	// Fix: initialize all members. The previous default constructor left
	// m_dim, the callback, and the user-data pointer uninitialized, making
	// any compute() call before init() undefined behavior.
	BlockJacobiPreconditioner3D() : m_dim(0), m_diagonalElementFct(nullptr), m_userData(nullptr) {}
	/** Set number of nodes, diagonal-block callback and opaque user data.
	* Must be called before compute(). */
	void init(const unsigned int dim, DiagonalMatrixElementFct fct, void *userData)
	{
		m_dim = dim; m_diagonalElementFct = fct; m_userData = userData;
	}
	// System size is 3 scalar unknowns per node.
	Eigen::Index rows() const { return 3 * m_dim; }
	Eigen::Index cols() const { return 3 * m_dim; }
	Eigen::ComputationInfo info() { return Eigen::Success; }
	// Nothing to analyze/factorize for a block-diagonal preconditioner.
	template<typename MatType>
	BlockJacobiPreconditioner3D& analyzePattern(const MatType&) { return *this; }
	template<typename MatType>
	BlockJacobiPreconditioner3D& factorize(const MatType&) { return *this; }
	/** Query each 3x3 diagonal block (in parallel) and cache its inverse. */
	template<typename MatType>
	BlockJacobiPreconditioner3D& compute(const MatType&)
	{
		m_invDiag.resize(m_dim);
		#pragma omp parallel default(shared)
		{
			#pragma omp for schedule(static)
			for (int i = 0; i < (int)m_dim; i++)
			{
				Matrix3r res;
				m_diagonalElementFct(i, res, m_userData);
				m_invDiag[i] = res.inverse();
			}
		}
		return *this;
	}
	/** Apply the preconditioner block-wise: x_i = D_i^{-1} b_i for each node i. */
	template<typename Rhs, typename Dest>
	void _solve_impl(const Rhs& b, Dest& x) const
	{
		#pragma omp parallel default(shared)
		{
			#pragma omp for schedule(static)
			for (int i = 0; i < (int)m_dim; i++)
			{
				// Fix: operate on the expressions directly. The previous code
				// cast x and b to VectorXr& with static_cast, which is
				// undefined behavior whenever Dest/Rhs are not actually
				// VectorXr (e.g. Eigen expression or Map types).
				x.template segment<3>(3 * i) = m_invDiag[i] * b.template segment<3>(3 * i);
			}
		}
	}
	template<typename Rhs>
	inline const Eigen::Solve<BlockJacobiPreconditioner3D, Rhs> solve(const Eigen::MatrixBase<Rhs>& b) const
	{
		return Eigen::Solve<BlockJacobiPreconditioner3D, Rhs>(*this, b.derived());
	}
protected:
	unsigned int m_dim;                 // number of nodes (3 unknowns each)
	/** diagonal matrix element callback */
	DiagonalMatrixElementFct m_diagonalElementFct;
	void *m_userData;                   // opaque pointer forwarded to the callback
	std::vector<Matrix3r> m_invDiag;    // cached inverse 3x3 diagonal blocks
};
}
namespace Eigen
{
namespace internal
{
using namespace SPH;
/** Implementation of the matrix-free matrix vector product */
// Eigen dispatches Product<MatrixReplacement, Rhs> evaluation here; the
// actual work is forwarded to the user callback stored in MatrixReplacement.
template<typename Rhs>
struct generic_product_impl<MatrixReplacement, Rhs, SparseShape, DenseShape, GemvProduct> // GEMV stands for generic matrix-vector
: generic_product_impl_base<MatrixReplacement, Rhs, generic_product_impl<MatrixReplacement, Rhs> >
{
typedef typename Product<MatrixReplacement, Rhs>::Scalar Scalar;
template<typename Dest>
static void scaleAndAddTo(Dest& dst, const MatrixReplacement& lhs, const Rhs& rhs, const Scalar& alpha)
{
// This method should implement "dst += alpha * lhs * rhs" inplace,
// however, for iterative solvers, alpha is always equal to 1, so let's not bother about it.
assert(alpha == Scalar(1) && "scaling is not implemented");
// NOTE(review): taking &rhs(0)/&dst(0) assumes both are contiguous dense
// vectors — confirm callers never pass strided expressions here.
const Real *vec = &rhs(0);
Real *res = &dst(0);
// const_cast is needed only because the accessors on MatrixReplacement
// are non-const; the object itself is not modified.
MatrixReplacement& lhs_ = const_cast<MatrixReplacement&>(lhs);
// NOTE(review): the callback writes the result into res directly; if dst
// may hold a nonzero value, the callback must do the accumulation — verify.
lhs_.getMatrixVecProdFct()(vec, res, lhs_.getUserData());
}
};
}
}
#endif
|
GB_binop__lxor_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint32)
// A*D function (colscale): GB (_AxD__lxor_uint32)
// D*A function (rowscale): GB (_DxB__lxor_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint32)
// C=scalar+B GB (_bind1st__lxor_uint32)
// C=scalar+B' GB (_bind1st_tran__lxor_uint32)
// C=A+scalar GB (_bind2nd__lxor_uint32)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT32 || GxB_NO_LXOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled placeholder: LXOR is not in the list above, so the code
// generator emits this stub under "#if 0"; it is never compiled.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
// The loop body comes from the included template, specialized by the
// GB_* macros defined above for the LXOR / uint32_t operator.
void GB (_Cdense_ewise3_noaccum__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse matrix B into dense matrix C with the LXOR op.
// B_ek_slicing/B_ntasks/B_nthreads — presumably the caller-computed parallel
// slicing of B's entries; verify against GB_ek_slice.h.
GrB_Info GB (_Cdense_accumB__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// kernel compiled out (see GB_DISABLE above): caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate scalar b (passed as untyped GB_void*) into dense C.
GrB_Info GB (_Cdense_accumb__lxor_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out: caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable duplicate return emitted by the code generator; harmless
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, using LXOR
// as the multiply operator; the loop comes from the colscale template.
GrB_Info GB (_AxD__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// raw output values array; filled by the included template
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, using LXOR
// as the multiply operator; the loop comes from the rowscale template.
GrB_Info GB (_DxB__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// raw output values array; filled by the included template
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B (optionally masked by M / !M) with LXOR as the "+".
// When is_eWiseUnion is true, alpha/beta scalars substitute for entries
// missing from A or B respectively (per the eWiseUnion semantics).
GrB_Info GB (_AaddB__lxor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces, freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
// typecast-free unpack of the two fill-in scalars
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with LXOR, where C is sparse/hypersparse,
// with optional mask M or complemented mask !M.
GrB_Info GB (_AemultB_08__lxor_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B with LXOR, where A is sparse/hyper
// and B is bitmap/full. GB_BINOP_FLIP is 0 for LXOR (commutative), so the
// flipxy argument needs no special handling here.
GrB_Info GB (_AemultB_02__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with LXOR, where the mask M is
// sparse/hyper and both A and B are bitmap/full.
GrB_Info GB (_AemultB_04__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) with LXOR, where C is bitmap.
GrB_Info GB (_AemultB_bitmap__lxor_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// bind1st: Cx[p] = lxor(x, Bx[p]) for all bnz entries, with the scalar x
// bound as the first operand. Bb is B's bitmap (NULL when B is full);
// entries absent from the bitmap are skipped.
GrB_Info GB (_bind1st__lxor_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// GBB tests the bitmap; GBX loads the (possibly iso) value
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
// logical xor on the truth values of x and bij
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// bind2nd: Cx[p] = lxor(Ax[p], y) for all anz entries, with the scalar y
// bound as the second operand. Ab is A's bitmap (NULL when A is full);
// entries absent from the bitmap are skipped.
GrB_Info GB (_bind2nd__lxor_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// GBB tests the bitmap; GBX loads the (possibly iso) value
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
// logical xor on the truth values of aij and y
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// C = lxor(x, A'): transpose A and apply the operator with scalar x bound
// first. The per-entry work is GB_CAST_OP (defined just above), expanded
// inside the included transpose template.
GrB_Info GB (_bind1st_tran__lxor_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// C = lxor(A', y): transpose A and apply the operator with scalar y bound
// second. The per-entry work is GB_CAST_OP (defined just above), expanded
// inside the included transpose template.
GrB_Info GB (_bind2nd_tran__lxor_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
2symbol_new.h | ////
//// Created by nikita on 30.09.2020.
////
//
//#ifndef CPU_2SYMBOL_NEW_H
//#define CPU_2SYMBOL_NEW_H
//
//
//
//
//#include <vector>
//#include <cmath>
//#include <bitset>
//
//
//template<class Input>
//inline void process_cubes_antidiag_bin(int lower_bound, int upper_bound, int left_edge, int top_edge,
// Input braid_ones,
// Input *bitset_left_strand_map,
// Input *bitset_top_strand_map,
// Input *a_reverse, Input *b) {
//
// for (int j = lower_bound; j < upper_bound; ++j) {
// Input left_cap, symbols, combing_condition, rev_combing_cond, top_strand_shifted;
//
// Input left_strand = bitset_left_strand_map[left_edge + j];
// Input top_strand = bitset_top_strand_map[top_edge + j];
// Input symbol_a = a_reverse[left_edge + j];
// Input symbol_b = b[top_edge + j];
//
// int rev_counter = (sizeof(Input) * 8 - 2);
// Input mask = Input(1);
//// Input mask_r = Input(1) << rev_counter;
//
//
// // upper half
//#pragma GCC unroll 128
// for (int inside_diag_num = 0; inside_diag_num < sizeof(Input) * 8 / 2 - 1; ++inside_diag_num) {
// left_cap = left_strand >> rev_counter;
// symbols = ~(((symbol_a >> rev_counter)) ^ symbol_b);
// symbols &= (symbols >> 1) & braid_ones;
// combing_condition = mask & (symbols | (((~(left_cap)) & top_strand)));
// rev_combing_cond = combing_condition ^ braid_ones;
//
// if (combing_condition) {
// top_strand_shifted = top_strand << rev_counter;
// top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap);
//
// combing_condition <<= rev_counter;
// rev_combing_cond = combing_condition ^ braid_ones;
//
// left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted);
// }
//
//
// rev_counter -= 2;
// mask = (mask << 2) | Input(1);
//// mask_r = mask_r | (mask_r >> 2);
// }
//
// // center
// symbols = (~(symbol_a ^ symbol_b));
// symbols &= (symbols >> 1) & braid_ones;
// combing_condition = (symbols | ((~left_strand) & top_strand));
// rev_combing_cond = combing_condition ^ braid_ones;
// if (combing_condition) {
// top_strand_shifted = top_strand;
// top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_strand);
// left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted);
// }
//
//
// mask = braid_ones;
//// mask_r = braid_ones;
//
// //lower half
//#pragma GCC unroll 128
// for (int inside_diag_num = 0; inside_diag_num < sizeof(Input) * 8 / 2 - 1; ++inside_diag_num) {
// mask <<= 2;
//// mask_r >>= 2;
//
// left_cap = left_strand << (2 * (inside_diag_num + 1));
// symbols = ~(((symbol_a << (2 * (inside_diag_num + 1)))) ^ symbol_b);
// symbols &= (symbols >> 1) & braid_ones;
//
// combing_condition = mask & (symbols | (((~(left_cap)) & top_strand)));
// rev_combing_cond = combing_condition ^ braid_ones;
//
// if (combing_condition) {
// top_strand_shifted = top_strand >> (2 * (inside_diag_num + 1));
// top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap);
//// symbols = ~(((symbol_a)) ^ (symbol_b >> (2 * (inside_diag_num + 1))));
//// symbols &= (symbols >> 1) & braid_ones;
//
//// combing_condition = mask_r & (symbols | ((~(left_strand) & top_strand_shifted)));
// combing_condition >>= (2 * (inside_diag_num + 1));
// rev_combing_cond = combing_condition ^ braid_ones;
//
//
// left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted);
// }
// }
//
//
// bitset_left_strand_map[left_edge + j] = left_strand;
//
// bitset_top_strand_map[top_edge + j] = top_strand;
// }
//}
//
//template<class Input>
//inline void process_cubes_antidiag_mpi_bin(int lower_bound, int upper_bound, int left_edge, int top_edge,
// Input *bitset_left_strand_map,
// Input *bitset_top_strand_map,
// Input *a_reverse, Input *b) {
//
// const int upper = sizeof(Input) * 8 - 1;
//
//#pragma omp for simd schedule(static) aligned(bitset_top_strand_map, bitset_left_strand_map, a_reverse, b:sizeof(Input)*8)
// for (int j = lower_bound; j < upper_bound; ++j) {
// Input left_cap, symbols, combing_condition, rev_combing_cond, top_strand_shifted;
// Input left_strand = bitset_left_strand_map[left_edge + j];
// Input top_strand = bitset_top_strand_map[top_edge + j];
// Input symbol_a = a_reverse[left_edge + j];
// Input symbol_b = b[top_edge + j];
//
// Input mask = Input(1);
//
//
// // upper half
//#pragma GCC unroll 256
// for (int rev_counter = (sizeof(Input) * 8 - 1); rev_counter > 0; rev_counter--) {
// left_cap = left_strand >> rev_counter;
// symbols = ~(((symbol_a >> rev_counter)) ^ symbol_b);
// combing_condition = mask & (symbols | (((~(left_cap)) & top_strand)));
// rev_combing_cond = ~combing_condition;
//
// top_strand_shifted = top_strand << rev_counter;
// top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap);
//
// combing_condition <<= rev_counter;
// rev_combing_cond = ~combing_condition;
//
// left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted);
//
// mask = (mask << 1) | Input(1);
// }
//
// // center
// symbols = (~(symbol_a ^ symbol_b));
//// symbols &= (symbols >> 1) & braid_ones;
// combing_condition = (symbols | ((~left_strand) & top_strand));
// rev_combing_cond = ~combing_condition;
// top_strand_shifted = top_strand;
// top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_strand);
// left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted);
//
// mask = ~Input(0);
//
// //lower half
//#pragma GCC unroll 256
// for (int inside_diag_num = 1; inside_diag_num < upper + 1; inside_diag_num++) {
// mask <<= 1;
//
// left_cap = left_strand << (inside_diag_num);
// symbols = ~(((symbol_a << inside_diag_num)) ^ symbol_b);
//// symbols &= (symbols >> 1) & braid_ones;
//
// combing_condition = mask & (symbols | (((~(left_cap)) & top_strand)));
// rev_combing_cond = ~combing_condition;
//
// top_strand_shifted = top_strand >> ((inside_diag_num));
// top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap);
// combing_condition >>= ((inside_diag_num));
// rev_combing_cond = ~combing_condition;
//
//
// left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted);
// }
//
//
// bitset_left_strand_map[left_edge + j] = left_strand;
// bitset_top_strand_map[top_edge + j] = top_strand;
// }
//}
//
//
//template<class Input>
//inline void process_cube_with_exception_bin(int left_edge, int top_edge, int j, Input braid_ones, Input l_active_mask,
// Input r_active_mask,
// Input *bitset_left_strand_map, Input *bitset_top_strand_map,
// Input *a_reverse,
// Input *b) {
//
// Input left_cap, symbols, combing_condition, rev_combing_cond, top_strand_shifted;
//
// Input left_strand = bitset_left_strand_map[left_edge + j];
// Input top_strand = bitset_top_strand_map[top_edge + j];
// Input symbol_a = a_reverse[left_edge + j];
// Input symbol_b = b[top_edge + j];
//
// int rev_counter = (sizeof(Input) * 8 - 2);
// Input mask = Input(1);
// Input mask_r = Input(1) << rev_counter;
//
//
// // upper half
// for (int inside_diag_num = 0; inside_diag_num < sizeof(Input) * 8 / 2; ++inside_diag_num) {
// left_cap = left_strand >> rev_counter;
// symbols = ~(((symbol_a >> rev_counter)) ^ symbol_b);
// symbols &= (symbols >> 1) & braid_ones;
// combing_condition =
// r_active_mask & (l_active_mask >> rev_counter) & mask & (symbols | (((~(left_cap)) & top_strand)));
// rev_combing_cond = combing_condition ^ braid_ones;
//
// if (combing_condition) {
// top_strand_shifted = top_strand << rev_counter;
// top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap);
//
// symbols = ~(((symbol_a)) ^ (symbol_b << rev_counter));
// symbols &= (symbols >> 1) & braid_ones;
// combing_condition = (r_active_mask << rev_counter) & l_active_mask & mask_r &
// (symbols | ((~(left_strand) & top_strand_shifted)));
// rev_combing_cond = combing_condition ^ braid_ones;
//
// left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted);
// }
//
//
// rev_counter -= 2;
// mask = (mask << 2) | Input(1);
// mask_r = mask_r | (mask_r >> 2);
// }
//
// // center
// symbols = (~(symbol_a ^ symbol_b));
// symbols &= (symbols >> 1) & braid_ones;
// combing_condition = l_active_mask & r_active_mask & (symbols | ((~left_strand) & top_strand));
// rev_combing_cond = combing_condition ^ braid_ones;
// if (combing_condition) {
// top_strand_shifted = top_strand;
// top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_strand);
// left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted);
// }
//
//
// mask = braid_ones;
// mask_r = braid_ones;
//
// //lower half
// for (int inside_diag_num = 0; inside_diag_num < sizeof(Input) * 8 / 2; ++inside_diag_num) {
// mask <<= 2;
// mask_r >>= 2;
//
// left_cap = left_strand << (2 * (inside_diag_num + 1));
// symbols = ~(((symbol_a << (2 * (inside_diag_num + 1)))) ^ symbol_b);
// symbols &= (symbols >> 1) & braid_ones;
//
// combing_condition = r_active_mask & (l_active_mask << (2 * (inside_diag_num + 1))) & mask &
// (symbols | (((~(left_cap)) & top_strand)));
// rev_combing_cond = combing_condition ^ braid_ones;
//
// if (combing_condition) {
// top_strand_shifted = top_strand >> (2 * (inside_diag_num + 1));
// top_strand = (rev_combing_cond & top_strand) | (combing_condition & left_cap);
// symbols = ~(((symbol_a)) ^ (symbol_b >> (2 * (inside_diag_num + 1))));
// symbols &= (symbols >> 1) & braid_ones;
//
// combing_condition = (r_active_mask >> (2 * (inside_diag_num + 1))) & l_active_mask & mask_r &
// (symbols | ((~(left_strand) & top_strand_shifted)));
// rev_combing_cond = combing_condition ^ braid_ones;
//
// left_strand = (rev_combing_cond & left_strand) | (combing_condition & top_strand_shifted);
// }
// }
//
//
// bitset_left_strand_map[left_edge + j] = left_strand;
// bitset_top_strand_map[top_edge + j] = top_strand;
//
//}
//
//
//template<class Input>
//int prefix_lcs_via_braid_bits_2symbol_v2_full_mask(Input *a_reverse, int a_size, int a_total_symbols,
// Input *b, int b_size, int b_total_symbols, int threads_num) {
//
//
// Input *bitset_left_strand_map = static_cast<Input *> (aligned_alloc(sizeof(Input), sizeof(Input) * a_size));
// Input *bitset_top_strand_map = static_cast<Input *> (aligned_alloc(sizeof(Input), sizeof(Input) * b_size));
//
//
// auto m = a_size, n = b_size;
//
// int dis_braid = 0;
// auto num_diag = m + n - 1;
// auto total_same_length_diag = num_diag - (m - 1) - (m - 1);
//
// Input braid_ones = ~Input(0);
//
//
//#pragma omp parallel num_threads(threads_num) default(none) shared(bitset_left_strand_map, bitset_top_strand_map, a_reverse, b, m, n, dis_braid, total_same_length_diag, braid_ones)
// {
//
//#pragma omp for simd schedule(static) aligned(bitset_left_strand_map:sizeof(Input)*8)
// for (int k = 0; k < n; ++k) {
// bitset_top_strand_map[k] = Input(0);
// }
//
//#pragma omp for simd schedule(static) aligned(bitset_left_strand_map:sizeof(Input)*8)
// for (int k = 0; k < m; ++k) {
// bitset_left_strand_map[k] = braid_ones;
// }
//
// for (int diag_len = 0; diag_len < m - 1; diag_len++) {
// process_cubes_antidiag_mpi_bin(0, diag_len + 1, m - 1 - diag_len, 0, bitset_left_strand_map,
// bitset_top_strand_map, a_reverse, b);
//
// }
//
// for (int k = 0; k < total_same_length_diag; k++) {
// process_cubes_antidiag_mpi_bin(0, m, 0, k, bitset_left_strand_map,
// bitset_top_strand_map, a_reverse, b);
// }
//
// auto start_j = total_same_length_diag;
//
// for (int diag_len = m - 1; diag_len >= 1; diag_len--) {
// process_cubes_antidiag_mpi_bin(0, diag_len, 0, start_j, bitset_left_strand_map,
// bitset_top_strand_map, a_reverse, b);
// start_j++;
// }
//
//#pragma omp for simd schedule(static) reduction(+:dis_braid) aligned(bitset_top_strand_map, bitset_left_strand_map, a_reverse, b:sizeof(Input)*8)
// for (int i1 = 0; i1 < m; ++i1) {
// // Brian Kernighan’s Algorithm
// int counter = 0;
// Input number = bitset_left_strand_map[i1];
// // LogNumber
// while (number) {
// number &= (number - 1);
// counter++;
// }
// dis_braid += counter;
// }
//
// }
//
//
// free(bitset_left_strand_map);
// free(bitset_top_strand_map);
//
// return a_total_symbols - dis_braid;
//
//}
//
//
//#endif //CPU_2SYMBOL_NEW_H
|
GB_binop__gt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__gt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint8)
// A*D function (colscale): GB (_AxD__gt_uint8)
// D*A function (rowscale): GB (_DxB__gt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint8)
// C=scalar+B GB (_bind1st__gt_uint8)
// C=scalar+B' GB (_bind1st_tran__gt_uint8)
// C=A+scalar GB (_bind2nd__gt_uint8)
// C=A'+scalar GB (_bind2nd_tran__gt_uint8)
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT8 || GxB_NO_GT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.  The ewise work
// is done by the included template, using the GB_* macros defined above.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  The kernel body
// is compiled out (#if 0) for this operator: GT returns bool, which cannot
// serve as an accumulator for uint8_t C, so this variant is a stub that
// reports success without doing work.
GrB_Info GB (_Cdense_accumB__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    { 
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  As with _Cdense_accumB,
// the kernel body is compiled out (#if 0) for this non-accumulator operator;
// the function is a stub that reports success.
GrB_Info GB (_Cdense_accumb__gt_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    { 
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D, writing bool results
// into C->x.  The per-entry work is done by the colscale template.
GrB_Info GB (_AxD__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values; the template writes through Cx
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D, writing bool results
// into C->x.  The per-entry work is done by the rowscale template.
GrB_Info GB (_DxB__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's values; the template writes through Cx
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with cij = (aij > bij).
// Declares per-matrix ek-slicing workspaces, runs the shared add template,
// then frees the workspaces.
GrB_Info GB (_AaddB__gt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces consumed by GB_add_template.c and released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse.  All work is done by the emult_08 meta template.
GrB_Info GB (_AemultB_08__gt_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for GT (see its definition above), so
// only the non-flipped branch of the template is compiled here; the flipxy
// parameter is handled upstream by swapping to the flipped operator.
GrB_Info GB (_AemultB_02__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        { 
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        { 
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full.  All work is done by the emult_04 template.
GrB_Info GB (_AemultB_04__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
// All work is done by the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__gt_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x > Bx [p]) for every entry present in B: bind the scalar x as
// the first operand of the GT operator.  Entries absent from B's bitmap
// (GBB reports false) are left untouched in Cx.
GrB_Info GB (_bind1st__gt_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    const uint8_t x = (*((uint8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    { 
        // only process entries present in B
        if (GBB (Bb, p))
        { 
            uint8_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x > bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] > y) for every entry present in A: bind the scalar y as
// the second operand of the GT operator.  Entries absent from A's bitmap
// (GBB reports false) are left untouched in Cx.
GrB_Info GB (_bind2nd__gt_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    const uint8_t y = (*((uint8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    { 
        // only process entries present in A
        if (GBB (Ab, p))
        { 
            uint8_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij > y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x > aij), via GB_CAST_OP
// defined just above.  GB_ATYPE is temporarily redefined because the
// transpose template reads A as the operator's SECOND input; the trailing
// redefinition restores the macro for any code that follows.
GrB_Info GB (_bind1st_tran__gt_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the bound scalar x (first operand)
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij > y), via GB_CAST_OP
// defined just above.  No GB_ATYPE redefinition is needed here: A is the
// operator's first input, matching the template's expectation.
GrB_Info GB (_bind2nd_tran__gt_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the bound scalar y (second operand)
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
bistack.c | // -*-Mode: C++;-*- // technically C99
// * BeginRiceCopyright *****************************************************
//
// $HeadURL$
// $Id$
//
// --------------------------------------------------------------------------
// Part of HPCToolkit (hpctoolkit.org)
//
// Information about sources of support for research and development of
// HPCToolkit is at 'hpctoolkit.org' and in 'README.Acknowledgments'.
// --------------------------------------------------------------------------
//
// Copyright ((c)) 2002-2021, Rice University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Rice University (RICE) nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// This software is provided by RICE and contributors "as is" and any
// express or implied warranties, including, but not limited to, the
// implied warranties of merchantability and fitness for a particular
// purpose are disclaimed. In no event shall RICE or contributors be
// liable for any direct, indirect, incidental, special, exemplary, or
// consequential damages (including, but not limited to, procurement of
// substitute goods or services; loss of use, data, or profits; or
// business interruption) however caused and on any theory of liability,
// whether in contract, strict liability, or tort (including negligence
// or otherwise) arising in any way out of the use of this software, even
// if advised of the possibility of such damage.
//
// ******************************************************* EndRiceCopyright *
//*****************************************************************************
// local includes
//*****************************************************************************
#include "bistack.h"
//*****************************************************************************
// macros
//*****************************************************************************
#define Ap(s) &s.aptr
//*****************************************************************************
// interface functions
//*****************************************************************************
// Initialize both atomic stack-head pointers of a bistack to empty (0).
// Must be called before any push/pop/steal on s.
void
bistack_init
(
 bistack_t *s
)
{
  atomic_init(Ap(s->produced), 0);
  atomic_init(Ap(s->to_consume), 0);
}
// Push element e onto the shared producer side of the bistack, using the
// concurrent (cstack) protocol.
void
bistack_push
(
 bistack_t *s,
 s_element_t *e
)
{
  cstack_push(&s->produced, e);
}
// Pop an element from the private consumer side of the bistack; returns
// NULL-equivalent when the consumer stack is empty.  Uses the sequential
// (sstack) protocol, which is safe because to_consume is consumer-private.
s_element_t *
bistack_pop
(
 bistack_t *s
)
{
  return sstack_pop(&s->to_consume);
}
// Reverse the private consumer stack in place (sequential protocol), so
// elements pop in the order they were originally produced.
void
bistack_reverse
(
 bistack_t *s
)
{
  sstack_reverse(&s->to_consume);
}
// Move the entire producer stack to the private consumer stack, if the
// producer side is non-empty.  cstack_steal detaches the whole chain
// atomically; the relaxed load/store are presumably sufficient because
// to_consume is only accessed by the consumer — NOTE(review): confirm the
// intended memory-ordering contract against the cstack implementation.
void
bistack_steal
(
 bistack_t *s
)
{
  if (atomic_load_explicit(Ap(s->produced), memory_order_relaxed) != NULL) {
    s_element_t *tmp = cstack_steal(&s->produced);
    atomic_store_explicit(Ap(s->to_consume), tmp, memory_order_relaxed);
  }
}
//*****************************************************************************
// unit test
//*****************************************************************************
#define UNIT_TEST 0
#if UNIT_TEST
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <unistd.h>
typedef struct {
s_element_ptr_t next;
int value;
} typed_stack_elem(int); //int_q_element_t
typedef s_element_ptr_t typed_stack_elem_ptr(int); //int_q_elem_ptr_t
typedef bistack_t typed_bistack(int);
//typed_queue_elem_ptr(int) queue;
typed_bistack(int) pair;
typed_bistack_impl(int)
// Allocate and initialize a new int stack element with the given value;
// the caller owns the result.  Returns NULL on allocation failure.
// BUG FIX: the original fell off the end of this non-void function without
// returning e — undefined behavior; callers received an indeterminate
// pointer.  It also did not check malloc's result.
typed_stack_elem(int) *
typed_stack_elem_fn(int,new)(int value)
{
  typed_stack_elem(int) *e =
    (typed_stack_elem(int)* ) malloc(sizeof(int_s_element_t));
  if (e == NULL) return NULL;   // out of memory: signal "no element"
  e->value = value;
  cstack_ptr_set(&e->next, 0);
  return e;
}
// Unit-test helper: pop up to n elements from the global bistack `pair`,
// printing each value; stops early (with a message) when the private
// consumer stack runs dry.
void
pop
(
 int n
)
{
  int i;
  for(i = 0; i < n; i++) {
    typed_stack_elem(int) *e = typed_bistack_pop(int)(&pair);
    if (e == 0) {
      printf("%d queue empty\n", omp_get_thread_num());
      break;
    } else {
      printf("%d popping %d\n", omp_get_thread_num(), e->value);
    }
  }
}
// Unit-test helper: push n freshly allocated elements with values
// [min, min+n) onto the global bistack `pair`, announcing each push.
void
push
(
 int min,
 int n
)
{
  int i;
  for(i = min; i < min + n; i++) {
    printf("%d pushing %d\n", omp_get_thread_num(), i);
    typed_bistack_push(int)(&pair, typed_stack_elem_fn(int, new)(i));
  }
}
// Unit-test helper: transfer everything from the producer side of `pair`
// to the consumer side (wrapper over typed_bistack_steal).
void
steal
(
)
{
  typed_bistack_steal(int)(&pair);
}
#ifdef DUMP_UNORDERED_STACK
// Unit-test helper (compiled only with DUMP_UNORDERED_STACK): walk a stolen
// chain of elements via their next pointers and print each value.
void
dump
(
 int_s_element_t *e
)
{
  int i;
  for(; e;
      e = (int_s_element_t *) typed_stack_elem_ptr_get(int,cstack)(&e->next)) {
    printf("%d stole %d\n", omp_get_thread_num(), e->value);
  }
}
#endif
// Unit-test driver: five threads (0-4) push batches of elements while
// thread 5 sleeps, then steals and pops — exercising concurrent producers
// against a single stealing consumer.
int
main
(
 int argc,
 char **argv
)
{
  bistack_init(&pair);
#pragma omp parallel num_threads(6)
  {
    // producers: threads 0-4 push three batches with distinct value ranges
    if (omp_get_thread_num() != 5 ) push(0, 30);
    if (omp_get_thread_num() == 5 ) {
      // consumer: let producers get ahead, then steal and drain a little
      sleep(3);
      steal();
      pop(10);
    }
    if (omp_get_thread_num() != 5 ) push(100, 12);
    // pop(100);
    // int_bis_element_t *e = typed_bistack_steal(int, qtype)(&queue);
    //dump(e);
    if (omp_get_thread_num() != 5 ) push(300, 30);
    //typed_queue_
    if (omp_get_thread_num() == 5 ) {
      sleep(1);
      steal();
      pop(100);
    }
  }
}
#endif
|
mt_main.c | #include "incs.h"
#include "node_struct.h"
#include "sclr_eval.h"
#include "make_fake_data.h"
#include "make_fake_tree.h"
// TODO Keep following in sync with .c file
extern void vctr_eval_isp(int32_t * nH, int32_t * nT, float * X, int32_t m, int32_t n, struct _orig_node_t * dt, int32_t n_dt, int32_t depth);
#include "get_time_usec.h"
config_t g_C;
// Benchmark driver: build nDT fake decision trees and a fake data matrix,
// then time scalar (sclr_eval) vs. vectorized (vctr_eval_isp) inference,
// each parallelized over trees with nP OpenMP threads.
// Fixes vs. original: allocations use the `sizeof *ptr` idiom (the original
// sized p_dt with sizeof(node_t *) while p_dt is orig_node_t ** — works only
// because all object pointers are the same size, but it is fragile), and
// calloc replaces malloc+memset / plain malloc so the row-pointer arrays
// p_nH/p_nT are zeroed before any early return, keeping the BYE: cleanup
// loops safe.
int
main(
     int argc,
     char **argv
     )
{
  int status = 0;
  // start configuration parameters
  int depth = 16;
  int num_features = 64;
  int num_instances = 4 * 1048576;
  int nDT = 4; // number of parallel decision trees to evaluate
  int nP = 4; // number of cores used
  //--------------------
  orig_node_t **p_dt = NULL; // [nDT][n_dt]
  int *p_n_dt = NULL;
  float *X = NULL; // [num_features * num_instances]
  int **p_nH = NULL; // [nDT][num_instances]
  int **p_nT = NULL; // [nDT][num_instances]
  //-------------------
  // read (or generate) decision tree
  memset(&g_C, 0, sizeof(config_t));
  p_dt = calloc(nDT, sizeof *p_dt);
  return_if_malloc_failed(p_dt);
  p_n_dt = calloc(nDT, sizeof *p_n_dt);
  return_if_malloc_failed(p_n_dt);
  for ( int i = 0; i < nDT; i++ ) {
    status = make_fake_tree(depth, num_features, &(p_dt[i]), &(p_n_dt[i]));
    cBYE(status);
  }
  // read (or generate) data
  status = make_fake_data(num_features, num_instances, &X);
  cBYE(status);
  // create space for results (pointer arrays zeroed for safe cleanup)
  p_nH = calloc(nDT, sizeof *p_nH);
  return_if_malloc_failed(p_nH);
  for ( int i = 0; i < nDT; i++ ) {
    p_nH[i] = malloc(num_instances * sizeof *p_nH[i]);
    return_if_malloc_failed(p_nH[i]);
  }
  p_nT = calloc(nDT, sizeof *p_nT);
  return_if_malloc_failed(p_nT);
  for ( int i = 0; i < nDT; i++ ) {
    p_nT[i] = malloc(num_instances * sizeof *p_nT[i]);
    return_if_malloc_failed(p_nT[i]);
  }
  // perform inferencing
  printf("n = %d \n", num_instances);
  printf("m = %d \n", num_features);
  printf("d = %d \n", depth);
  printf("nDT = %d \n", nDT);
  printf("nP = %d \n", nP);
  uint64_t t_start = get_time_usec();
#pragma omp parallel for num_threads(nP)
  for ( int i = 0; i < nDT; i++ ) {
    // record any failure without breaking out of the parallel loop
    int l_status = sclr_eval(p_nH[i], p_nT[i], X,
        num_features, num_instances, p_dt[i], p_n_dt[i], depth);
    if ( l_status < 0 ) { status = l_status; }
  }
  cBYE(status);
  uint64_t t_stop = get_time_usec();
  printf("T (scalar) = %lf\n", (t_stop - t_start)/1000000.0);
  t_start = get_time_usec();
#pragma omp parallel for num_threads(nP)
  for ( int i = 0; i < nDT; i++ ) {
    vctr_eval_isp(p_nH[i], p_nT[i], X, num_features, num_instances,
        p_dt[i], p_n_dt[i], depth);
  }
  t_stop = get_time_usec();
  printf("T (vector) = %lf\n", (t_stop - t_start)/1000000.0);
BYE:
  free_if_non_null(X);
  if ( p_nH != NULL ) {
    for (int i = 0; i < nDT; i++ ) {
      free_if_non_null(p_nH[i]);
    }
    free_if_non_null(p_nH);
  }
  if ( p_nT != NULL ) {
    for (int i = 0; i < nDT; i++ ) {
      free_if_non_null(p_nT[i]);
    }
    free_if_non_null(p_nT);
  }
  if ( p_dt != NULL ) {
    for (int i = 0; i < nDT; i++ ) {
      free_if_non_null(p_dt[i]);
    }
    free_if_non_null(p_dt);
  }
  free_if_non_null(p_n_dt);
  return status;
}
|
ScatterHelper.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <pointercast.h>
#include <op_boilerplate.h>
#include <NDArray.h>
#include <numeric>
namespace nd4j {
namespace ops {
template <typename T>
class ScatterHelper {
public:
// Apply OpClass element-wise at positions selected by `indices`:
// output[idx] = op(output[idx], updates[e]) for each index e.
// Three shape regimes: fully scalar/vector (element loop), vector/scalar
// indices over higher-rank operands (TADs along dim 0), and rank>=2 index
// matrices (flatten operands to 2-D and scatter rows).
// BUG FIX: the second clause of the first condition tested
// input->isScalar() twice; the intended test (matching the clause's
// structure and the other two clauses) is indices->isScalar().
template <typename OpClass>
static FORCEINLINE Nd4jStatus scatterApply(NDArray<T>* output, NDArray<T>* indices, NDArray<T>* updates) {
    NDArray<T>* input = output;   // scatter reads and writes the same array
    int indicesLength = (int) indices->lengthOf();
    if ((indices->isVector() && input->isVector() && updates->isVector()) ||
        (input->isScalar() && indices->isScalar() && updates->isScalar()) ||
        (input->isVector() && indices->isScalar() && updates->isScalar()) ) {
        // element-wise case: each index selects one scalar slot
        for (int e = 0; e < indicesLength; e++) {
            int idx = (int) indices->getScalar(e);
            T t0 = input->getScalar(idx);
            T t1 = updates->getScalar(e);
            output->putScalar(idx, OpClass::op(t0, t1, nullptr));
        }
        return Status::OK();
    } else if (indices->isVector() || indices->isScalar()) {
        // pair each indexed TAD of the operand with the e-th TAD of updates
        std::vector<int> idc;
        std::vector<int> idcU;
        for (int e = 0; e < indicesLength; e++) {
            idc.push_back((int) indices->getScalar(e));
            idcU.push_back(e);
        }
        std::vector<int> tadDimension = ShapeUtils<T>::convertAxisToTadTarget(input->rankOf(), {0});
        auto tadsOperand = output->multipleTensorsAlongDimension(idc, tadDimension);
        auto tadsUpdate = updates->multipleTensorsAlongDimension(idcU, tadDimension);
        auto z0 = tadsOperand->at(0);
        auto z1 = tadsUpdate->at(0);
        REQUIRE_TRUE(z0->isSameShape(z1), 0, "scatter_add: updates shapes should match");
        for (int e = 0; e < tadsOperand->size(); e++) {
            auto t0 = tadsOperand->at(e);
            auto t1 = tadsUpdate->at(e);
            t0->template applyPairwiseTransform<OpClass>(t1, nullptr);
        }
        delete tadsOperand;
        delete tadsUpdate;
        return Status::OK();
    } else if (indices->isMatrix() || indices->rankOf() >= 2) {
        // flatten operand and updates to 2-D, then scatter row-by-row
        auto _input = input->reshape(input->ordering(), {input->sizeAt(0), -1});
        auto _updates = updates->reshape(updates->ordering(), {indicesLength, (int) updates->lengthOf() / indicesLength});
        auto tadsOperand = _input->allTensorsAlongDimension({1});
        auto tadsUpdates = _updates->allTensorsAlongDimension({1});
        for (int e = 0; e < indicesLength; e++) {
            int idx = indices->getScalar(e);
            auto t0 = tadsOperand->at(idx);
            auto t1 = tadsUpdates->at(e);
            t0->template applyPairwiseTransform<OpClass>(t1, nullptr);
        }
        delete _input;
        delete _updates;
        delete tadsOperand;
        delete tadsUpdates;
        return Status::OK();
    }
    return Status::THROW("ScatterHelper failed");
}
////////////////////////////////////////////////////////////////////////
// Scatter `updates` into `output` at positions given by `indices`, combining
// with OpClass.  For 1-D output each index addresses one element; for higher
// ranks each index addresses a sub-array of output along dimension 0, paired
// with the i-th sub-array of updates.  The #pragma omp critical serializes
// the combine step because distinct i may target the same output location.
template <typename OpClass>
static FORCEINLINE void scatter(const NDArray<T>& indices, const NDArray<T>& updates, NDArray<T>& output) {
    const int outRank = output.rankOf();
    int indRank = indices.rankOf();
    const int updRank = updates.rankOf();
    const Nd4jLong indLen = indices.lengthOf();
    if(outRank == 1) {
#pragma omp parallel for if(indLen > Environment::getInstance()->elementwiseThreshold()) schedule(guided)
        for(Nd4jLong i = 0; i < indLen; ++i) {
            T& out = output(indices(i));
#pragma omp critical
            out = OpClass::op(out, updates(i), nullptr);
        }
    }
    else { // outRank > 1
        // a vector of indices with matching ranks is treated as rank-1
        if(outRank == updRank && indices.isVector())
            indRank = 1;
        // exclude the first indRank dims of updates when forming sub-arrays
        std::vector<int> dimsToExcludeUpd(indRank ? indRank : 1);
        std::iota(dimsToExcludeUpd.begin(), dimsToExcludeUpd.end(), 0);
        // per-thread index-range scratch (firstprivate below)
        std::vector<Nd4jLong> idxRangesOut(2 * output.rankOf());
        std::vector<Nd4jLong> idxRangesUpd(2 * updates.rankOf());
// #pragma omp parallel for if(indLen > Environment::getInstance()->elementwiseThreshold()) schedule(guided) firstprivate(idxRangesOut, idxRangesUpd)    // causes known openMP asan bug !
#pragma omp parallel for schedule(guided) firstprivate(idxRangesOut, idxRangesUpd)
        for(Nd4jLong i = 0; i < indLen; ++i) {
            ShapeUtils<T>::evalIdxRangesForSubArr(indices(i), output.getShapeInfo(), {0}, idxRangesOut.data());
            ShapeUtils<T>::evalIdxRangesForSubArr(i, updates.getShapeInfo(), dimsToExcludeUpd, idxRangesUpd.data());
            NDArray<T> outSubArr = output(idxRangesOut.data());
            NDArray<T> updSubArr = updates(idxRangesUpd.data());
#pragma omp critical
            outSubArr.template applyPairwiseTransform<OpClass>(&updSubArr, nullptr);
        }
    }
}
};
// CORRECT VERSION IS BELOW!!!!!!!!!!!!!!!!!!!!!!!!
////////////////////////////////////////////////////////////////////////
/* template <typename OpClass>
static FORCEINLINE void scatter(const NDArray<T>& indices, const NDArray<T>& updates, NDArray<T>& output) {
const int outRank = output.rankOf();
const int indRank = indices.rankOf();
const int updRank = updates.rankOf();
const Nd4jLong indLen = indices.lengthOf();
if(outRank == 1) {
#pragma omp parallel for if(indLen > Environment::getInstance()->elementwiseThreshold()) schedule(guided)
for(Nd4jLong i = 0; i < indLen; ++i) {
T& out = output(indices(i));
#pragma omp critical
out = OpClass::op(out, updates(i), nullptr);
}
}
else { // outRank > 1
std::vector<int> dimsToExcludeUpd(indRank ? indRank : 1);
std::iota(dimsToExcludeUpd.begin(), dimsToExcludeUpd.end(), 0);
std::vector<Nd4jLong> idxRangesOut(2 * output.rankOf());
std::vector<Nd4jLong> idxRangesUpd(2 * updates.rankOf());
// #pragma omp parallel for if(indLen > Environment::getInstance()->elementwiseThreshold()) schedule(guided) firstprivate(idxRangesOut, idxRangesUpd) // causes known openMP asan bug !
#pragma omp parallel for schedule(guided) firstprivate(idxRangesOut, idxRangesUpd)
for(Nd4jLong i = 0; i < indLen; ++i) {
ShapeUtils<T>::evalIdxRangesForSubArr(indices(i), output.getShapeInfo(), {0}, idxRangesOut.data());
ShapeUtils<T>::evalIdxRangesForSubArr(i, updates.getShapeInfo(), dimsToExcludeUpd, idxRangesUpd.data());
NDArray<T> outSubArr = output(idxRangesOut.data());
NDArray<T> updSubArr = updates(idxRangesUpd.data());
#pragma omp critical
outSubArr.template applyPairwiseTransform<OpClass>(&updSubArr, nullptr);
}
}
}
};
*/
}
} |
GB_unaryop__minv_int16_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int16_uint32
// op(A') function: GB_tran__minv_int16_uint32
// C type: int16_t
// A type: uint32_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 16)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 16) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT16 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_SIGNED ((int16_t) Ax [p], 16) for all anz entries:
// cast uint32_t to int16_t, then apply the integer multiplicative-inverse
// operator (see the GB_CAST_OP macro above).  Parallel over entries.
GrB_Info GB_unop__minv_int16_uint32
(
    int16_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv ((int16_t) A'): transpose A, typecast, and apply the unary
// operator.  The transpose template (phase 2) does all the work using the
// GB_* macros defined above.
GrB_Info GB_tran__minv_int16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
crop_and_resize.c | #include <TH/TH.h>
#include <stdio.h>
#include <math.h>
// Crop boxes [start_box, limit_box) out of a batch of CHW images and
// bilinearly resize each crop to crop_height x crop_width.
// Boxes are normalized [y1, x1, y2, x2]; box_index_data maps each box to
// its source image in the batch.  Sample points falling outside the image
// are filled with extrapolation_value.  Output layout: corps_data
// ("crops", presumably a typo kept for API compatibility) is
// [num_boxes][depth][crop_height][crop_width].  Parallel over boxes.
void CropAndResizePerBox(
    const float * image_data,
    const int batch_size,
    const int depth,
    const int image_height,
    const int image_width,

    const float * boxes_data,
    const int * box_index_data,
    const int start_box,
    const int limit_box,

    float * corps_data,
    const int crop_height,
    const int crop_width,
    const float extrapolation_value
) {
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;

    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    int b;
    #pragma omp parallel for
    for (b = start_box; b < limit_box; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];

        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            // NOTE(review): exit() inside an OpenMP parallel region aborts
            // the whole process from a worker thread — consider validating
            // box_index_data before the loop instead; also confirm that
            // <stdlib.h> is pulled in for exit (not included directly here)
            exit(-1);
        }

        // step per output pixel in source coordinates (0 when the crop is
        // a single row/column, handled below by midpoint sampling)
        const float height_scale =
            (crop_height > 1)
                ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                            : 0;

        for (int y = 0; y < crop_height; ++y)
        {
            const float in_y = (crop_height > 1)
                                   ? y1 * (image_height - 1) + y * height_scale
                                   : 0.5 * (y1 + y2) * (image_height - 1);

            if (in_y < 0 || in_y > image_height - 1)
            {
                // whole output row is outside the image: fill with the
                // extrapolation value for every channel
                for (int x = 0; x < crop_width; ++x)
                {
                    for (int d = 0; d < depth; ++d)
                    {
                        // crops(b, y, x, d) = extrapolation_value;
                        corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                }
                continue;
            }

            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x)
            {
                const float in_x = (crop_width > 1)
                                       ? x1 * (image_width - 1) + x * width_scale
                                       : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    // sample point outside the image: extrapolate
                    for (int d = 0; d < depth; ++d)
                    {
                        corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
                    }
                    continue;
                }

                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                // bilinear interpolation of the four neighboring pixels,
                // independently per channel
                for (int d = 0; d < depth; ++d)
                {
                    const float *pimage = image_data + b_in * image_elements + d * image_channel_elements;

                    const float top_left = pimage[top_y_index * image_width + left_x_index];
                    const float top_right = pimage[top_y_index * image_width + right_x_index];
                    const float bottom_left = pimage[bottom_y_index * image_width + left_x_index];
                    const float bottom_right = pimage[bottom_y_index * image_width + right_x_index];

                    const float top = top_left + (top_right - top_left) * x_lerp;
                    const float bottom =
                        bottom_left + (bottom_right - bottom_left) * x_lerp;

                    corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = top + (bottom - top) * y_lerp;
                }
            }   // end for x
        }   // end for y
    }   // end for b
}
/*
 * Forward crop-and-resize entry point for Torch tensors.
 * Reads the batch geometry from `image`, resizes `crops` to
 * [num_boxes, depth, crop_height, crop_width], zeroes it, and delegates the
 * per-box work to CropAndResizePerBox over all boxes.
 */
void crop_and_resize_forward(
    THFloatTensor * image,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    const float extrapolation_value,
    const int crop_height,
    const int crop_width,
    THFloatTensor * crops
) {
    const int n_boxes  = THFloatTensor_size(boxes, 0);
    const int channels = THFloatTensor_size(image, 1);

    // Shape and clear the output buffer before filling it.
    THFloatTensor_resize4d(crops, n_boxes, channels, crop_height, crop_width);
    THFloatTensor_zero(crops);

    // Process every box ([0, n_boxes)) of the batch.
    CropAndResizePerBox(
        THFloatTensor_data(image),
        THFloatTensor_size(image, 0),
        channels,
        THFloatTensor_size(image, 2),
        THFloatTensor_size(image, 3),

        THFloatTensor_data(boxes),
        THIntTensor_data(box_index),
        0,
        n_boxes,

        THFloatTensor_data(crops),
        crop_height,
        crop_width,
        extrapolation_value
    );
}
/*
 * Backward pass of crop_and_resize: scatter the incoming crop gradients
 * (`grads`, [num_boxes, depth, crop_height, crop_width]) back into the image
 * gradient tensor (`grads_image`, [batch, depth, height, width]) using the
 * same bilinear weights as the forward pass. `grads_image` must already be
 * sized to the image shape; it is zeroed here before accumulation.
 * Note: unlike the forward pass this loop is serial — parallelizing over
 * boxes would race on the `+=` accumulations into grads_image.
 */
void crop_and_resize_backward(
    THFloatTensor * grads,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    THFloatTensor * grads_image // resize to [bsize, c, hc, wc]
)
{
    // shape
    const int batch_size = THFloatTensor_size(grads_image, 0);
    const int depth = THFloatTensor_size(grads_image, 1);
    const int image_height = THFloatTensor_size(grads_image, 2);
    const int image_width = THFloatTensor_size(grads_image, 3);

    const int num_boxes = THFloatTensor_size(grads, 0);
    const int crop_height = THFloatTensor_size(grads, 2);
    const int crop_width = THFloatTensor_size(grads, 3);

    // n_elements (strides of one channel / one image / one crop)
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;

    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    // init output space
    THFloatTensor_zero(grads_image);

    // data pointer
    const float * grads_data = THFloatTensor_data(grads);
    const float * boxes_data = THFloatTensor_data(boxes);
    const int * box_index_data = THIntTensor_data(box_index);
    float * grads_image_data = THFloatTensor_data(grads_image);

    for (int b = 0; b < num_boxes; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];
        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }

        // Same coordinate mapping as the forward pass.
        const float height_scale =
            (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                              : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                             : 0;

        for (int y = 0; y < crop_height; ++y)
        {
            const float in_y = (crop_height > 1)
                                   ? y1 * (image_height - 1) + y * height_scale
                                   : 0.5 * (y1 + y2) * (image_height - 1);
            // Out-of-image samples received extrapolation_value in the
            // forward pass, so they contribute no gradient: skip.
            if (in_y < 0 || in_y > image_height - 1)
            {
                continue;
            }
            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x)
            {
                const float in_x = (crop_width > 1)
                                       ? x1 * (image_width - 1) + x * width_scale
                                       : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    continue;
                }
                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                for (int d = 0; d < depth; ++d)
                {
                    float *pimage = grads_image_data + b_in * image_elements + d * image_channel_elements;
                    const float grad_val = grads_data[crop_elements * b + channel_elements * d + y * crop_width + x];

                    // Distribute grad_val to the 4 source pixels with the
                    // bilinear weights (1-y_lerp)(1-x_lerp), etc.
                    const float dtop = (1 - y_lerp) * grad_val;
                    pimage[top_y_index * image_width + left_x_index] += (1 - x_lerp) * dtop;
                    pimage[top_y_index * image_width + right_x_index] += x_lerp * dtop;

                    const float dbottom = y_lerp * grad_val;
                    pimage[bottom_y_index * image_width + left_x_index] += (1 - x_lerp) * dbottom;
                    pimage[bottom_y_index * image_width + right_x_index] += x_lerp * dbottom;
                }   // end d
            }   // end x
        }   // end y
    }   // end b
}
ejercicio7.c | /*Para compilar usar (-lrt: real time library): gcc -O2 Sumavectores.c -o SumaVectores -lrt
Para ejecutar use: SumaVectores longitud
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <time.h>
#define PRINTF_ALL
//Sólo puede estar definida una de las tres constantes VECTOR_ (sólo uno de los ...
//tres defines siguientes puede estar descomentado):
//#define VECTOR_LOCAL
// descomentar para que los vectores sean variables ...
// locales (si se supera el tamaño de la pila se ...
// generará el error "Violación de Segmento")
//#define VECTOR_GLOBAL // descomentar para que los vectores sean variables ...
// globales (su longitud no estará limitada por el ...
// tamaño de la pila del programa)
#define VECTOR_DYNAMIC //descomentar para que los vectores sean variables ...
//dinámicas (memoria reautilizable durante la ejecución)
#ifdef VECTOR_GLOBAL
#define MAX 33554432
double v1[MAX], v2[MAX], v3[MAX];
#endif
int main(int argc, char** argv){
int i;
struct timespec cgt1,cgt2;
double ncgt; //para tiempo de ejecución
if(argc<2){
printf("Faltan nº componentes del vector\n");
exit(-1);
}
unsigned int N=atoi(argv[1]);
#ifdef VECTOR_LOCAL
double v1[N], v2[N], v3[N];
#endif
#ifdef VECTOR_GLOBAL
if(N>MAX) N=MAX;
#endif
#ifdef VECTOR_DYNAMIC
double *v1, *v2, *v3;
v1= (double*) malloc(N*sizeof(double));
v2= (double*) malloc(N*sizeof(double));
v3= (double*) malloc(N*sizeof(double));
if((v1==NULL) || (v2==NULL) || (v3==NULL)){
printf("Error en la reserva de espacio para los vectores\n");
exit(-2);
}
#endif
//Inicializar vectores
#pragma omp parallel
{
#pragma omp for
for(i=0;i<N;i++){
v1[i]= N*0.1+i*0.1; v2[i]=N*0.1-i*0.1; //los valores dependen de N
}
}
//clock_gettime(CLOCK_REALTIME,&cgt1);
double a= omp_get_wtime();
//Calcular suma de vectores
#pragma omp parallel for
for(i=0;i<N;i++)
v3[i]=v1[i] + v2[i];
//clock_gettime(CLOCK_REALTIME,&cgt2);
ncgt=(double) (cgt2.tv_sec-cgt1.tv_sec)+ (double) ((cgt2.tv_nsec-cgt1.tv_nsec)/(1.e+9));
//Imprimir resultado de la suma y el tiempo de ejecución
#ifdef PRINTF_ALL
printf("Tiempo(seg.): %11.9f\t / Tamaño Vectores:%u\n",omp_get_wtime()/*ncgt*/,N);
for(i=0;i<N;i++){
printf("thread %d ejecuta la iteración %d del bucle\n",omp_get_thread_num(),i);
printf("Elapsed time: %11.9f\t\n",omp_get_wtime()-a);
printf("/V1[%d]+V2[%d](%8.6f+%8.6f=%8.6f)/\n", i,i,i,v1[i],v2[i],v3[i]);
}
#else
printf("Tiempo: %11.9f\t / Tamaño Vectores:%u\n",
ncgt,N,v1[0],v2[0],v3[0],N-1,N-1,N-1,v1[N-1],v2[N-1],v3[N-1]);
#endif
#ifdef VECTOR_DYNAMIC
free(v1); //libera el espacio reservado para v1
free(v2); //libera el espacio reservado para v2
free(v3); //libera el espacio reservado para v3
#endif
return 0;
}
|
runtime.c | #include "runtime.h"
#if 0
/* Maximum number of simultaneous tasks */
#define MAX_TASK 256
struct rt_s_tasks{
int n_task;
volatile int *task_pool;
} RT_TASK;
#endif
/* Return the scheduler currently selected in the PLASMA context
 * (compared elsewhere against PLASMA_QUARK / PLASMA_OMPSS). */
int RT_get_runtime()
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    return plasma->runtime;
}
/* Print the current PLASMA_SIZE when RT_DEBUG is enabled.
 * NOTE(review): `plasma` looks unused, but PLASMA_SIZE is presumably a macro
 * that expands through the context pointer — confirm before removing it. */
void RT_get_size()
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    if ( RT_DEBUG ) {
        fprintf(OUTFILE, "PLASMA_SIZE: %d\n", PLASMA_SIZE);
    }
}
/* Return the current worker-set size (PLASMA_SIZE).
 * NOTE(review): PLASMA_SIZE presumably expands through `plasma` — confirm
 * before treating the local as dead. */
int RT_get_ws()
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    return PLASMA_SIZE;
}
/* Set the worker-set size (PLASMA_SIZE) in the current PLASMA context. */
void RT_set_ws(int sze)
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    PLASMA_SIZE = sze;
    // if ( RT_DEBUG ) {
    //     fprintf(OUTFILE, "change PLASMA_SIZE to %d\n", sze);
    // }
}
/* When RT_DEBUG is enabled, print the worker-set size plus the active
 * scheduler (QUARK / OmpSs) and scheduling mode (static / dynamic). */
void RT_runtime_info()
{
    if ( RT_DEBUG ) {
        plasma_context_t *plasma;
        plasma = plasma_context_self();
        fprintf(OUTFILE, "PLASMA_SIZE: %d ", PLASMA_SIZE);
        switch (plasma->runtime)
        {
            case PLASMA_QUARK:
                fprintf(OUTFILE, "QUARK in use ");
                break;
            case PLASMA_OMPSS:
                fprintf(OUTFILE, "OmpSs in use ");
                break;
            default:
                fprintf(OUTFILE, "Unknown scheduler ");
        }
        switch (plasma->scheduling)
        {
            case PLASMA_STATIC_SCHEDULING:
                fprintf(OUTFILE, "Static scheduling ");
                break;
            case PLASMA_DYNAMIC_SCHEDULING:
                fprintf(OUTFILE, "Dynamic scheduling ");
                break;
            default:
                fprintf(OUTFILE, " ");
        }
        fprintf(OUTFILE, "\n");
    }
}
/* Global task barrier: wait for all outstanding tasks using the mechanism
 * of the active runtime (plasma_dynamic_sync for QUARK, a taskwait for
 * OmpSs). */
void RT_dynamic_sync()
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    if (plasma->runtime == PLASMA_QUARK) {
        plasma_dynamic_sync();
    }
    else if(plasma->runtime == PLASMA_OMPSS) {
        //#pragma omp taskwait noflush
        #pragma omp taskwait
    }
}
/* Wait only for tasks whose dependences touch *ptr (OmpSs `taskwait on`
 * extension; not standard OpenMP). */
void RT_dynamic_sync_on(double *ptr)
{
    #pragma omp taskwait on(ptr)
}
/* Free a tile buffer through the active runtime. Under QUARK this defers to
 * QUARK_CORE_free; under OmpSs the actual free is commented out and only an
 * empty printf remains.
 * NOTE(review): with the task disabled this path never frees A (leak), and
 * printf("") looks like a leftover placeholder — confirm intended. */
void RT_CORE_free(Quark *quark, Quark_Task_Flags *task_flags, void *A, int szeA)
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    if (plasma->runtime == PLASMA_QUARK) {
        QUARK_CORE_free(quark, task_flags, A, szeA);
    }
    else if (plasma->runtime == PLASMA_OMPSS) {
        if (A != NULL) {
            printf("");
            // #pragma omp task inout([szeA]A)
            // free(A);
        }
    }
}
/* Empty OmpSs task: creates an inout dependence on A[0..sze) so later tasks
 * serialize against it; the body intentionally does nothing. */
#pragma omp task inout([sze]A)
void RT_CORE_foo(double *A, int sze)
{
    return;
}
/* Empty OmpSs task with a concurrent dependence on A[0..szeA) and an inout
 * dependence on B[0..szeB); used purely for dependence tracking. */
#pragma omp task concurrent([szeA]A) inout([szeB]B)
void RT_CORE_foo2(double *A, int szeA, double *B, int szeB)
{
    return;
}
/* Number of OpenMP threads in the current team. */
int RT_threads_num()
{
    return omp_get_num_threads();
}
/* OpenMP thread id of the calling thread within its team. */
int RT_global_rank()
{
    return omp_get_thread_num();
}
#if 0
void RT_init()
{
plasma_context_t *plasma;
plasma = plasma_context_self();
if ( plasma->runtime == PLASMA_OMPSS ) {
RT_TASK.n_task = 0;
RT_TASK.task_pool = malloc(MAX_TASK * sizeof(int));
memset(RT_TASK.task_pool, -1, sizeof(int)*MAX_TASK);
}
}
int RT_task_init()
{
plasma_context_t *plasma;
plasma = plasma_context_self();
int id = 0;
if ( plasma->runtime == PLASMA_OMPSS ) {
int *pool = RT_TASK.task_pool;
#pragma omp critical
{
while ( pool[id] != -1 ) {
id++;
}
if ( id <= MAX_TASK ) {
RT_TASK.n_task++;
pool[id] = 0;
} else {
fprintf(stderr, "RT_TASK full\n");
}
}
}
return id;
}
int RT_local_rank(int t_id)
{
int grank = RT_global_rank();
int lrank;
#pragma omp critical
{
lrank = RT_TASK.task_pool[t_id];
RT_TASK.task_pool[t_id]++;
printf("t_id: %d, grank: %d, lrank: %d\n", t_id, grank, lrank);
}
return lrank;
}
void RT_task_fini(int t_id)
{
#pragma omp critical
{
RT_TASK.task_pool[t_id] = -1;
RT_TASK.n_task--;
}
}
void RT_fini()
{
RT_TASK.n_task = 0;
free(RT_TASK.task_pool);
}
#endif
|
gesummv.c | /**
* gesummv.c: This file was adapted from PolyBench/GPU 1.0 test
* suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
#define BENCHMARK_NAME "GESUMMV"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
#ifdef RUN_POLYBENCH_SIZE
#define SIZE 16384 //4096
#elif RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define N SIZE
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0)
*/
#define ALPHA 43532.0f
#define BETA 12313.0f
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
/*
 * CPU reference kernel: y = ALPHA * A*x + BETA * B*x (both A and B are NxN,
 * row-major). tmp receives the intermediate A*x product.
 */
void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y,
             DATA_TYPE *tmp) {
  for (int row = 0; row < N; row++) {
    DATA_TYPE ax = 0;   // running A*x dot product for this row
    DATA_TYPE bx = 0;   // running B*x dot product for this row
    for (int col = 0; col < N; col++) {
      ax += A[row * N + col] * x[col];
      bx += B[row * N + col] * x[col];
    }
    tmp[row] = ax;
    y[row] = ALPHA * ax + BETA * bx;
  }
}
/*
 * Offloaded kernel: y = ALPHA * A*x + BETA * B*x on the target device
 * (DEVICE_ID), one row per parallel iteration. A, B, x, tmp are mapped in;
 * y is mapped both ways so the host sees the result.
 * NOTE(review): B is mapped 'to' but is never written by init() in this
 * file — confirm callers initialize it.
 */
void gesummv_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y,
                 DATA_TYPE *tmp) {
#pragma omp target map(to : A[ : N *N], B[ : N *N], x[ : N], tmp[ : N]) map(tofrom : y[ : N]) device(DEVICE_ID)
#pragma omp teams distribute parallel for
  for (int i = 0; i < N; i++) {
    tmp[i] = 0;
    y[i] = 0;
    for (int j = 0; j < N; j++) {
      tmp[i] = A[i * N + j] * x[j] + tmp[i];
      y[i] = B[i * N + j] * x[j] + y[i];
    }

    y[i] = ALPHA * tmp[i] + BETA * y[i];
  }
}
/* Fill x with i/N and the row-major NxN matrix A with (i*j)/N. */
void init(DATA_TYPE *A, DATA_TYPE *x) {
  for (int row = 0; row < N; row++) {
    x[row] = ((DATA_TYPE)row) / N;
    for (int col = 0; col < N; col++) {
      A[row * N + col] = ((DATA_TYPE)row * col) / N;
    }
  }
}
/* Count (and report) the entries where the CPU and GPU outputs differ by
 * more than PERCENT_DIFF_ERROR_THRESHOLD percent; return the count. */
int compareResults(DATA_TYPE *y, DATA_TYPE *y_outputFromGpu) {
  int mismatches = 0;
  for (int idx = 0; idx < N; idx++) {
    if (percentDiff(y[idx], y_outputFromGpu[idx]) > PERCENT_DIFF_ERROR_THRESHOLD) {
      mismatches++;
    }
  }

  // Print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, mismatches);

  return mismatches;
}
/*
 * Benchmark driver: allocate the operands, run the offloaded kernel (and,
 * under RUN_TEST, the CPU reference), report runtimes and the number of
 * mismatching outputs. Returns the mismatch count (0 when no test is run).
 */
int main(int argc, char *argv[]) {
  double t_start, t_end;
  int fail = 0;

  DATA_TYPE *A;
  DATA_TYPE *B;
  DATA_TYPE *x;
  DATA_TYPE *y;
  DATA_TYPE *y_outputFromGpu;
  DATA_TYPE *tmp;

  A = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE));
  /* B is read by both kernels but never written by init(); zero it so the
   * computation (and the CPU/GPU comparison) is deterministic instead of
   * reading indeterminate heap contents. */
  B = (DATA_TYPE *)calloc(N * N, sizeof(DATA_TYPE));
  x = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
  y = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
  y_outputFromGpu = (DATA_TYPE *)calloc(N, sizeof(DATA_TYPE));
  tmp = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE));
  if (!A || !B || !x || !y || !y_outputFromGpu || !tmp) {
    fprintf(stderr, "memory allocation failed\n");
    return 1;
  }

  printBenchmarkInfo(BENCHMARK_NAME, SIZE);
  init(A, x);

  t_start = rtclock();
  gesummv_OMP(A, B, x, y_outputFromGpu, tmp);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  t_start = rtclock();
  gesummv(A, B, x, y, tmp);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

  fail = compareResults(y, y_outputFromGpu);
#endif

  free(A);
  free(B);
  free(x);
  free(y);
  free(y_outputFromGpu);
  free(tmp);

  return fail;
}
|
DRB045-doall1-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it andor
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http:www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https:github.comLLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Simplest one dimension array computation
*/
int a[100];
/* DRB045 (DataRaceBench): embarrassingly-parallel one-dimensional array
 * updates — both compute loops have fully independent iterations, so the
 * omp parallel-for annotations introduce no data race. */
int main()
{
    /* a[i] = i */
    #pragma omp parallel for
    for (int i = 0; i < 100; i++)
    {
        a[i] = i;
    }

    /* a[i] += 1 */
    #pragma omp parallel for
    for (int i = 0; i < 100; i++)
    {
        a[i] = (a[i] + 1);
    }

    /* Sequential dump of the results. */
    for (int i = 0; i < 100; i++)
    {
        printf("%d\n", a[i]);
    }

    return 0;
}
|
file.h | /* ================================================================= *
* file.h : Header file with supporting class definitions *
* *
* E-MEM: An efficient (MUMmer-like) tool to retrieve Maximum Exact *
* Matches using hashing based algorithm *
* *
* Copyright (c) 2014, Nilesh Khiste *
* All rights reserved *
* *
* This program is free software: you can redistribute it and/or *
* modify it under the terms of the GNU General Public License as *
* published by the Free Software Foundation, either version 3 of *
* the License, or (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public *
* License along with this program. *
* *
* This file is subject to the terms and conditions defined in the *
* file 'LICENSE', which is part of this source code package. *
* ================================================================= */
using namespace std;
#define CHARS2BITS(x) (2*(x)) //convert char position to bit position
#define DATATYPE_WIDTH 64 // number of bits
#define RANDOM_SEQ_SIZE 10
#define NUM_TMP_FILES 24
/* Global run configuration shared by the whole program (command-line
 * options land here). All members are static; definitions follow below. */
class commonData {
  public:
    static int32_t minMemLen;       // minimum MEM length, in bits (2 bits/char)
    static int32_t d;               // split factor for chunked processing
    static int32_t numThreads;      // worker thread count
    static int32_t kmerSize;        // seed k-mer size, in bits (2 bits/char)
    static int32_t ignoreN;         // 1 = track/skip N blocks in sequences
    static int32_t fourColOutput;   // output format flag
    static int32_t lenInHeader;     // include sequence length in headers
    static int32_t relQueryPos;     // report query positions relative to sequence
    static char nucmer_path[256];   // scratch directory for temporary files
    static std::string pfx_path;    // output prefix path
};

// Out-of-class definitions with their defaults.
int32_t commonData::minMemLen=100;  // 2 bit representation=50
int32_t commonData::d=1;
int32_t commonData::numThreads=1;
int32_t commonData::kmerSize=56;    //2 bit representation = 28
int32_t commonData::ignoreN=0;
int32_t commonData::fourColOutput=0;
int32_t commonData::lenInHeader=0;
int32_t commonData::relQueryPos=0;
char commonData::nucmer_path[256]={'\0'};
std::string commonData::pfx_path;
/* One sequence's extent within the concatenated, bit-addressed input
 * (start/end come from CHARS2BITS positions) plus its FASTA name. */
class seqData {
  public:
    uint64_t start;     // first bit position of the sequence
    uint64_t end;       // last bit position of the sequence
    std::string seq;    // sequence name (FASTA header token)

    seqData() : start(0), end(0) {}

    // Comparator: obj1 sorts before obj2 when obj1 ends strictly before
    // obj2 begins.
    bool operator ()(const seqData &obj1, const seqData &obj2)
    {
        return obj2.start > obj1.end;
    }
};
/* Closed interval [left, right] of bit positions; used to record blocks of
 * N characters within a sequence. */
class mapObject {
  public:
    uint64_t left;
    uint64_t right;

    mapObject() : left(0), right(0) {}

    mapObject(uint64_t x, uint64_t y) : left(x), right(y) {}

    // Predicate for std::upper_bound: a position x precedes every interval
    // whose left bound exceeds it.
    bool operator()(const uint64_t &x, const mapObject &y)
    {
        return x < y.left;
    }
};
/*
 * seqFileReadInfo: streams a (multi-)FASTA file in fixed-size chunks,
 * 2-bit-encoding the bases (A=00, C=01, G=10, T=11) into the binReads word
 * array. Runs of other characters ("N blocks") are optionally recorded in
 * blockOfNs (as bit-position intervals) while the encoder substitutes a
 * random base for each such character. Sequences are concatenated with a
 * 10-N spacer between them. Also provides helpers to generate the
 * reverse-complement file and the per-sequence position table.
 */
class seqFileReadInfo {
    fstream file;                 // input FASTA stream
    uint64_t size;                // chunk size, in characters (bases)
    string strTmp, strName;       // carry-over text between chunks / current header
    uint64_t binReadSize;         // capacity of binReads, in 64-bit words
    uint64_t binReadsLocation;    // next binReads word being filled
    uint64_t currPos;             // running character offset of the current chunk
    uint64_t numSequences;        // number of sequences found in the file

    // Fixed 10-character N spacer inserted between concatenated sequences.
    string& randomStr()
    {
        static string str("NNNNNNNNNN");
        return str;
    }

    // Re-encode the text carried over from the previous chunk (strTmp) at
    // the start of a fresh chunk; resets the encoder state first.
    void processTmpString(uint64_t &sz, uint64_t &blockNCount)
    {
        string line = strTmp;
        strTmp.clear();
        totalBases=0;
        binReadsLocation=0;
        processInput(line, sz, blockNCount);
    }

    /*
     * Function converts a character sequence into an array of integers.
     * Input: character string
     * Output: array of integers, total number of bases
     */
    void processInput(string &str, uint64_t &sz, uint64_t &blockNCount)
    {
        int chooseLetter=0;
        uint64_t k=0;
        // First call of a chunk: clear the packed-word buffer.
        if (!totalBases) {
            for (k=0; k<binReadSize; ++k)
                binReads[k]=0;
        }
        /* Processing the sequences by encoding the base pairs into 2 bits. */
        for ( std::string::iterator it=str.begin(); it!=str.end(); ++it)
        {
            // Past the chunk limit: stash the remainder for the next chunk.
            if (totalBases == sz){ //sz=size+minSize
                strTmp += *it;
                continue;
            }else if (totalBases >= size) {
                // Overlap region [size, sz): encoded now AND replayed next chunk.
                strTmp += *it;
            }
            switch(*it)
            {
                case 'A':
                case 'a':
                    binReads[binReadsLocation] <<= 2;
                    // Close any open N block ending just before this base.
                    if (commonData::ignoreN && blockNCount){
                        blockOfNs.push_back(mapObject(CHARS2BITS(blockNCount-1), CHARS2BITS(totalBases-1)));
                        blockNCount=0;
                    }
                    break;
                case 'C':
                case 'c':
                    binReads[binReadsLocation] <<= 2;
                    binReads[binReadsLocation] |= 1;
                    if (commonData::ignoreN && blockNCount){
                        blockOfNs.push_back(mapObject(CHARS2BITS(blockNCount-1), CHARS2BITS(totalBases-1)));
                        blockNCount=0;
                    }
                    break;
                case 'G':
                case 'g':
                    binReads[binReadsLocation] <<= 2;
                    binReads[binReadsLocation] |= 2;
                    if (commonData::ignoreN && blockNCount){
                        blockOfNs.push_back(mapObject(CHARS2BITS(blockNCount-1), CHARS2BITS(totalBases-1)));
                        blockNCount=0;
                    }
                    break;
                case 'T':
                case 't':
                    binReads[binReadsLocation] <<= 2;
                    binReads[binReadsLocation] |= 3;
                    if (commonData::ignoreN && blockNCount){
                        blockOfNs.push_back(mapObject(CHARS2BITS(blockNCount-1), CHARS2BITS(totalBases-1)));
                        blockNCount=0;
                    }
                    break;
                default:
                    // Non-ACGT character: open/extend an N block and encode a
                    // random base in its place (note: rand() makes the packed
                    // content of N regions non-deterministic).
                    if(!blockNCount)
                        blockNCount=totalBases+1;
                    chooseLetter = rand() % 4;
                    if (chooseLetter == 0)
                        binReads[binReadsLocation] <<= 2;
                    else if (chooseLetter == 1)
                    {
                        binReads[binReadsLocation] <<= 2;
                        binReads[binReadsLocation] |= 1;
                    }
                    else if (chooseLetter == 2)
                    {
                        binReads[binReadsLocation] <<= 2;
                        binReads[binReadsLocation] |= 2;
                    }
                    else
                    {
                        binReads[binReadsLocation] <<= 2;
                        binReads[binReadsLocation] |= 3;
                    }
            }
            totalBases++;
            // 32 bases fill one 64-bit word; move to the next.
            if ((totalBases%32)==0){
                binReadsLocation++;
            }
        }
    }

  public:
    uint64_t *binReads;                   // packed 2-bit sequence data
    uint64_t totalBases;                  // bases encoded in the current chunk
    std::vector <mapObject> blockOfNs;    // recorded N blocks (bit positions)

    seqFileReadInfo() {
        size=0;
        currPos=0;
        binReadSize=0;
        binReadsLocation=0;
        numSequences=0;
        totalBases=0;
    }

    // Construct and open the given FASTA file; exits on failure.
    seqFileReadInfo(string str)
    {
        size=0;
        currPos=0;
        binReadSize=0;
        binReadsLocation=0;
        numSequences=0;
        totalBases=0;
        file.open(str, ios::in);
        if(!file.is_open()) {
            cout << "ERROR: unable to open "<< str << " file" << endl;
            exit( EXIT_FAILURE );
        }
    }

    uint64_t &getNumSequences() {
        return numSequences;
    }

    // Open the named file for reading; exits on failure.
    void openFile(string s){
        file.open(s, ios::in);
        if(!file.is_open()) {
            cout << "ERROR: unable to open "<< s << " file" << endl;
            exit( EXIT_FAILURE );
        }
    }

    // Switch this reader to the temporary reverse-complement file written by
    // generateRevComplement() under commonData::nucmer_path.
    void setReverseFile() {
        char buffer[256];
        memset(buffer,0,256);
        sprintf(buffer, "%s/revComp", commonData::nucmer_path);
        file.close();
        openFile(buffer);
    }

    void closeFile() {
        file.close();
    }

    // Release the packed buffer and reset per-chunk state (keeps `size`).
    void destroy() {
        currPos=0;
        binReadSize=0;
        binReadsLocation=0;
        totalBases=0;
        strName.clear();
        strTmp.clear();
        clearMapForNs();
        delete [] binReads;
    }

    // Clear stream error flags and rewind to the beginning of the file.
    void clearFileFlag()
    {
        file.clear();
        file.seekg(0, ios::beg);
    }

    // Allocate binReads sized for one chunk (size/d characters plus the
    // inter-sequence spacers); returns the chunk size in characters.
    uint64_t allocBinArray()
    {
        size = size/commonData::d;
        binReadSize = floor((size+numSequences*RANDOM_SEQ_SIZE+commonData::d)/32+4);
        binReads = new uint64_t[binReadSize];
        return size;
    }

    void clearMapForNs()
    {
        blockOfNs.clear();
    }

    void clearTmpString()
    {
        strTmp.clear();
        strName.clear();
        clearMapForNs();
    }

    // Compute, for the k-mer at currKmerPos, the widest N-free window
    // [bounds.left, bounds.right] around it (bit positions).
    void getKmerLeftnRightBoundForNs(uint64_t &currKmerPos, mapObject &bounds)
    {
        uint64_t right=0;
        /*
         * Since we do all computation with bits, all our
         * positions are even. Here I return 1 (odd position),
         * an indication of no Ns towards left
         */
        if (!blockOfNs.size()){
            bounds.left=0x1;
            bounds.right=CHARS2BITS(totalBases-1);
            return;
        }
        vector<mapObject>::iterator it;
        it=upper_bound(blockOfNs.begin(), blockOfNs.end(), currKmerPos, mapObject());

        /* No N block beyond this point */
        if (it == blockOfNs.end())
            right = CHARS2BITS(totalBases-1);
        else
            right = (*it).left-2;

        /* This function never gets a position which is N */
        if (!currKmerPos || it==blockOfNs.begin()){
            bounds.left=0x1;
            bounds.right=right;
            return;
        }
        --it;
        bounds.left=(*it).right+2;
        bounds.right=right;
        return;
    }

    // Return true when the k-mer starting at currKmerPos overlaps an N block.
    // `it` is a resumable cursor into blockOfNs (callers iterate positions in
    // increasing order).
    bool checkKmerForNs(uint64_t &currKmerPos, vector<mapObject>::iterator &it)
    {
        if (!blockOfNs.size())
            return false;

        // Advance the cursor past blocks that start at or before the k-mer.
        while(it != blockOfNs.end())
        {
            if ((*it).left>currKmerPos)
                break;
            else
                ++it;
        }

        /* No N block beyond this point */
        if (it == blockOfNs.end()){
            --it;
            /* Current position within N block */
            if (((*it).left <=currKmerPos) && (currKmerPos <= (*it).right)){
                ++it;
                return true;
            }else{
                ++it;
                return false;
            }
        }

        // Next block starts beyond the k-mer's end: only the previous block
        // (if any) can still cover the k-mer's start.
        if ((*it).left > (currKmerPos+commonData::kmerSize-2)){
            if (it != blockOfNs.begin()){
                --it;
                if ((*it).right < currKmerPos){
                    ++it;
                    return false;
                }else {
                    ++it;
                    return true;
                }
            }else
                return false;
        }else {
            // Next block begins inside the k-mer window: overlap.
            return true;
        }
    }

    void setCurrPos() {
        currPos+=size;;
    }

    uint64_t getCurrPos() {
        return currPos;
    }

    void resetCurrPos() {
        currPos=0;
    }

    // Read and encode the next chunk (up to size+minMemLen/2-1 bases,
    // including carry-over). Returns true while data remains.
    bool readChunks()
    {
        string line;
        uint64_t blockNCount=0;
        int minSize = commonData::minMemLen/2-1;
        uint64_t sz=size+minSize;
        /* Process anything remaining from the last iteration */
        processTmpString(sz, blockNCount);
        while(getline( file, line ).good() ){
            if(line[0] == '>' || (totalBases == sz)){
                if( !strName.empty()){ // Process what we read from the last entry
                    if(line[0] != '>') {
                        processInput(line, sz, blockNCount);
                    }else {
                        // New header: insert the 10-N spacer between sequences.
                        processInput(randomStr(), sz, blockNCount);
                    }
                    if (totalBases == sz) {
                        // Chunk full: left-align the partial last word.
                        if ((totalBases%32)!=0)
                        {
                            uint64_t offset = CHARS2BITS(totalBases)%DATATYPE_WIDTH;
                            binReads[binReadsLocation] <<= (DATATYPE_WIDTH-offset);
                            binReadsLocation++;
                            binReads[binReadsLocation]=0;
                        }
                        if (commonData::ignoreN && blockNCount){
                            blockOfNs.push_back(mapObject(CHARS2BITS(blockNCount-1), CHARS2BITS(totalBases-1)));
                            blockNCount=0;
                        }
                        return true;
                    }
                }
                if( !line.empty() ){
                    strName = line.substr(1);
                }
            } else if( !strName.empty() ){
                processInput(line, sz, blockNCount);
            }
        }

        if( !strName.empty() ){ // Process what we read from the last entry
            if ((totalBases%32)!=0)
            {
                uint64_t offset = CHARS2BITS(totalBases)%DATATYPE_WIDTH;
                binReads[binReadsLocation] <<= (DATATYPE_WIDTH-offset);
                binReadsLocation++;
                binReads[binReadsLocation]=0;
            }
            if (commonData::ignoreN && blockNCount){
                blockOfNs.push_back(mapObject(CHARS2BITS(blockNCount-1), CHARS2BITS(totalBases-1)));
                blockNCount=0;
            }
            if (!strTmp.size())
                strName.clear();
            return true;
        }
        return false;
    }

    // out = Watson-Crick complement of in; non-ACGT characters pass through.
    void flipCharacter(char &in, char &out)
    {
        switch(in)
        {
            case 'A':
            case 'a':
                out='T';
                break;
            case 'C':
            case 'c':
                out='G';
                break;
            case 'G':
            case 'g':
                out='C';
                break;
            case 'T':
            case 't':
                out='A';
                break;
            default:
                out=in;
        }
    }

    // Reverse-complement `content` in place: complement both ends and swap,
    // walking inward; the middle character (odd length) is complemented last.
    void flipNswap(string &content)
    {
        string::iterator itBeg = content.begin();
        string::iterator itEnd = --content.end();
        char beg=0, end=0;
        uint64_t d=0;
        while ((d=distance(itBeg,itEnd)))
        {
            flipCharacter(*itBeg, end);
            flipCharacter(*itEnd, beg);
            (*itEnd)=end;
            (*itBeg)=beg;
            ++itBeg;
            --itEnd;
            if(d==1)
                break;
        }
        if (!d)
            flipCharacter(*itEnd, *itEnd);
    }

    // Emit ">name" followed by the reverse-complemented content.
    void writeReverseComplementString(string &name, string &content, fstream &file)
    {
        file << ">" << name << endl;
        flipNswap(content);
        file << content ;
    }

    // Scan the file and record, for each sequence, its name and bit-position
    // extent within the concatenated encoding (accounting for the 10-N
    // spacers inserted between sequences).
    void generateSeqPos(vector<seqData> &vecSeqInfo) {
        seqData s;
        uint64_t i=0,j=0;
        string line;
        clearFileFlag();
        while(getline(file, line).good() ){
            if(line[0] == '>'){
                if(!strName.empty()) {
                    s.start=CHARS2BITS(j);
                    s.end=CHARS2BITS(i-1);
                    s.seq.assign(strtok(const_cast<char *>(strName.c_str())," \t\n"));
                    vecSeqInfo.push_back(s);
                    s.seq.clear();
                    i+=RANDOM_SEQ_SIZE;
                    j=i;
                    strName.clear();
                }
                if(!line.empty())
                    strName=line.substr(1);
            } else if( !strName.empty() ) {
                i+=line.length();
            }
        }
        if( !strName.empty() ) {
            i+=line.length();
            s.start=CHARS2BITS(j);
            s.end=CHARS2BITS(i-1);
            s.seq.assign(strtok(const_cast<char *>(strName.c_str())," \t\n"));
            vecSeqInfo.push_back(s);
            s.seq.clear();
            strName.clear();
        }
    }

    // Scan the file to compute `size` and numSequences; when revComplement
    // is set, additionally write the reverse-complement of every sequence to
    // the temporary "revComp" file.
    void generateRevComplement(uint32_t revComplement) {
        string line,content;
        fstream revFile;
        if (revComplement) {
            char buffer[256];
            memset(buffer,0,256);
            sprintf(buffer, "%s/revComp", commonData::nucmer_path);
            revFile.open(buffer, ios::out);
            if (!revFile.is_open())
            {
                cout << "ERROR: unable to open temporary reverse complement file" << endl;
                exit( EXIT_FAILURE );
            }
        }
        clearFileFlag();
        while(getline(file, line).good() ){
            size += (line.length()+1);
            if(line[0] == '>'){
                if(!strName.empty()) {
                    numSequences++;
                    size += RANDOM_SEQ_SIZE;
                    if (revComplement){
                        writeReverseComplementString(strName, content, revFile);
                        content.clear();
                    }
                    strName.clear();
                }
                if(!line.empty())
                    strName=line.substr(1);
            } else if( !strName.empty() ) {
                if (revComplement) {
                    content += "\n";
                    content += line;
                }
            }
        }
        if( !strName.empty() ) {
            size += (line.length()+1);
            if (revComplement) {
                content += "\n";
                content += line;
            }
            numSequences++;
            if (revComplement){
                writeReverseComplementString(strName, content, revFile);
                content.clear();
            }
            strName.clear();
        }
        if (revComplement)
            revFile.close();
    }
};
/* A maximal exact match: [lR, rR] on the reference, [lQ, rQ] on the query
 * (all bit positions). */
class MemExt {
  public:
    uint64_t lR;    // left end, reference
    uint64_t rR;    // right end, reference
    uint64_t lQ;    // left end, query
    uint64_t rQ;    // right end, query

    MemExt() {
    }

    MemExt(uint64_t lr, uint64_t rr, uint64_t lq, uint64_t rq)
        : lR(lr), rR(rr), lQ(lq), rQ(rq)
    {
    }

    // Strict weak ordering: by query start, ties broken by reference start.
    bool operator () (const MemExt &obj1, const MemExt &obj2)
    {
        if (obj1.lQ != obj2.lQ)
            return obj1.lQ < obj2.lQ;
        return obj1.lR < obj2.lR;
    }
};
/* Manager for the temporary MEM spill files: matches are bucketed on disk
 * by query position (forward buckets [0,NUM_TMP_FILES), reverse-complement
 * buckets [NUM_TMP_FILES,2*NUM_TMP_FILES)), then de-duplicated, merged
 * across chunk boundaries, and printed in mummer format. */
class tmpFilesInfo {
fstream *TmpFiles;
/* In-memory buffer for MEMs that may extend across a chunk boundary and
 * therefore cannot be finalized to disk yet */
vector <MemExt> MemExtVec;
/* Query-position width of one disk bucket */
uint64_t numMemsInFile;
/* Returns true when the match touches the start or end of the current
 * query/reference chunk, i.e. it could extend into a neighboring chunk. */
bool checkMEMExt(uint64_t &lr, uint64_t &rr, uint64_t &lq, uint64_t &rq, seqFileReadInfo &QueryFile, seqFileReadInfo &RefFile) {
if ((!lq && QueryFile.getCurrPos()) || rq == CHARS2BITS(QueryFile.totalBases-1)) {
return true;
}else if((!lr && RefFile.getCurrPos()) || rr == CHARS2BITS(RefFile.totalBases-1)) {
return true;
}
return false;
}
/* Append one MEM record (raw binary) to the bucket selected by its query
 * start; reverse-complement matches use the second half of the buckets. */
void writeToFile(uint64_t lQ, uint64_t rQ, uint64_t lR, uint64_t rR, uint32_t &revComplement) {
MemExt m;
m.lQ=lQ;
m.lR=lR;
m.rQ=rQ;
m.rR=rR;
if (IS_MATCH_BOTH_DEF(revComplement))
TmpFiles[m.lQ/numMemsInFile+NUM_TMP_FILES].write((char *)&m, sizeof(MemExt));
else
TmpFiles[m.lQ/numMemsInFile].write((char *)&m, sizeof(MemExt));
}
/* Buffer a possibly chunk-crossing MEM in memory for later merging */
void writeToVector(uint64_t lQ, uint64_t rQ, uint64_t lR, uint64_t rR) {
MemExt m;
m.lQ=lQ;
m.lR=lR;
m.rQ=rQ;
m.rR=rR;
MemExtVec.push_back(m);
}
public:
tmpFilesInfo(int numFiles) {
TmpFiles = new fstream[numFiles];
}
~tmpFilesInfo() {
delete [] TmpFiles;
}
/* Derive the query-position width of each disk bucket from the total
 * input size (plus padding) so MEMs spread across NUM_TMP_FILES files. */
void setNumMemsInFile(uint64_t size, uint64_t &numSequences) {
numMemsInFile = ((2*(size*commonData::d+numSequences*RANDOM_SEQ_SIZE+commonData::d))/NUM_TMP_FILES);
}
/* Sort comparator: ascending by reference start position */
static bool compare_reference (const MemExt &obj1, const MemExt &obj2)
{
return (obj1.lR>=obj2.lR?false:true);
}
/* Equality predicate for std::unique: all four coordinates identical */
static bool myUnique(const MemExt &obj1, const MemExt &obj2)
{
if((obj1.lQ==obj2.lQ) && (obj1.rQ==obj2.rQ) && (obj1.rR==obj2.rR) && (obj1.lR==obj2.lR))
return true;
else
return false;
}
/* Create the temporary directory (once, guarded by a static flag) and
 * open numFiles bucket files under it with the given mode. Exits the
 * process on failure. */
void openFiles(ios_base::openmode mode, int numFiles) {
char buffer[256];
memset(buffer,0,256);
static int flag=0;
sprintf(buffer, "%s", commonData::nucmer_path);
if (!flag) {
if(mkdir(buffer, S_IRWXU|S_IRGRP|S_IXGRP))
{
cout << "ERROR: unable to open temporary directory" << endl;
exit( EXIT_FAILURE );
}
flag=1;
}
/* Last two files hold the sequence/pos mapping
 * for reference and query file respectively
 */
for (int32_t i=0;i<numFiles;i++) {
/* Temporary file to hold the mems */
sprintf(buffer, "%s/%d", commonData::nucmer_path, i);
TmpFiles[i].open(buffer, mode);
if (!TmpFiles[i].is_open())
{
cout << "ERROR: unable to open temporary file" << endl;
exit( EXIT_FAILURE );
}
}
}
void closeFiles(int numFiles) {
for (int32_t i=0;i<numFiles;i++){
TmpFiles[i].close();
}
}
fstream& getMapFile(int fIndex) {
return TmpFiles[fIndex];
}
/* Record a MEM if it meets the minimum length. Chunk-boundary matches go
 * to the in-memory vector (they may merge with a neighbor chunk later),
 * others straight to a disk bucket. Returns whether it was recorded.
 * Called from parallel regions, hence the omp critical sections. */
bool writeMemInTmpFiles(uint64_t &lRef, uint64_t &rRef, uint64_t &lQue, uint64_t &rQue, seqFileReadInfo &QueryFile, seqFileReadInfo &RefFile, uint32_t &revComplement) {
MemExt m;
uint64_t currPosQ = CHARS2BITS(QueryFile.getCurrPos());
uint64_t currPosR = CHARS2BITS(RefFile.getCurrPos());
if (rRef-lRef+2 >= static_cast<uint64_t>(commonData::minMemLen)) {
if (!(commonData::d==1 && commonData::numThreads==1) && checkMEMExt(lRef, rRef, lQue, rQue, QueryFile, RefFile)) {
#pragma omp critical(writeVector)
writeToVector(currPosQ+lQue, currPosQ+rQue, currPosR+lRef, currPosR+rRef);
}else {
#pragma omp critical(writeFile)
writeToFile(currPosQ+lQue, currPosQ+rQue, currPosR+lRef, currPosR+rRef, revComplement);
}
return true;
}else
return false;
}
/* Emit a "> name [Reverse] [Len = N]" header for the current query
 * sequence, honoring the reverse-complement bit and lenInHeader option. */
void printQueryHeader(vector<seqData>::iterator &itQ, uint32_t &revComplement)
{
if (revComplement & 0x1){
if (commonData::lenInHeader) {
cout << "> " << (*itQ).seq << " Reverse" << " Len = " << ((*itQ).end-(*itQ).start+2)/2 << endl;
}else{
cout << "> " << (*itQ).seq << " Reverse" << endl;
}
}else{
if (commonData::lenInHeader){
cout << "> " << (*itQ).seq << " Len = " << ((*itQ).end-(*itQ).start+2)/2 << endl;
}else{
cout << "> " << (*itQ).seq << endl;
}
}
}
/* Translate a MEM from absolute concatenated coordinates into
 * per-sequence relative coordinates (clipping at the random-character
 * padding between sequences) and print it. An all-zero MEM is a sentinel
 * meaning "flush headers for any remaining query sequences".
 * NOTE: keeps static state (flag, itQ) across calls, so calls must come
 * in query-position order for one pass. */
void printMemOnTerminal(vector<seqData> &refSeqInfo, vector<seqData> &querySeqInfo, MemExt &m, uint32_t &revComplement) {
uint64_t &lRef = m.lR;
uint64_t &rRef = m.rR;
uint64_t &lQue = m.lQ;
uint64_t &rQue = m.rQ;
static int flag=0;
vector<seqData>::iterator itR;
static vector<seqData>::iterator itQ=querySeqInfo.begin();
seqData s;
/* print remaining query sequences - if any */
if (!lRef && !rRef && !lQue && !rQue) {
/* No matches found - simply return */
if (!flag){
printQueryHeader(itQ, revComplement);
}
while(itQ != --querySeqInfo.end()){
++itQ;
printQueryHeader(itQ, revComplement);
}
itQ=querySeqInfo.begin();
flag=0;
return;
}
s.start=lRef;
s.end=rRef;
if (rRef-lRef+2 < static_cast<uint64_t>(commonData::minMemLen))
return;
/* Process relative position for Reference sequence */
itR = lower_bound(refSeqInfo.begin(), refSeqInfo.end(), s, seqData());
if ((*itR).start <= lRef && (*itR).end >= rRef){
// MEM within actual sequence
// s------e--s------e--s------e
// s--|--|e
lRef?lRef-=((*itR).start):lRef;
rRef-=((*itR).start);
}else if ((*itR).start > lRef && (*itR).end >= rRef) {
if ((*itR).start > rRef) //mem within random character
return;
// s------e--s------e--s------e
// s------e-|s--|---e
lQue+=((*itR).start-lRef);
lRef=0;
rRef-=((*itR).start);
}else if ((*itR).start > lRef && (*itR).end < rRef) {
// s------e--s------e--s------e
// s------e-|s------e-|s------e
lQue+=((*itR).start-lRef);
lRef=0;
rQue-=(rRef-(*itR).end);
rRef=((*itR).end-(*itR).start);
}else if ((*itR).start <= lRef && (*itR).end < rRef) {
// s------e--s------e--s------e
// s------e--s-----|e-|s------e
rQue-=(rRef-(*itR).end);
lRef?lRef-=((*itR).start):lRef;
rRef=((*itR).end-(*itR).start);
}else //mem within random character
return;
if (rRef-lRef+2 < static_cast<uint64_t>(commonData::minMemLen))
return;
/* Print first Query sequence */
if (!flag){
printQueryHeader(itQ, revComplement);
flag=1;
}
/* Process relative position for Query sequence */
while(lQue >= (*itQ).end){
++itQ;
printQueryHeader(itQ, revComplement);
}
if ((*itQ).start <= lQue && (*itQ).end >= rQue){
// MEM within actual sequence
// s------e--s------e--s------e
// s--|--|e
lQue?lQue-=((*itQ).start):lQue;
rQue-=((*itQ).start);
}else if ((*itQ).start > lQue && (*itQ).end >= rQue) {
if ((*itQ).start > rQue) //mem within random character
return;
// s------e--s------e--s------e
// s------e-|s--|---e
lRef+=((*itQ).start-lQue);
lQue=0;
rQue-=((*itQ).start);
}else if ((*itQ).start > lQue && (*itQ).end < rQue) {
// s------e--s------e--s------e
// s------e-|s------e-|s------e
lRef+=((*itQ).start-lQue);
lQue=0;
rRef-=(rQue-(*itQ).end);
rQue=((*itQ).end-(*itQ).start);
}else if ((*itQ).start <= lQue && (*itQ).end < rQue) {
// s------e--s------e--s------e
// s------e--s-----|e-|s------e
rRef-=(rQue-(*itQ).end);
lQue?lQue-=((*itQ).start):lQue;
rQue=((*itQ).end-(*itQ).start);
}else //mem within random character
return;
/* Positions are printed 1-based in characters, hence the (x+2)/2
 * conversion from bit coordinates */
if (rRef-lRef+2 >= static_cast<uint64_t>(commonData::minMemLen)){
if (refSeqInfo.size() == 1 && !commonData::fourColOutput) {
if ((revComplement & 0x1) && commonData::relQueryPos)
cout << " " << setw(15) << ((lRef+2)/2) << setw(15) << ((*itQ).end-(*itQ).start-lQue+2)/2 << setw(15) << ((rRef-lRef+2)/2) << endl;
else
cout << " " << setw(15) << ((lRef+2)/2) << setw(15) << ((lQue+2)/2) << setw(15) << ((rRef-lRef+2)/2) << endl;
}else{
if ((revComplement & 0x1) && commonData::relQueryPos) {
cout << " " << setw(30) << std::left <<(*itR).seq << setw(15) << ((lRef+2)/2) << setw(15) << ((*itQ).end-(*itQ).start-lQue+2)/2 << setw(15) << ((rRef-lRef+2)/2) << endl;
}else{
cout << " " << setw(30) << std::left <<(*itR).seq << setw(15) << ((lRef+2)/2) << setw(15) << ((lQue+2)/2) << setw(15) << ((rRef-lRef+2)/2) << endl;
}
}
}
}
/* Repeatedly sort the buffered chunk-boundary MEMs (by query, then by
 * reference) and splice pairs whose ends are exactly adjacent, until a
 * full pass makes no merge; then flush everything to the disk buckets.
 * No-op in the single-chunk, single-thread case. */
void mergeMemExtVector (uint32_t &revComplement) {
int flag=0;
MemExt m;
if (commonData::d==1 && commonData::numThreads==1)
return;
if (MemExtVec.size() > 1) {
do {
flag=0;
sort(MemExtVec.begin(), MemExtVec.end(), MemExt());
for (vector<MemExt>::iterator it=MemExtVec.begin(); it != --MemExtVec.end(); ++it) {
vector<MemExt>::iterator dup = it;
++dup;
for (; dup != MemExtVec.end(); ++dup) {
if((*dup).lQ + static_cast<uint64_t>(commonData::minMemLen-2)-2 > (*it).rQ)
break;
if((*dup).lQ + static_cast<uint64_t>(commonData::minMemLen-2)-2 == (*it).rQ) {
if((*dup).lR + static_cast<uint64_t>(commonData::minMemLen-2)-2 == (*it).rR) {
flag=1;
(*it).rQ=(*dup).rQ;
(*it).rR=(*dup).rR;
MemExtVec.erase(dup);
break;
}
}
}
if (flag)
break;
}
sort(MemExtVec.begin(), MemExtVec.end(), compare_reference);
for (vector<MemExt>::iterator it=MemExtVec.begin(); it != --MemExtVec.end(); ++it) {
vector<MemExt>::iterator dup = it;
++dup;
for (; dup != MemExtVec.end(); ++dup) {
if((*dup).lR + static_cast<uint64_t>(commonData::minMemLen-2)-2 > (*it).rR)
break;
if((*dup).lR + static_cast<uint64_t>(commonData::minMemLen-2)-2 == (*it).rR) {
if((*dup).lQ + static_cast<uint64_t>(commonData::minMemLen-2)-2 == (*it).rQ) {
flag=1;
(*it).rQ=(*dup).rQ;
(*it).rR=(*dup).rR;
MemExtVec.erase(dup);
break;
}
}
}
if (flag)
break;
}
} while (flag);
}
for (vector<MemExt>::iterator it=MemExtVec.begin(); it != MemExtVec.end(); ++it) {
writeToFile((*it).lQ, (*it).rQ, (*it).lR, (*it).rR, revComplement);
}
MemExtVec.clear();
}
/* Interleave the forward and reverse-complement result files (written by
 * removeDuplicates) into mummer output order on stdout, alternating
 * between them at each '>' header, then delete both files. */
void outputInMummerFormat() {
string line, last_line;
static int first=1;
char buffer[256];
int n1=2*NUM_TMP_FILES,n2=2*NUM_TMP_FILES+1;
fstream *filePtr, *forFile=&TmpFiles[n1], *revFile=&TmpFiles[n2];
memset(buffer,0,256);
sprintf(buffer, "%s/%d", commonData::nucmer_path, n1);
(*forFile).open(buffer, ios::in);
sprintf(buffer, "%s/%d", commonData::nucmer_path, n2);
(*revFile).open(buffer, ios::in);
filePtr = forFile;
if(getline((*filePtr), line).good())
cout << line << endl;
while(getline((*filePtr), line).good()) {
if(line[0] == '>'){
if (last_line.size())
cout << last_line << endl;
last_line = line;
if (filePtr == forFile) {
filePtr = revFile;
if (first) {
if(getline((*filePtr), line).good())
cout << line << endl;
first=0;
}
}else
filePtr = forFile;
continue;
}
cout << line << endl;
}
cout << last_line << endl;
filePtr = revFile;
while(getline((*filePtr), line).good())
cout << line << endl;
(*revFile).close();
(*forFile).close();
remove(buffer);
sprintf(buffer, "%s/%d", commonData::nucmer_path, n1);
remove(buffer);
}
/* Final pass: reread each disk bucket, sort and de-duplicate its MEMs,
 * print them (with std::cout temporarily redirected into per-strand
 * result files when both strands were matched), then clean up all
 * temporary files and the temporary directory. */
void removeDuplicates(vector<seqData> &refSeqInfo, vector<seqData> &querySeqInfo, uint32_t revComplement) {
streambuf *coutbuf=std::cout.rdbuf();
int numFiles=0;
MemExt m;
seqData s;
char buffer[256];
memset(buffer,0,256);
if(IS_MATCH_BOTH_DEF(revComplement))
numFiles=2*NUM_TMP_FILES;
else
numFiles=NUM_TMP_FILES;
openFiles(ios::in|ios::binary, numFiles);
sprintf(buffer, "%s/%d", commonData::nucmer_path, numFiles);
if (IS_MATCH_BOTH_DEF(revComplement))
TmpFiles[numFiles].open(buffer, ios::out|ios::trunc);
else
remove(buffer);
sprintf(buffer, "%s/%d", commonData::nucmer_path, numFiles+1);
if (IS_MATCH_BOTH_DEF(revComplement))
TmpFiles[numFiles+1].open(buffer, ios::out|ios::trunc);
else
remove(buffer);
/* Indication that reverse complement is being processed */
if (IS_MATCH_REV_DEF(revComplement))
revComplement|=0x1;
/* Redirect std::cout to a file */
if (IS_MATCH_BOTH_DEF(revComplement)){
std::cout.rdbuf(TmpFiles[numFiles].rdbuf());
}
for (int32_t i=0;i<numFiles;i++){
vector<MemExt>::iterator last;
sprintf(buffer, "%s/%d", commonData::nucmer_path, i);
if (i==NUM_TMP_FILES) {
/* Output any unused query sequence */
m.lR=m.lQ=m.rR=m.rQ=0;
printMemOnTerminal(refSeqInfo, querySeqInfo, m, revComplement);
/* Processing reverse complement files now*/
revComplement|=0x1;
/* Redirect output to reverse complement file */
std::cout.rdbuf(TmpFiles[numFiles+1].rdbuf());
}
while(!TmpFiles[i].read((char *)&m, sizeof (MemExt)).eof()) {
MemExtVec.push_back(m);
}
sort(MemExtVec.begin(), MemExtVec.end(), MemExt());
if (commonData::d==1 && commonData::numThreads==1) // Everything is unique
last=MemExtVec.end();
else
last=unique(MemExtVec.begin(), MemExtVec.end(), myUnique);
TmpFiles[i].close();
remove(buffer);
for (vector<MemExt>::iterator it=MemExtVec.begin(); it!=last; ++it) {
printMemOnTerminal(refSeqInfo, querySeqInfo, *it, revComplement);
}
MemExtVec.clear();
}
/* Output any unused query sequence */
m.lR=m.lQ=m.rR=m.rQ=0;
printMemOnTerminal(refSeqInfo, querySeqInfo, m, revComplement);
/* Restore std::cout */
if (IS_MATCH_BOTH_DEF(revComplement)){
TmpFiles[numFiles].close();
TmpFiles[numFiles+1].close();
std::cout.rdbuf(coutbuf);
outputInMummerFormat();
}
if(revComplement) {
sprintf(buffer, "%s/revComp", commonData::nucmer_path);
remove(buffer);
}
sprintf(buffer, "%s", commonData::nucmer_path);
remove(buffer);
}
};
|
gamma_index_ivfpq.h | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This faiss source code is licensed under the MIT license.
* https://github.com/facebookresearch/faiss/blob/master/LICENSE
*
*
* The works below are modified based on faiss:
* 1. Replace the static batch indexing with real time indexing
* 2. Add the fine-grained sort after PQ coarse sort
* 3. Add the numeric field and bitmap filters in the process of searching
*
* Modified works copyright 2019 The Gamma Authors.
*
* The modified codes are licensed under the Apache License, Version 2.0 license
* found in the LICENSE file in the root directory of this source tree.
*
*/
#ifndef GAMMA_INDEX_IVFPQ_H_
#define GAMMA_INDEX_IVFPQ_H_
#include "gamma_common_data.h"
#include "gamma_index.h"
#include "log.h"
#include "numeric_index.h"
#include "raw_vector.h"
#include "realtime_invert_index.h"
#include "faiss/AuxIndexStructures.h"
#include "faiss/FaissAssert.h"
#include "faiss/Heap.h"
#include "faiss/IndexIVF.h"
#include "faiss/IndexIVFPQ.h"
#include "faiss/InvertedLists.h"
#include "faiss/hamming.h"
#include "faiss/index_io.h"
#include "faiss/utils.h"
namespace tig_gamma {
/// statistics are robust to internal threading, but not if
/// IndexIVFPQ::search_preassigned is called by multiple threads
struct IndexIVFPQStats {
size_t nrefine; // nb of refines (IVFPQR)
size_t n_hamming_pass;
// nb of passed Hamming distance tests (for polysemous)
// timings measured with the CPU RTC
// on all threads
size_t search_cycles;
size_t refine_cycles; // only for IVFPQR
IndexIVFPQStats() { reset(); }
/* Zero all counters. The previous implementation was an empty stub,
 * which left every field uninitialized after construction (the
 * constructor delegates to reset()) and made an explicit reset() a
 * silent no-op. */
void reset() {
nrefine = 0;
n_hamming_pass = 0;
search_cycles = 0;
refine_cycles = 0;
}
};
// global var that collects them all
extern IndexIVFPQStats indexIVFPQ_stats;
// namespace {
using idx_t = faiss::Index::idx_t;
/* Read the CPU timestamp counter (RDTSC) for cheap cycle-level timing of
 * the search statistics. On non-x86_64 builds this always returns 0, so
 * the cycle counters are meaningless there. */
static uint64_t get_cycles() {
#ifdef __x86_64__
uint32_t high, low;
asm volatile("rdtsc \n\t" : "=a"(low), "=d"(high));
return ((uint64_t)high << 32) | (low);
#else
return 0;
#endif
}
#define TIC t0 = get_cycles()
#define TOC get_cycles() - t0
/** QueryTables manages the various ways of searching an
* IndexIVFPQ. The code contains a lot of branches, depending on:
* - metric_type: are we computing L2 or Inner product similarity?
* - by_residual: do we encode raw vectors or residuals?
* - use_precomputed_table: are x_R|x_C tables precomputed?
* - polysemous_ht: are we filtering with polysemous codes?
*/
struct QueryTables {
/*****************************************************
* General data from the IVFPQ
*****************************************************/
const faiss::IndexIVFPQ &ivfpq;
const faiss::IVFSearchParameters *params;
// copied from IndexIVFPQ for easier access
int d;
const faiss::ProductQuantizer &pq;
faiss::MetricType metric_type;
bool by_residual;
int use_precomputed_table;
int polysemous_ht;
// pre-allocated data buffers; all four point into the single `mem`
// buffer below (sim_table: M*ksub floats, sim_table_2: M*ksub floats,
// residual_vec and decoded_vec: d floats each)
float *sim_table, *sim_table_2;
float *residual_vec, *decoded_vec;
// single data buffer
std::vector<float> mem;
// for table pointers
std::vector<const float *> sim_table_ptrs;
explicit QueryTables(const faiss::IndexIVFPQ &ivfpq,
const faiss::IVFSearchParameters *params)
: ivfpq(ivfpq), d(ivfpq.d), pq(ivfpq.pq), metric_type(ivfpq.metric_type),
by_residual(ivfpq.by_residual),
use_precomputed_table(ivfpq.use_precomputed_table) {
// carve the four working buffers out of one contiguous allocation
mem.resize(pq.ksub * pq.M * 2 + d * 2);
sim_table = mem.data();
sim_table_2 = sim_table + pq.ksub * pq.M;
residual_vec = sim_table_2 + pq.ksub * pq.M;
decoded_vec = residual_vec + d;
// for polysemous: a per-search override of the Hamming threshold may
// be supplied via IVFPQSearchParameters
polysemous_ht = ivfpq.polysemous_ht;
if (auto ivfpq_params =
dynamic_cast<const faiss::IVFPQSearchParameters *>(params)) {
polysemous_ht = ivfpq_params->polysemous_ht;
}
if (polysemous_ht != 0) {
q_code.resize(pq.code_size);
}
init_list_cycles = 0;
sim_table_ptrs.resize(pq.M);
}
/*****************************************************
* What we do when query is known
*****************************************************/
// field specific to query
const float *qi;
// query-specific intialization: precompute the query/codebook tables
// and, for polysemous filtering without residuals, the query's PQ code
void init_query(const float *qi) {
this->qi = qi;
if (metric_type == faiss::METRIC_INNER_PRODUCT)
init_query_IP();
else
init_query_L2();
if (!by_residual && polysemous_ht != 0)
pq.compute_code(qi, q_code.data());
}
void init_query_IP() {
// precompute some tables specific to the query qi
pq.compute_inner_prod_table(qi, sim_table);
}
void init_query_L2() {
if (!by_residual) {
pq.compute_distance_table(qi, sim_table);
} else if (use_precomputed_table) {
// with precomputed tables only the query/centroid inner products
// are needed per query; they land in sim_table_2
pq.compute_inner_prod_table(qi, sim_table_2);
}
}
/*****************************************************
* When inverted list is known: prepare computations
*****************************************************/
// fields specific to list
long key;
float coarse_dis;
std::vector<uint8_t> q_code;
// accumulated cycles spent in per-list precomputation (stats)
uint64_t init_list_cycles;
/// once we know the query and the centroid, we can prepare the
/// sim_table that will be used for accumulation
/// and dis0, the initial value
float precompute_list_tables() {
float dis0 = 0;
uint64_t t0;
TIC;
if (by_residual) {
if (metric_type == faiss::METRIC_INNER_PRODUCT)
dis0 = precompute_list_tables_IP();
else
dis0 = precompute_list_tables_L2();
}
init_list_cycles += TOC;
return dis0;
}
// variant that only sets up sim_table_ptrs (no table materialization);
// L2 + precomputed tables only
float precompute_list_table_pointers() {
float dis0 = 0;
uint64_t t0;
TIC;
if (by_residual) {
if (metric_type == faiss::METRIC_INNER_PRODUCT)
FAISS_THROW_MSG("not implemented");
else
dis0 = precompute_list_table_pointers_L2();
}
init_list_cycles += TOC;
return dis0;
}
/*****************************************************
* compute tables for inner prod
*****************************************************/
float precompute_list_tables_IP() {
// prepare the sim_table that will be used for accumulation
// and dis0, the initial value
ivfpq.quantizer->reconstruct(key, decoded_vec);
// decoded_vec = centroid
float dis0 = faiss::fvec_inner_product(qi, decoded_vec, d);
if (polysemous_ht) {
// residual of the query w.r.t. the centroid, coded for Hamming tests
for (int i = 0; i < d; i++) {
residual_vec[i] = qi[i] - decoded_vec[i];
}
pq.compute_code(residual_vec, q_code.data());
}
return dis0;
}
/*****************************************************
* compute tables for L2 distance
*****************************************************/
float precompute_list_tables_L2() {
float dis0 = 0;
if (use_precomputed_table == 0 || use_precomputed_table == -1) {
// no precomputed tables: build the distance table from the residual
ivfpq.quantizer->compute_residual(qi, residual_vec, key);
pq.compute_distance_table(residual_vec, sim_table);
if (polysemous_ht != 0) {
pq.compute_code(residual_vec, q_code.data());
}
} else if (use_precomputed_table == 1) {
// sim_table = precomputed[key] - 2 * sim_table_2 (query IP table)
dis0 = coarse_dis;
faiss::fvec_madd(pq.M * pq.ksub,
&ivfpq.precomputed_table[key * pq.ksub * pq.M], -2.0,
sim_table_2, sim_table);
if (polysemous_ht != 0) {
ivfpq.quantizer->compute_residual(qi, residual_vec, key);
pq.compute_code(residual_vec, q_code.data());
}
} else if (use_precomputed_table == 2) {
// multi-index quantizer: assemble the table sub-block per coarse
// sub-quantizer, decoding the sub-indices from `key`
dis0 = coarse_dis;
const faiss::MultiIndexQuantizer *miq =
dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
FAISS_THROW_IF_NOT(miq);
const faiss::ProductQuantizer &cpq = miq->pq;
int Mf = pq.M / cpq.M;
const float *qtab = sim_table_2; // query-specific table
float *ltab = sim_table; // (output) list-specific table
long k = key;
for (size_t cm = 0; cm < cpq.M; cm++) {
// compute PQ index
int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
k >>= cpq.nbits;
// get corresponding table
const float *pc =
&ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];
if (polysemous_ht == 0) {
// sum up with query-specific table
faiss::fvec_madd(Mf * pq.ksub, pc, -2.0, qtab, ltab);
ltab += Mf * pq.ksub;
qtab += Mf * pq.ksub;
} else {
// also derive the query code (argmin per sub-quantizer)
for (size_t m = cm * Mf; m < (cm + 1) * Mf; m++) {
q_code[m] =
faiss::fvec_madd_and_argmin(pq.ksub, pc, -2, qtab, ltab);
pc += pq.ksub;
ltab += pq.ksub;
qtab += pq.ksub;
}
}
}
}
return dis0;
}
// fill sim_table_ptrs with pointers into the precomputed tables;
// requires use_precomputed_table == 1 or 2, and no polysemous filtering
float precompute_list_table_pointers_L2() {
float dis0 = 0;
if (use_precomputed_table == 1) {
dis0 = coarse_dis;
const float *s = &ivfpq.precomputed_table[key * pq.ksub * pq.M];
for (size_t m = 0; m < pq.M; m++) {
sim_table_ptrs[m] = s;
s += pq.ksub;
}
} else if (use_precomputed_table == 2) {
dis0 = coarse_dis;
const faiss::MultiIndexQuantizer *miq =
dynamic_cast<const faiss::MultiIndexQuantizer *>(ivfpq.quantizer);
FAISS_THROW_IF_NOT(miq);
const faiss::ProductQuantizer &cpq = miq->pq;
int Mf = pq.M / cpq.M;
long k = key;
int m0 = 0;
for (size_t cm = 0; cm < cpq.M; cm++) {
int ki = k & ((uint64_t(1) << cpq.nbits) - 1);
k >>= cpq.nbits;
const float *pc =
&ivfpq.precomputed_table[(ki * pq.M + cm * Mf) * pq.ksub];
for (int m = m0; m < m0 + Mf; m++) {
sim_table_ptrs[m] = pc;
pc += pq.ksub;
}
m0 += Mf;
}
} else {
FAISS_THROW_MSG("need precomputed tables");
}
if (polysemous_ht) {
FAISS_THROW_MSG("not implemented");
// Not clear that it makes sense to implemente this,
// because it costs M * ksub, which is what we wanted to
// avoid with the tables pointers.
}
return dis0;
}
};
/*****************************************************
* Scaning the codes.
* The scanning functions call their favorite precompute_*
* function to precompute the tables they need.
*****************************************************/
/* Core list scanner over PQ codes. Template parameters select the id
 * type, whether (list,offset) pairs are returned instead of ids, the
 * heap comparator C, and the metric (must match the index's at runtime). */
template <typename IDType, bool store_pairs, class C,
faiss::MetricType METRIC_TYPE>
struct IVFPQScannerT : QueryTables {
const uint8_t *list_codes;
const IDType *list_ids;
size_t list_size;
explicit IVFPQScannerT(const faiss::IndexIVFPQ &ivfpq,
const faiss::IVFSearchParameters *params)
: QueryTables(ivfpq, params) {
// only 8-bit PQ codes are supported by these scan loops
FAISS_THROW_IF_NOT(pq.nbits == 8);
assert(METRIC_TYPE == metric_type);
}
float dis0;
// select the inverted list and run the per-list precomputation
// according to mode (2 = full tables, 1 = table pointers, 0 = none)
void init_list(idx_t list_no, float coarse_dis, int mode) {
this->key = list_no;
this->coarse_dis = coarse_dis;
if (mode == 2) {
dis0 = precompute_list_tables();
} else if (mode == 1) {
dis0 = precompute_list_table_pointers();
}
}
/// tables are not precomputed, but pointers are provided to the
/// relevant X_c|x_r tables
size_t scan_list_with_pointer(size_t ncode, const uint8_t *codes,
const idx_t *ids, size_t k, float *heap_sim,
idx_t *heap_ids) const {
size_t nup = 0;
for (size_t j = 0; j < ncode; j++) {
float dis = dis0;
const float *tab = sim_table_2;
for (size_t m = 0; m < pq.M; m++) {
int ci = *codes++;
dis += sim_table_ptrs[m][ci] - 2 * tab[ci];
tab += pq.ksub;
}
if (C::cmp(heap_sim[0], dis)) {
faiss::heap_pop<C>(k, heap_sim, heap_ids);
long id = store_pairs ? (key << 32 | j) : ids[j];
faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id);
nup++;
}
}
return nup;
}
/// nothing is precomputed: access residuals on-the-fly
size_t scan_on_the_fly_dist(size_t ncode, const uint8_t *codes,
const idx_t *ids, size_t k, float *heap_sim,
idx_t *heap_ids) const {
const float *dvec;
float dis0 = 0;
size_t nup = 0;
if (by_residual) {
if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
// IP: add <query, centroid> once, then compare against decoded
// residuals
ivfpq.quantizer->reconstruct(key, residual_vec);
dis0 = faiss::fvec_inner_product(residual_vec, qi, d);
} else {
// L2: compare decoded residuals against the query residual
ivfpq.quantizer->compute_residual(qi, residual_vec, key);
}
dvec = residual_vec;
} else {
dvec = qi;
dis0 = 0;
}
for (size_t j = 0; j < ncode; j++) {
pq.decode(codes, decoded_vec);
codes += pq.code_size;
float dis;
if (METRIC_TYPE == faiss::METRIC_INNER_PRODUCT) {
dis = dis0 + faiss::fvec_inner_product(decoded_vec, qi, d);
} else {
dis = faiss::fvec_L2sqr(decoded_vec, dvec, d);
}
if (C::cmp(heap_sim[0], dis)) {
faiss::heap_pop<C>(k, heap_sim, heap_ids);
long id = store_pairs ? (key << 32 | j) : ids[j];
faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id);
nup++;
}
}
return nup;
}
/*****************************************************
* Scanning codes with polysemous filtering
*****************************************************/
// only codes whose Hamming distance to the query code is below the
// polysemous threshold get the full table-based distance computation
template <class HammingComputer>
size_t scan_list_polysemous_hc(size_t ncode, const uint8_t *codes,
const idx_t *ids, size_t k, float *heap_sim,
idx_t *heap_ids) const {
int ht = ivfpq.polysemous_ht;
size_t n_hamming_pass = 0, nup = 0;
int code_size = pq.code_size;
HammingComputer hc(q_code.data(), code_size);
for (size_t j = 0; j < ncode; j++) {
const uint8_t *b_code = codes;
int hd = hc.hamming(b_code);
if (hd < ht) {
n_hamming_pass++;
float dis = dis0;
const float *tab = sim_table;
for (size_t m = 0; m < pq.M; m++) {
dis += tab[*b_code++];
tab += pq.ksub;
}
if (C::cmp(heap_sim[0], dis)) {
faiss::heap_pop<C>(k, heap_sim, heap_ids);
long id = store_pairs ? (key << 32 | j) : ids[j];
faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id);
nup++;
}
}
codes += code_size;
}
#pragma omp critical
{ indexIVFPQ_stats.n_hamming_pass += n_hamming_pass; }
return nup;
}
// dispatch to a specialized HammingComputer for the common code sizes,
// with generic multiple-of-8 / multiple-of-4 fallbacks
size_t scan_list_polysemous(size_t ncode, const uint8_t *codes,
const idx_t *ids, size_t k, float *heap_sim,
idx_t *heap_ids) const {
switch (pq.code_size) {
#define HANDLE_CODE_SIZE(cs) \
case cs: \
return scan_list_polysemous_hc<faiss::HammingComputer##cs>( \
ncode, codes, ids, k, heap_sim, heap_ids); \
break
HANDLE_CODE_SIZE(4);
HANDLE_CODE_SIZE(8);
HANDLE_CODE_SIZE(16);
HANDLE_CODE_SIZE(20);
HANDLE_CODE_SIZE(32);
HANDLE_CODE_SIZE(64);
#undef HANDLE_CODE_SIZE
default:
if (pq.code_size % 8 == 0)
return scan_list_polysemous_hc<faiss::HammingComputerM8>(
ncode, codes, ids, k, heap_sim, heap_ids);
else
return scan_list_polysemous_hc<faiss::HammingComputerM4>(
ncode, codes, ids, k, heap_sim, heap_ids);
break;
}
}
};
/* Gamma's InvertedListScanner: wraps IVFPQScannerT and adds the doc-id
 * bitmap filter and the numeric-field filter during list scanning. */
template <faiss::MetricType METRIC_TYPE, bool store_pairs, class C,
int precompute_mode>
struct GammaIndexScanner : IVFPQScannerT<idx_t, store_pairs, C, METRIC_TYPE>,
faiss::InvertedListScanner {
GammaIndexScanner(const faiss::IndexIVFPQ &ivfpq)
: IVFPQScannerT<idx_t, store_pairs, C, METRIC_TYPE>(ivfpq, nullptr) {
docids_bitmap_ = nullptr;
numeric_index_ptr_ = nullptr;
raw_vec_ = nullptr;
}
/* Scan a flat code array, skipping docs filtered by the bitmap or the
 * numeric index, and push surviving distances onto the result heap. */
size_t scan_list_with_table(size_t ncode, const uint8_t *codes,
const idx_t *ids, size_t k, float *heap_sim,
idx_t *heap_ids) const {
assert(this->pq.M % 4 == 0);
// set filter func
// NOTE(review): is_filterable and calc_dis below are constructed but
// never invoked -- the HANDLE_ONE macro re-implements both inline.
// They appear to be dead code; confirm before removing.
std::function<bool(int)> is_filterable;
if (numeric_index_ptr_ != nullptr) {
is_filterable = [this](int doc_id) -> bool {
return (bitmap::test(docids_bitmap_, doc_id) ||
(not numeric_index_ptr_->Has(doc_id)));
};
} else {
is_filterable = [this](int doc_id) -> bool {
return (bitmap::test(docids_bitmap_, doc_id));
};
}
// set compute distance func
std::function<float(const uint8_t *)> calc_dis;
if (this->pq.M % 4 == 0) {
calc_dis = [this](const uint8_t *codes) -> float {
float dis = this->dis0;
const float *tab = this->sim_table;
for (size_t m = 0; m < this->pq.M; m += 4) {
dis += tab[*codes++], tab += this->pq.ksub;
dis += tab[*codes++], tab += this->pq.ksub;
dis += tab[*codes++], tab += this->pq.ksub;
dis += tab[*codes++], tab += this->pq.ksub;
}
return dis;
};
} else {
calc_dis = [this](const uint8_t *codes) -> float {
float dis = this->dis0;
const float *tab = this->sim_table;
for (size_t m = 0; m < this->pq.M; m++) {
dis += tab[*codes++], tab += this->pq.ksub;
}
return dis;
};
}
int nup = 0;
// process one code: filter by doc id, accumulate the PQ distance
// (4-way unrolled; requires pq.M % 4 == 0), update the heap
#define HANDLE_ONE \
do { \
int doc_id = raw_vec_->vid2docid_[ids[j]]; \
if ((numeric_index_ptr_ != nullptr && \
(not numeric_index_ptr_->Has(doc_id))) || \
bitmap::test(docids_bitmap_, doc_id)) { \
codes += this->pq.M; /* increment pointer */ \
j++; /* increment j*/ \
continue; \
} \
\
float dis = this->dis0; \
const float *tab = this->sim_table; \
for (size_t m = 0; m < this->pq.M; m += 4) { \
dis += tab[*codes++], tab += this->pq.ksub; \
dis += tab[*codes++], tab += this->pq.ksub; \
dis += tab[*codes++], tab += this->pq.ksub; \
dis += tab[*codes++], tab += this->pq.ksub; \
} \
\
if (C::cmp(heap_sim[0], dis)) { \
faiss::heap_pop<C>(k, heap_sim, heap_ids); \
long id = ids[j]; \
faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id); \
nup++; \
} \
\
j++; /* increment j */ \
} while (0)
size_t j = 0;
size_t loops = ncode / 8;
for (size_t i = 0; i < loops; i++) {
HANDLE_ONE; // 1
HANDLE_ONE; // 2
HANDLE_ONE; // 3
HANDLE_ONE; // 4
HANDLE_ONE; // 5
HANDLE_ONE; // 6
HANDLE_ONE; // 7
HANDLE_ONE; // 8
}
// remainder: intentional fallthrough handles ncode % 8 leftovers
switch (ncode % 8) {
case 7:
HANDLE_ONE;
case 6:
HANDLE_ONE;
case 5:
HANDLE_ONE;
case 4:
HANDLE_ONE;
case 3:
HANDLE_ONE;
case 2:
HANDLE_ONE;
case 1:
HANDLE_ONE;
}
assert(j == ncode);
#undef HANDLE_ONE
return nup;
}
/* Variant taking an array of per-entry code pointers (real-time lists);
 * no doc filtering here, only distance + heap update. */
size_t scan_list_with_table(size_t ncode, const uint8_t **codes,
const idx_t *ids, size_t k, float *heap_sim,
idx_t *heap_ids) const {
int nup = 0;
assert(this->pq.M % 4 == 0);
#define HANDLE_ONE \
do { \
float dis = this->dis0; \
const float *tab = this->sim_table; \
const uint8_t *code = codes[j]; \
for (size_t m = 0; m < this->pq.M; m += 4) { \
dis += tab[*code++], tab += this->pq.ksub; \
dis += tab[*code++], tab += this->pq.ksub; \
dis += tab[*code++], tab += this->pq.ksub; \
dis += tab[*code++], tab += this->pq.ksub; \
} \
\
if (C::cmp(heap_sim[0], dis)) { \
faiss::heap_pop<C>(k, heap_sim, heap_ids); \
long id = ids[j]; \
faiss::heap_push<C>(k, heap_sim, heap_ids, dis, id); \
nup++; \
} \
\
j++; /* increment j */ \
} while (0)
size_t j = 0;
size_t loops = ncode / 8;
for (size_t i = 0; i < loops; i++) {
HANDLE_ONE; // 1
HANDLE_ONE; // 2
HANDLE_ONE; // 3
HANDLE_ONE; // 4
HANDLE_ONE; // 5
HANDLE_ONE; // 6
HANDLE_ONE; // 7
HANDLE_ONE; // 8
}
// remainder: intentional fallthrough handles ncode % 8 leftovers
switch (ncode % 8) {
case 7:
HANDLE_ONE;
case 6:
HANDLE_ONE;
case 5:
HANDLE_ONE;
case 4:
HANDLE_ONE;
case 3:
HANDLE_ONE;
case 2:
HANDLE_ONE;
case 1:
HANDLE_ONE;
}
assert(j == ncode);
#undef HANDLE_ONE
return nup;
}
inline void set_query(const float *query) override {
this->init_query(query);
}
// install the numeric-field filter results for this search
inline void set_search_condition(const GammaSearchCondition *condition) {
this->numeric_index_ptr_ = condition->numeric_results;
}
inline void set_list(idx_t list_no, float coarse_dis) override {
this->init_list(list_no, coarse_dis, precompute_mode);
}
// distance of a single code via the precomputed sim_table
inline float distance_to_code(const uint8_t *code) const override {
assert(precompute_mode == 2);
float dis = this->dis0;
const float *tab = this->sim_table;
for (size_t m = 0; m < this->pq.M; m++) {
dis += tab[*code++];
tab += this->pq.ksub;
}
return dis;
}
// dispatch over polysemous filtering and precompute modes;
// NOTE(review): always returns 0 rather than the scan's nup count --
// callers apparently ignore the return value; confirm.
inline size_t scan_codes(size_t ncode, const uint8_t *codes, const idx_t *ids,
float *heap_sim, idx_t *heap_ids,
size_t k) const override {
if (this->polysemous_ht > 0) {
assert(precompute_mode == 2);
this->scan_list_polysemous(ncode, codes, ids, k, heap_sim, heap_ids);
} else if (precompute_mode == 2) {
this->scan_list_with_table(ncode, codes, ids, k, heap_sim, heap_ids);
} else if (precompute_mode == 1) {
this->scan_list_with_pointer(ncode, codes, ids, k, heap_sim, heap_ids);
} else if (precompute_mode == 0) {
this->scan_on_the_fly_dist(ncode, codes, ids, k, heap_sim, heap_ids);
} else {
FAISS_THROW_MSG("bad precomp mode");
}
return 0;
}
// pointer-array overload (real-time lists); only full-table mode works
inline size_t scan_codes(size_t ncode, const uint8_t **codes,
const idx_t *ids, float *heap_sim, idx_t *heap_ids,
size_t k) {
if (precompute_mode == 2) {
this->scan_list_with_table(ncode, codes, ids, k, heap_sim, heap_ids);
} else {
FAISS_THROW_MSG("bad precomp mode");
}
return 0;
}
// install the deleted-doc bitmap and the raw vector store (for the
// vector-id -> doc-id mapping); both are set at most once
inline void SetVecFilter(const char *docids_bitmap,
const RawVector *raw_vec) {
if (docids_bitmap == nullptr) {
LOG(ERROR) << "docids_bitmap is NULL!";
return;
}
if (!docids_bitmap_) {
docids_bitmap_ = docids_bitmap;
}
if (!raw_vec_) {
raw_vec_ = raw_vec;
}
return;
}
const char *docids_bitmap_;
NI::RangeQueryResult *numeric_index_ptr_;
const RawVector *raw_vec_;
};
//} // anonymous namespace
/* faiss::InvertedLists adapter backed by Gamma's real-time invert index,
 * replacing faiss' static batch lists with lists that can be updated
 * while searches run. Definitions live in the corresponding .cc file. */
struct RTInvertedLists : faiss::InvertedLists {
RTInvertedLists(realtime::RTInvertIndex *rt_invert_index_ptr, size_t nlist,
size_t code_size);
/*************************
* Read only functions */
// get the size of a list
size_t list_size(size_t list_no) const override;
/** get the codes for an inverted list
* must be released by release_codes
*
* @return codes size list_size * code_size
*/
const uint8_t *get_codes(size_t list_no) const override;
/** get the ids for an inverted list
* must be released by release_ids
*
* @return ids size list_size
*/
const idx_t *get_ids(size_t list_no) const override;
/*************************
* writing functions */
size_t add_entries(size_t list_no, size_t n_entry, const idx_t *ids,
const uint8_t *code) override;
void resize(size_t list_no, size_t new_size) override;
void update_entries(size_t list_no, size_t offset, size_t n_entry,
const idx_t *ids_in, const uint8_t *codes_in) override;
// underlying real-time index; not owned by this adapter (no delete here)
realtime::RTInvertIndex *rt_invert_index_ptr_;
};
/*************************************************************
* I/O macros
*
* we use macros so that we have a line number to report in abort
* (). This makes debugging a lot easier. The IOReader or IOWriter is
* always called f and thus is not passed in as a macro parameter.
**************************************************************/
// Write `n` items through the IOWriter `f`; throws with file name and
// errno text if fewer than `n` items were written.
#define WRITEANDCHECK(ptr, n) \
{ \
size_t ret = (*f)(ptr, sizeof(*(ptr)), n); \
FAISS_THROW_IF_NOT_FMT(ret == (n), "write error in %s: %ld != %ld (%s)", \
f->name.c_str(), ret, size_t(n), strerror(errno)); \
}
// Read `n` items through the IOReader `f`; throws on a short read.
#define READANDCHECK(ptr, n) \
{ \
size_t ret = (*f)(ptr, sizeof(*(ptr)), n); \
FAISS_THROW_IF_NOT_FMT(ret == (n), "read error in %s: %ld != %ld (%s)", \
f->name.c_str(), ret, size_t(n), strerror(errno)); \
}
// Single-value convenience wrappers.
#define WRITE1(x) WRITEANDCHECK(&(x), 1)
#define READ1(x) READANDCHECK(&(x), 1)
// Serialize a std::vector as a size_t element count followed by the data.
#define WRITEVECTOR(vec) \
{ \
size_t size = (vec).size(); \
WRITEANDCHECK(&size, 1); \
WRITEANDCHECK((vec).data(), size); \
}
// Deserialize a std::vector written by WRITEVECTOR: read the element
// count, sanity-check it, resize, then read the payload.
// will fail if we write 256G of data at once...
// Fixes: `size >= 0` was a tautology (size_t is unsigned), and
// `1L << 40` overflows on platforms with 32-bit long; shift a size_t.
#define READVECTOR(vec) \
{ \
size_t size; \
READANDCHECK(&size, 1); \
FAISS_THROW_IF_NOT(size < ((size_t)1 << 40)); \
(vec).resize(size); \
READANDCHECK((vec).data(), size); \
}
/****************************************************************
* Write
*****************************************************************/
// Serialize the common faiss::Index header fields (dimension, ntotal,
// trained flag, metric). Two dummy idx_t slots are written to stay
// layout-compatible with the stock faiss index format.
static void write_index_header(const faiss::Index *idx, faiss::IOWriter *f) {
WRITE1(idx->d);
WRITE1(idx->ntotal);
faiss::Index::idx_t dummy = 1 << 20; // placeholder; ignored when read back
WRITE1(dummy);
WRITE1(dummy);
WRITE1(idx->is_trained);
WRITE1(idx->metric_type);
}
// Serialize an IndexIVF header: common index header, then nlist/nprobe,
// the coarse quantizer (via faiss), and the direct-map state.
static void write_ivf_header(const faiss::IndexIVF *ivf, faiss::IOWriter *f) {
write_index_header(ivf, f);
WRITE1(ivf->nlist);
WRITE1(ivf->nprobe);
faiss::write_index(ivf->quantizer, f);
WRITE1(ivf->maintain_direct_map);
WRITEVECTOR(ivf->direct_map);
}
// Deserialize the common index header written by write_index_header;
// the two dummy idx_t values are consumed and discarded.
static void read_index_header(faiss::Index *idx, faiss::IOReader *f) {
READ1(idx->d);
READ1(idx->ntotal);
faiss::Index::idx_t dummy;
READ1(dummy);
READ1(dummy);
READ1(idx->is_trained);
READ1(idx->metric_type);
idx->verbose = false; // loaded indexes start quiet
}
// Deserialize an IndexIVF header written by write_ivf_header. The ivf
// takes ownership of the quantizer it reads (own_fields = true). The
// optional `ids` vector is only populated for legacy "Iv" formats.
static void
read_ivf_header(faiss::IndexIVF *ivf, faiss::IOReader *f,
std::vector<std::vector<faiss::Index::idx_t>> *ids = nullptr) {
read_index_header(ivf, f);
READ1(ivf->nlist);
READ1(ivf->nprobe);
ivf->quantizer = faiss::read_index(f);
ivf->own_fields = true;
if (ids) { // used in legacy "Iv" formats
ids->resize(ivf->nlist);
for (size_t i = 0; i < ivf->nlist; i++)
READVECTOR((*ids)[i]);
}
READ1(ivf->maintain_direct_map);
READVECTOR(ivf->direct_map);
}
// Serialize a ProductQuantizer: dimension, number of sub-quantizers,
// bits per sub-quantizer, and the centroid table.
static void write_ProductQuantizer(const faiss::ProductQuantizer *pq,
faiss::IOWriter *f) {
WRITE1(pq->d);
WRITE1(pq->M);
WRITE1(pq->nbits);
WRITEVECTOR(pq->centroids);
}
// Deserialize a ProductQuantizer written by write_ProductQuantizer.
// Derived fields (code sizes, etc.) are recomputed before the centroid
// table is loaded.
static void read_ProductQuantizer(faiss::ProductQuantizer *pq,
faiss::IOReader *f) {
READ1(pq->d);
READ1(pq->M);
READ1(pq->nbits);
pq->set_derived_values();
READVECTOR(pq->centroids);
}
// namespace {
// IOReader backed by a stdio FILE*. When constructed from a path the
// file is opened here and closed by the destructor; when constructed
// from an existing FILE* the caller keeps ownership.
struct FileIOReader : faiss::IOReader {
  FILE *f = nullptr;
  bool need_close = false; // true only when this object opened `f`

  FileIOReader(FILE *rf) : f(rf) {}

  FileIOReader(const char *fname) {
    name = fname;
    f = fopen(fname, "rb");
    FAISS_THROW_IF_NOT_FMT(f, "could not open %s for reading: %s", fname,
                           strerror(errno));
    need_close = true;
  }

  ~FileIOReader() override {
    if (need_close) {
      int ret = fclose(f);
      if (ret != 0) { // we cannot raise an exception in the destructor
        // Fix: terminate the diagnostic with a newline so it is not
        // fused with subsequent stderr output.
        fprintf(stderr, "file %s close error: %s\n", name.c_str(),
                strerror(errno));
      }
    }
  }

  // Read `nitems` objects of `size` bytes; returns the item count read.
  size_t operator()(void *ptr, size_t size, size_t nitems) override {
    return fread(ptr, size, nitems, f);
  }

  int fileno() override { return ::fileno(f); }
};
// IOWriter backed by a stdio FILE*. When constructed from a path the
// file is opened here and closed by the destructor; when constructed
// from an existing FILE* the caller keeps ownership.
struct FileIOWriter : faiss::IOWriter {
  FILE *f = nullptr;
  bool need_close = false; // true only when this object opened `f`

  FileIOWriter(FILE *wf) : f(wf) {}

  FileIOWriter(const char *fname) {
    name = fname;
    f = fopen(fname, "wb");
    FAISS_THROW_IF_NOT_FMT(f, "could not open %s for writing: %s", fname,
                           strerror(errno));
    need_close = true;
  }

  ~FileIOWriter() override {
    if (need_close) {
      int ret = fclose(f);
      if (ret != 0) {
        // we cannot raise an exception in the destructor
        // Fix: terminate the diagnostic with a newline so it is not
        // fused with subsequent stderr output.
        fprintf(stderr, "file %s close error: %s\n", name.c_str(),
                strerror(errno));
      }
    }
  }

  // Write `nitems` objects of `size` bytes; returns the item count written.
  size_t operator()(const void *ptr, size_t size, size_t nitems) override {
    return fwrite(ptr, size, nitems, f);
  }

  int fileno() override { return ::fileno(f); }
};
// } // anonymous namespace
// Gamma's realtime IVF-PQ index: combines faiss::IndexIVFPQ with the
// project's GammaIndex interface and a realtime inverted index
// (rt_invert_index_ptr_) that supports incremental add and dump/load.
struct GammaIVFPQIndex : GammaIndex, faiss::IndexIVFPQ {
GammaIVFPQIndex(faiss::Index *quantizer, size_t d, size_t nlist, size_t M,
size_t nbits_per_idx, const char *docids_bitmap,
RawVector *raw_vec, int nprobe);
faiss::InvertedListScanner *
get_InvertedListScanner(bool store_pairs) const override;
int Indexing() override;
int AddRTVecsToIndex() override;
bool Add(int n, const float *vec) override;
int Search(const VectorQuery *query, const GammaSearchCondition *condition,
VectorResult &result) override;
void search_preassigned(int n, const float *x,
const GammaSearchCondition *condition,
const idx_t *assign, const float *centroid_dis,
float *distances, idx_t *labels, int *total,
bool store_pairs,
const faiss::IVFSearchParameters *params = nullptr);
// assign the vectors, then call search_preassign
void SearchIVFPQ(int n, const float *x, const GammaSearchCondition *condition,
float *distances, idx_t *labels, int *total);
void SearchDirectly(int n, const float *x,
const GammaSearchCondition *condition, float *distances,
idx_t *labels, int *total);
// Memory used by the realtime inverted index (0 when absent).
long GetTotalMemBytes() override {
if (!rt_invert_index_ptr_) {
return 0;
}
return rt_invert_index_ptr_->GetTotalMemBytes();
}
// Persist the index under `dir`: writes the IVF-PQ header/codebooks to
// gamma_index.info, then delegates the inverted lists to the realtime
// index. Returns -1 when there is no realtime index, 0 when there is
// nothing to dump, otherwise the realtime Dump() result.
int Dump(const std::string &dir) override {
if (!rt_invert_index_ptr_) {
LOG(INFO) << "realtime invert index ptr is null";
return -1;
}
if (!this->is_trained) {
LOG(INFO) << "gamma index is not trained, skip dumping";
return 0;
}
string info_file = dir + "/gamma_index.info";
faiss::IOWriter *f = new FileIOWriter(info_file.c_str());
const IndexIVFPQ *ivpq = static_cast<const IndexIVFPQ *>(this);
write_ivf_header(ivpq, f);
WRITE1(ivpq->by_residual);
WRITE1(ivpq->code_size);
tig_gamma::write_ProductQuantizer(&ivpq->pq, f);
delete f;
LOG(INFO) << "dump: d=" << ivpq->d << ", ntotal=" << ivpq->ntotal
<< ", is_trained=" << ivpq->is_trained
<< ", metric_type=" << ivpq->metric_type
<< ", nlist=" << ivpq->nlist << ", nprobe=" << ivpq->nprobe
<< ", maintain_direct_map=" << ivpq->maintain_direct_map
<< ", by_residual=" << ivpq->by_residual
<< ", code_size=" << ivpq->code_size << ", pq: d=" << ivpq->pq.d
<< ", M=" << ivpq->pq.M << ", nbits=" << ivpq->pq.nbits;
if (indexed_vec_count_ <= 0) {
LOG(INFO) << "no vector is indexed, do not need dump";
return 0;
}
return rt_invert_index_ptr_->Dump(dir, indexed_vec_count_ - 1);
}
// Restore the index from the newest directory in `index_dirs`. Reads
// the header/codebooks from gamma_index.info, recomputes the residual
// precompute table, then loads the realtime inverted lists. Returns
// the number of indexed vectors, 0 when training is still required,
// or -1 when there is no realtime index.
int Load(const std::vector<std::string> &index_dirs) {
if (!rt_invert_index_ptr_) {
return -1;
}
string info_file = index_dirs[index_dirs.size() - 1] + "/gamma_index.info";
if (access(info_file.c_str(), F_OK) != 0) {
LOG(INFO) << info_file << " isn't existed, skip loading";
return 0; // it should train again after load
}
faiss::IOReader *f = new FileIOReader(info_file.c_str());
IndexIVFPQ *ivpq = static_cast<IndexIVFPQ *>(this);
read_ivf_header(ivpq, f, nullptr); // not legacy
READ1(ivpq->by_residual);
READ1(ivpq->code_size);
read_ProductQuantizer(&ivpq->pq, f);
// precomputed table not stored. It is cheaper to recompute it
ivpq->use_precomputed_table = 0;
if (ivpq->by_residual)
ivpq->precompute_table();
delete f;
if (!this->is_trained) {
LOG(ERROR) << "unexpected, gamma index information is loaded, but it "
"isn't trained";
return 0; // it should train again after load
}
indexed_vec_count_ = rt_invert_index_ptr_->Load(index_dirs);
LOG(INFO) << "load: d=" << ivpq->d << ", ntotal=" << ivpq->ntotal
<< ", is_trained=" << ivpq->is_trained
<< ", metric_type=" << ivpq->metric_type
<< ", nlist=" << ivpq->nlist << ", nprobe=" << ivpq->nprobe
<< ", maintain_direct_map=" << ivpq->maintain_direct_map
<< ", by_residual=" << ivpq->by_residual
<< ", code_size=" << ivpq->code_size << ", pq: d=" << ivpq->pq.d
<< ", M=" << ivpq->pq.M << ", nbits=" << ivpq->pq.nbits
<< ", indexed vector count=" << indexed_vec_count_;
return indexed_vec_count_;
}
int indexed_vec_count_;  // number of vectors currently indexed
// Realtime inverted index backing Add/Dump/Load; lifetime managed elsewhere.
realtime::RTInvertIndex *rt_invert_index_ptr_;
#ifdef PERFORMANCE_TESTING
std::atomic<uint64_t> search_count_;  // searches served (perf builds only)
#endif
};
} // namespace tig_gamma
#endif
|
strsm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/ztrsm.c, normal z -> s, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_trsm
*
* Solves one of the matrix equations
*
* \f[ op( A ) \times X = \alpha B, \f] or
* \f[ X \times op( A ) = \alpha B, \f]
*
* where op( A ) is one of:
* \f[ op( A ) = A, \f]
* \f[ op( A ) = A^T, \f]
* \f[ op( A ) = A^T, \f]
*
* alpha is a scalar, X and B are m-by-n matrices, and
* A is a unit or non-unit, upper or lower triangular matrix.
* The matrix X overwrites B.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft: op(A)*X = B,
* - PlasmaRight: X*op(A) = B.
*
* @param[in] uplo
* - PlasmaUpper: A is upper triangular,
* - PlasmaLower: A is lower triangular.
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* - PlasmaNonUnit: A has non-unit diagonal,
* - PlasmaUnit: A has unit diagonal.
*
* @param[in] m
* The number of rows of the matrix B. m >= 0.
*
* @param[in] n
* The number of columns of the matrix B. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* The k-by-k triangular matrix,
* where k = m if side = PlasmaLeft,
* and k = n if side = PlasmaRight.
* If uplo = PlasmaUpper, the leading k-by-k upper triangular part
* of the array A contains the upper triangular matrix, and the
* strictly lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading k-by-k lower triangular part
* of the array A contains the lower triangular matrix, and the
* strictly upper triangular part of A is not referenced.
* If diag = PlasmaUnit, the diagonal elements of A are also not
* referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,k).
*
* @param[in,out] pB
* On entry, the m-by-n right hand side matrix B.
* On exit, if return value = 0, the m-by-n solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_strsm
* @sa plasma_ctrsm
* @sa plasma_dtrsm
* @sa plasma_strsm
*
******************************************************************************/
/* Solve op(A)*X = alpha*B or X*op(A) = alpha*B (LAPACK-style interface).
 * Validates arguments, converts A and B to tile layout, runs the tile
 * algorithm inside an OpenMP task region, and copies the solution back
 * into pB. Returns PlasmaSuccess or a negative argument index / error code. */
int plasma_strsm(plasma_enum_t side, plasma_enum_t uplo,
                 plasma_enum_t transa, plasma_enum_t diag,
                 int m, int n,
                 float alpha, float *pA, int lda,
                 float *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) &&
        (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        return -1;
    }
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if ((transa != PlasmaConjTrans) &&
        (transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans )) {
        plasma_error("illegal value of transa");
        return -3;
    }
    if ((diag != PlasmaUnit) &&
        (diag != PlasmaNonUnit)) {
        plasma_error("illegal value of diag");
        return -4;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -5;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -6;
    }

    // Order of the triangular matrix A: m on the left, n on the right.
    int an;
    if (side == PlasmaLeft)
        an = m;
    else
        an = n;

    if (lda < imax(1, an)) {
        plasma_error("illegal value of lda");
        return -8;
    }
    if (ldb < imax(1, m)) {
        plasma_error("illegal value of ldb");
        return -10;
    }

    // quick return
    if ((m == 0) || (n == 0))
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trsm(plasma, PlasmaRealFloat, m, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        an, an, 0, 0, an, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        m, n, 0, 0, m, n, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    // Bug fix: the return values of the init calls below were previously
    // assigned but ignored, leaking both descriptors on failure.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_sge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_strsm(side, uplo, transa, diag,
                         alpha, A,
                                B,
                         &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_trsm
*
* Computes triangular solve.
* Non-blocking tile version of plasma_strsm().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft: op(A)*X = B,
* - PlasmaRight: X*op(A) = B.
*
* @param[in] uplo
* - PlasmaUpper: A is upper triangular,
* - PlasmaLower: A is lower triangular.
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* - PlasmaNonUnit: A has non-unit diagonal,
* - PlasmaUnit: A has unit diagonal.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_strsm
* @sa plasma_omp_ctrsm
* @sa plasma_omp_dtrsm
* @sa plasma_omp_strsm
*
******************************************************************************/
/* Tile-async triangular solve (non-blocking version of plasma_strsm).
 * Errors are reported through sequence->status / request->status via
 * plasma_request_fail(); nothing is returned.
 * Bug fix: the NULL checks for `sequence` and `request` used to come
 * AFTER several failure paths that passed them to plasma_request_fail(),
 * which dereferences them. They are now validated first. */
void plasma_omp_strsm(plasma_enum_t side, plasma_enum_t uplo,
                      plasma_enum_t transa, plasma_enum_t diag,
                      float alpha, plasma_desc_t A,
                                   plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate sequence and request before anything that could call
    // plasma_request_fail() with them.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((side != PlasmaLeft) &&
        (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((transa != PlasmaConjTrans) &&
        (transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans)) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((diag != PlasmaUnit) &&
        (diag != PlasmaNonUnit)) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if ((B.m == 0) || (B.n == 0))
        return;

    // Call the parallel function.
    plasma_pstrsm(side, uplo,
                  transa, diag,
                  alpha, A,
                         B,
                  sequence, request);
}
|
multiply_matrices.c | #include "matmult.h"
/*
* struct for the multithreaded multiply_matrices_part() function
*/
struct mult_mat_struct
{
struct mat a, b, c;
size_t first_row, last_row;
};
/*
* multiplies some rows of matrix a with the matrix b
* if first row is bigger than last row, first row is out of bound, last row is out of bound or the matrix sizes don't match, nothing is done
* note: the last row is not included in the calculation
* @param pointer to mat_mult_struct with matrices and
* @return nothing
*/
/*
 * multiplies some rows of matrix a with the matrix b; writes c = a*b for
 * rows [first_row, last_row) of c (the last row is not included).
 * Nothing is done if the argument is NULL, the row range is out of
 * bounds, or the matrix sizes don't match.
 * @param p pointer to a struct mult_mat_struct with matrices and row range
 * @return always NULL (pthread start-routine signature)
 */
void* multiply_matrices_part(void* p)
{
    size_t x, y, i;
    size_t index = 0;
    struct mult_mat_struct* s = (struct mult_mat_struct*)p;
    /* exit if the argument was a null pointer
     * (fix: the old code documented this check but never performed it) */
    if(s == NULL)
        return NULL;
    /* exit if the matrix sizes don't match */
    if(s->a.n != s->c.n || s->b.n != s->c.n)
        return NULL;
    /* exit if first_row or last_row are out of bound */
    if(s->first_row >= s->c.n || s->last_row > s->c.n)
        return NULL;
    /* multiply part of the matrix */
    for(y = s->first_row; y < s->last_row; ++y)
    {
        for(x = 0; x < s->c.n; ++x)
        {
            /* scalar product of row y of a with column x of b */
            index = y * s->c.n + x;
            s->c.data[index] = 0;
            for(i = 0; i < s->c.n; ++i)
            {
                s->c.data[index] += s->a.data[y * s->c.n + i] * s->b.data[i * s->c.n + x];
            }
        }
    }
    return NULL;
}
/*
* see header file
*/
/*
 * see header file
 *
 * Fixes over the previous version:
 *  - `threads` and `args` were malloc'd but never freed (memory leak);
 *  - after a pthread_create() failure the join loop still joined ALL
 *    num_cores slots, including threads that were never created (UB);
 *  - in the OpenMP branch, x, i and index were implicitly shared across
 *    threads, which is a data race; they are now private.
 */
void multiply_matrices(struct mat c, struct mat a, struct mat b)
{
#ifdef NO_OPENMP
    long int num_cores = get_num_cpus();
    int use_single_core = 1;
    pthread_t* threads = NULL;
    struct mult_mat_struct* args = NULL;
    if(num_cores > 1)
    {
        size_t rows;
        long int i;
        long int started = 0; /* threads actually created */
        if((size_t)num_cores > c.n)
        {
            /* use fewer cores to match the matrix sizes */
            num_cores = c.n;
        }
        threads = (pthread_t*)malloc(sizeof(pthread_t) * num_cores);
        args = (struct mult_mat_struct*)malloc(sizeof(struct mult_mat_struct) * num_cores);
        /* start threads */
        if(threads != NULL && args != NULL)
        {
            use_single_core = 0;
            rows = c.n / num_cores; /* loop-invariant: rows per thread */
            for(i = 0; i < num_cores; ++i)
            {
                args[i].a = a;
                args[i].b = b;
                args[i].c = c;
                args[i].first_row = i * rows;
                args[i].last_row = args[i].first_row + rows;
                if(i == (num_cores - 1))
                {
                    /* avoid leaving out the last lines because of inaccuracy */
                    args[i].last_row = c.n;
                }
                if(pthread_create(&threads[i], NULL, multiply_matrices_part, &args[i]) != 0)
                {
                    use_single_core = 1;
                    break;
                }
                ++started;
            }
            /* join only the threads that were actually created */
            for(i = 0; i < started; ++i)
            {
                if(pthread_join(threads[i], NULL) != 0)
                {
                    /* NOTE: do not break here to allow other threads to finish! */
                    use_single_core = 1;
                }
            }
        }
    }
    /* use only 1 cpu if there are not more available or something failed */
    if(use_single_core == 1)
    {
        struct mult_mat_struct s;
        s.a = a;
        s.b = b;
        s.c = c;
        s.first_row = 0;
        s.last_row = c.n;
        multiply_matrices_part(&s);
    }
    free(threads); /* free(NULL) is a no-op */
    free(args);
#else
    size_t x, y, i, index;
    /* exit if the matrix sizes don't match */
    if(a.n != c.n || b.n != c.n)
        return;
    /* x, i and index are per-iteration scratch and must be thread-private */
    #pragma omp parallel for private(x, i, index)
    for(y = 0; y < c.n; ++y)
    {
        for(x = 0; x < c.n; ++x)
        {
            /* scalar product */
            index = y * c.n + x;
            c.data[index] = 0;
            for(i = 0; i < c.n; ++i)
            {
                c.data[index] += a.data[y * c.n + i] * b.data[i * c.n + x];
            }
        }
    }
#endif
}
|
GB_unop__identity_int64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_fp64)
// op(A') function: GB (_unop_tran__identity_int64_fp64)
// C type: int64_t
// A type: double
// cast: int64_t cij = GB_cast_to_int64_t ((double) (aij))
// unaryop: cij = aij
// A (input) type
#define GB_ATYPE \
double
// C (output) type
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// addressing a C entry
#define GB_CX(p) Cx [p]
// unary operator (identity)
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = GB_cast_to_int64_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with an fp64 -> int64 typecast:
// Cx [p] = cast (Ax [p]) for all p, in parallel. Bitmap entries with
// Ab [p] == 0 are skipped. Returns GrB_NO_VALUE when the operator is
// disabled at compile time (GB_DISABLE).
GrB_Info GB (_unop_apply__identity_int64_fp64)
(
int64_t *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/full case: every entry is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
int64_t z = GB_cast_to_int64_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cast (A'): transpose with the same fp64 -> int64 typecast. The
// whole algorithm lives in the shared GB_unop_transpose.c template,
// driven by the GB_* macros defined above. Returns GrB_NO_VALUE when
// the operator is disabled at compile time.
GrB_Info GB (_unop_tran__identity_int64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
parallel_file_map.h | // Copyright 2019-2020 the ProGraML authors.
//
// Contact Chris Cummins <chrisc.101@gmail.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <string>
#include <vector>

#include "boost/filesystem.hpp"
namespace fs = boost::filesystem;
using std::string;
using std::vector;
DECLARE_int32(limit);
namespace programl {
namespace task {
namespace dataflow {
vector<fs::path> EnumerateProgramGraphFiles(const fs::path& root);
// Wall-clock time since the Unix epoch, truncated to milliseconds.
inline std::chrono::milliseconds Now() {
  const auto sinceEpoch = std::chrono::system_clock::now().time_since_epoch();
  return std::chrono::duration_cast<std::chrono::milliseconds>(sinceEpoch);
}
// chunk_size: The size of file path chunks to execute in worker
// thread inner loops. A larger chunk size creates more infrequent
// status updates.
// Apply ProcessOne(path, file) to the first n files in parallel, where
// n is files.size() clamped by --limit. Work is handed out in chunks of
// chunkSize paths; after each chunk the worker prints a progress line
// (count, ms/file, percentage) to stdout.
// chunk_size: The size of file path chunks to execute in worker
// thread inner loops. A larger chunk size creates more infrequent
// status updates.
template <void (*ProcessOne)(const fs::path&, const fs::path&),
          size_t chunkSize = 16>
void ParallelFileMap(const fs::path& path, const vector<fs::path>& files) {
  std::chrono::milliseconds startTime = Now();
  std::atomic_uint64_t fileCount{0};
  const size_t n = FLAGS_limit
                       ? std::min(size_t(files.size()), size_t(FLAGS_limit))
                       : files.size();
#pragma omp parallel for
  for (size_t j = 0; j < n; j += chunkSize) {
    const size_t chunkEnd = std::min(n, j + chunkSize);
    for (size_t i = j; i < chunkEnd; ++i) {
      ProcessOne(path, files[i]);
    }
    // Fix: count only the files actually processed. The old code added a
    // full chunkSize even for the final partial chunk, over-counting and
    // skewing both the ms/file figure and the percentage.
    fileCount += chunkEnd - j;
    uint64_t localFileCount = fileCount;  // >= 1 here, division is safe
    std::chrono::milliseconds now = Now();
    int msPerGraph = ((now - startTime) / localFileCount).count();
    // NOTE(review): concurrent workers interleave on std::cout; output is
    // best-effort progress only.
    std::cout << "\r\033[K" << localFileCount << " of " << n
              << " files processed (" << msPerGraph << " ms / file, "
              << std::setprecision(3)
              << (localFileCount / static_cast<float>(n)) * 100 << "%)"
              << std::flush;
  }
  std::cout << std::endl;
}
} // namespace dataflow
} // namespace task
} // namespace programl
|
CutPursuit_L2.h | #pragma once
#include "CutPursuit.h"
#include "Common.h"
namespace CP {
template <typename T>
class CutPursuit_L2 : public CutPursuit<T>
{
public:
// Trivial destructor: all resources are owned by the CutPursuit<T> base.
~CutPursuit_L2(){
};
//=============================================================================================
//============================= COMPUTE ENERGY ===========================================
//=============================================================================================
// Compute the two terms of the L2 cut-pursuit energy.
// Returns {fidelity, penalty}: fidelity is the weighted squared error
// between each vertex's observation and its current value, summed over
// all vertices and dimensions; penalty is the regularization strength
// times the weight of the active (cut) real edges. Each active edge
// contributes with a 0.5 factor, mirroring the fidelity term.
virtual std::pair<T,T> compute_energy() override
{
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
EdgeAttributeMap<T> edge_attribute_map
= boost::get(boost::edge_bundle, this->main_graph);
//the first element pair_energy of is the fidelity and the second the penalty
std::pair<T,T> pair_energy;
T energy = 0;
//#pragma omp parallel for private(i_dim) if (this->parameter.parallel) schedule(static) reduction(+:energy,i)
for (uint32_t ind_ver = 0; ind_ver < this->nVertex; ind_ver++)
{
VertexDescriptor<T> i_ver = boost::vertex(ind_ver, this->main_graph);
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
// 0.5 * weight * (observation - value)^2 per dimension
energy += .5*vertex_attribute_map(i_ver).weight
* pow(vertex_attribute_map(i_ver).observation[i_dim]
- vertex_attribute_map(i_ver).value[i_dim],2);
}
}
pair_energy.first = energy;
energy = 0;
EdgeIterator<T> i_edg, i_edg_end = boost::edges(this->main_graph).second;
for (i_edg = boost::edges(this->main_graph).first; i_edg != i_edg_end; ++i_edg)
{
// skip auxiliary (source/sink) edges of the flow graph
if (!edge_attribute_map(*i_edg).realEdge)
{
continue;
}
// active edges are counted; isActive is 0/1 here
energy += .5 * edge_attribute_map(*i_edg).isActive * this->parameter.reg_strenth
* edge_attribute_map(*i_edg).weight;
}
pair_energy.second = energy;
return pair_energy;
}
//=============================================================================================
//============================= SPLIT ===========================================
//=============================================================================================
// Split each component into an optimal binary partition (B / not-B).
// The partition and the per-side centroid values (h_1, h_2) are refined
// alternately: compute centroids for the current labelling, rebuild the
// flow-graph capacities, run Boykov-Kolmogorov max-flow, and re-label
// each vertex from the min-cut side it lands on. Saturated components
// are left untouched. Returns the saturation count from activate_edges().
virtual uint32_t split() override
{ // split the graph by trying to find the best binary partition
// each components is split into B and notB
// for each components we associate the value h_1 and h_2 to vertices in B or notB
// the affectation as well as h_1 and h_2 are computed alternatively
//tic();
//--------loading structures---------------------------------------------------------------
uint32_t nb_comp = this->components.size();
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
uint32_t saturation;
//stores wether each vertex is B or not
std::vector<bool> binary_label(this->nVertex);
//initialize the binary partition with kmeans
this->init_labels(binary_label);
//centers is the value of each binary component in the optimal partition
VectorOfCentroids<T> centers(nb_comp, this->dim);
//-----main loop----------------------------------------------------------------
// the optimal flow is iteratively approximated
for (uint32_t i_step = 1; i_step <= this->parameter.flow_steps; i_step++)
{
//the regularization strength at this step
//compute h_1 and h_2
centers = VectorOfCentroids<T>(nb_comp, this->dim);
this->compute_centers(centers, nb_comp,binary_label);
this->set_capacities(centers);
// update the capacities of the flow graph
boost::boykov_kolmogorov_max_flow(
this->main_graph,
get(&EdgeAttribute<T>::capacity , this->main_graph),
get(&EdgeAttribute<T>::residualCapacity, this->main_graph),
get(&EdgeAttribute<T>::edge_reverse , this->main_graph),
get(&VertexAttribute<T>::color , this->main_graph),
get(boost::vertex_index , this->main_graph),
this->source,
this->sink);
// re-label every vertex of each unsaturated component from the
// side of the min-cut it was assigned to (sink color => in B)
for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
{
if (this->saturated_components[ind_com])
{
continue;
}
for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
{
binary_label[vertex_index_map(this->components[ind_com][i_ver])]
= (vertex_attribute_map(this->components[ind_com][i_ver]).color
== vertex_attribute_map(this->sink).color);
}
}
}
saturation = this->activate_edges();
return saturation;
}
//=============================================================================================
//============================= INIT_L2 ====== ===========================================
//=============================================================================================
inline void init_labels(std::vector<bool> & binary_label)
{ //-----initialize the labelling for each components with kmeans------------------------------
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
uint32_t nb_comp = this->components.size();
// ind_com;
//#pragma omp parallel for private(ind_com) //if (nb_comp>=8) schedule(dynamic)
#ifdef OPENMP
#pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic)
#endif
for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
{
std::vector< std::vector<T> > kernels(2, std::vector<T>(this->dim));
T total_weight[2];
T best_energy;
T current_energy;
uint32_t comp_size = this->components[ind_com].size();
std::vector<bool> potential_label(comp_size);
std::vector<T> energy_array(comp_size);
if (this->saturated_components[ind_com] || comp_size <= 1)
{
continue;
}
for (uint32_t init_kmeans = 0; init_kmeans < this->parameter.kmeans_resampling; init_kmeans++)
{//proceed to several initilialisation of kmeans and pick up the best one
//----- initialization with KM++ ------------------
uint32_t first_kernel = std::rand() % comp_size, second_kernel = 0; // first kernel attributed
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
kernels[0][i_dim] = vertex_attribute_map(this->components[ind_com][first_kernel ]).observation[i_dim];
}
best_energy = 0; //now compute the square distance of each pouint32_tto this kernel
#ifdef OPENMP
#pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(best_energy) schedule(static)
#endif
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
energy_array[i_ver] = 0;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
energy_array[i_ver] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
- kernels[0][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
}
best_energy += energy_array[i_ver];
} // we now generate a random number to determinate which node will be the second kernel
T random_sample = ((T)(rand())) / ((T)(RAND_MAX));
current_energy = best_energy * random_sample;
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
current_energy -= energy_array[i_ver];
if (current_energy < 0)
{ //we have selected the second kernel
second_kernel = i_ver;
break;
}
}
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{ // now fill the second kernel
kernels[1][i_dim] = vertex_attribute_map(this->components[ind_com][second_kernel]).observation[i_dim];
}
//----main kmeans loop-----
for (uint32_t ite_kmeans = 0; ite_kmeans < this->parameter.kmeans_ite; ite_kmeans++)
{
//--affectation step: associate each node with its closest kernel-------------------
#ifdef OPENMP
#pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static)
#endif
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
std::vector<T> distance_kernels(2);
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
distance_kernels[0] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
- kernels[0][i_dim],2);
distance_kernels[1] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
- kernels[1][i_dim],2);
}
potential_label[i_ver] = distance_kernels[0] > distance_kernels[1];
}
//-----computation of the new kernels----------------------------
total_weight[0] = 0.;
total_weight[1] = 0.;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
kernels[0][i_dim] = 0;
kernels[1][i_dim] = 0;
}
#ifdef OPENMP
#pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static)
#endif
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
if (vertex_attribute_map(this->components[ind_com][i_ver]).weight==0)
{
continue;
}
if (potential_label[i_ver])
{
total_weight[0] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
kernels[0][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
* vertex_attribute_map(this->components[ind_com][i_ver]).weight ;
}
}
else
{
total_weight[1] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
kernels[1][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
* vertex_attribute_map(this->components[ind_com][i_ver]).weight;
}
}
}
if ((total_weight[0] == 0)||(total_weight[1] == 0))
{
//std::cout << "kmeans error : " << comp_size << std::endl;
break;
}
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
kernels[0][i_dim] = kernels[0][i_dim] / total_weight[0];
kernels[1][i_dim] = kernels[1][i_dim] / total_weight[1];
}
}
//----compute the associated energy ------
current_energy = 0;
#ifdef OPENMP
#pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static)
#endif
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
{
if (potential_label[i_ver])
{
current_energy += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
- kernels[0][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
}
else
{
current_energy += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
- kernels[1][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
}
}
}
if (current_energy < best_energy)
{
best_energy = current_energy;
for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++)
{
binary_label[vertex_index_map(this->components[ind_com][i_ver])] = potential_label[i_ver];
}
}
}
}
}
//=============================================================================================
//============================= COMPUTE_CENTERS_L2 ==========================================
//=============================================================================================
inline void compute_centers(VectorOfCentroids<T> & centers, const uint32_t & nb_comp
           , const std::vector<bool> & binary_label)
{ // Update the centroid pair (h_1, h_2) of every non-saturated component according
  // to the current binary labelling.
    #ifdef OPENMP
    #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic)
    #endif
    for (uint32_t i_com = 0; i_com < nb_comp; i_com++)
    {
        if (!this->saturated_components[i_com])
        {
            compute_center(centers.centroids[i_com], i_com, binary_label);
        }
    }
}
//=============================================================================================
//============================= COMPUTE_CENTERS_L2 ==========================================
//=============================================================================================
inline void compute_center( std::vector< std::vector<T> > & center, const uint32_t & ind_com
           , const std::vector<bool> & binary_label)
{
    //compute for each component the values of the centroids corresponding to the optimal binary partition
    // center[0] receives the weighted mean of the vertices labelled true, center[1] of the
    // vertices labelled false; if either side is empty the component is saturated and both
    // centroids fall back to the component's current value.
    // NOTE(review): center[0]/center[1] are accumulated with += below, so the caller is
    // expected to pass zero-initialized centroids - confirm at call sites.
    VertexAttributeMap<T> vertex_attribute_map
            = boost::get(boost::vertex_bundle, this->main_graph);
    VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph);
    T total_weight[2]; // total_weight[0]: label true, total_weight[1]: label false
    total_weight[0] = 0.;
    total_weight[1] = 0.;
    //#pragma omp parallel for if (this->parameter.parallel)
    for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
    {
        if (vertex_attribute_map(this->components[ind_com][i_ver]).weight==0)
        { // unobserved vertices do not contribute to either centroid
            continue;
        }
        if (binary_label[vertex_index_map(this->components[ind_com][i_ver])])
        {
            total_weight[0] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
            for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
            {
                center[0][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                        * vertex_attribute_map(this->components[ind_com][i_ver]).weight ;
            }
        }
        else
        {
            total_weight[1] += vertex_attribute_map(this->components[ind_com][i_ver]).weight;
            for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
            {
                center[1][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]
                        * vertex_attribute_map(this->components[ind_com][i_ver]).weight;
            }
        }
    }
    if ((total_weight[0] == 0)||(total_weight[1] == 0))
    {
        //the component is saturated: one side of the partition is empty
        this->saturateComponent(ind_com);
        for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
        { // both centroids collapse to the value of the component's first vertex
            center[0][i_dim] = vertex_attribute_map(this->components[ind_com][0]).value[i_dim];
            center[1][i_dim] = vertex_attribute_map(this->components[ind_com][0]).value[i_dim];
        }
    }
    else
    {
        for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
        { // normalize the weighted sums into means
            center[0][i_dim] = center[0][i_dim] / total_weight[0];
            center[1][i_dim] = center[1][i_dim] / total_weight[1];
        }
    }
    return;
}
//=============================================================================================
//============================= SET_CAPACITIES ==========================================
//=============================================================================================
inline void set_capacities(const VectorOfCentroids<T> & centers)
{
    // Fill the capacities of the flow graph before the next max-flow/min-cut:
    // source->vertex and vertex->sink edges encode, per vertex, the data-fidelity cost
    // difference between the two centroids of its component; vertex->vertex edges carry
    // the regularization strength (zero when the edge is already active/cut).
    VertexAttributeMap<T> vertex_attribute_map
            = boost::get(boost::vertex_bundle, this->main_graph);
    EdgeAttributeMap<T> edge_attribute_map
            = boost::get(boost::edge_bundle, this->main_graph);
    //----first compute the capacity in sink/node edges------------------------------------
    //#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic)
    uint32_t nb_comp = this->components.size();
    #ifdef OPENMP
    #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic)
    #endif
    for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++)
    {
        VertexDescriptor<T> desc_v;
        EdgeDescriptor desc_source2v, desc_v2sink, desc_v2source;
        T cost_B, cost_notB; //the cost of being in B or not B, local for each component
        if (this->saturated_components[ind_com])
        { // saturated components are no longer split
            continue;
        }
        for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++)
        {
            desc_v = this->components[ind_com][i_ver];
            // because of the adjacency structure NEVER access edge (source,v) directly!
            desc_v2source = boost::edge(desc_v, this->source,this->main_graph).first;
            desc_source2v = edge_attribute_map(desc_v2source).edge_reverse; //use edge_reverse instead
            desc_v2sink = boost::edge(desc_v, this->sink,this->main_graph).first;
            cost_B = 0;
            cost_notB = 0;
            if (vertex_attribute_map(desc_v).weight==0)
            { //no observation - no cut
                edge_attribute_map(desc_source2v).capacity = 0;
                edge_attribute_map(desc_v2sink).capacity = 0;
                continue;
            }
            for(uint32_t i_dim=0; i_dim < this->dim; i_dim++)
            {
                // 0.5*w*(c^2 - 2*c*obs) = 0.5*w*((obs - c)^2 - obs^2); the obs^2 term is
                // common to both labels, so it cancels in the capacity difference below
                cost_B += 0.5*vertex_attribute_map(desc_v).weight
                        * (pow(centers.centroids[ind_com][0][i_dim],2) - 2 * (centers.centroids[ind_com][0][i_dim]
                        * vertex_attribute_map(desc_v).observation[i_dim]));
                cost_notB += 0.5*vertex_attribute_map(desc_v).weight
                        * (pow(centers.centroids[ind_com][1][i_dim],2) - 2 * (centers.centroids[ind_com][1][i_dim]
                        * vertex_attribute_map(desc_v).observation[i_dim]));
            }
            // only the positive part of the cost difference is stored, on one of
            // the two terminal edges
            if (cost_B>cost_notB)
            {
                edge_attribute_map(desc_source2v).capacity = cost_B - cost_notB;
                edge_attribute_map(desc_v2sink).capacity = 0.;
            }
            else
            {
                edge_attribute_map(desc_source2v).capacity = 0.;
                edge_attribute_map(desc_v2sink).capacity = cost_notB - cost_B;
            }
        }
    }
    //----then set the vertex to vertex edges ---------------------------------------------
    EdgeIterator<T> i_edg, i_edg_end;
    for (boost::tie(i_edg, i_edg_end) = boost::edges(this->main_graph);
         i_edg != i_edg_end; ++i_edg)
    {
        if (!edge_attribute_map(*i_edg).realEdge)
        { // skip the artificial source/sink edges handled above
            continue;
        }
        if (!edge_attribute_map(*i_edg).isActive)
        {
            edge_attribute_map(*i_edg).capacity
                    = edge_attribute_map(*i_edg).weight * this->parameter.reg_strenth;
        }
        else
        { // edges already cut must not be penalized again
            edge_attribute_map(*i_edg).capacity = 0;
        }
    }
}
//=============================================================================================
//================================= COMPUTE_VALUE =========================================
//=============================================================================================
virtual std::pair<std::vector<T>, T> compute_value(const uint32_t & ind_com) override
{
VertexAttributeMap<T> vertex_attribute_map
= boost::get(boost::vertex_bundle, this->main_graph);
T total_weight = 0;
std::vector<T> compValue(this->dim);
std::fill((compValue.begin()),(compValue.end()),0);
#ifdef OPENMP
#pragma omp parallel for if (this->parameter.parallel) schedule(static)
#endif
for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
{
total_weight += vertex_attribute_map(this->components[ind_com][ind_ver]).weight;
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
compValue[i_dim] += vertex_attribute_map(this->components[ind_com][ind_ver]).observation[i_dim]
* vertex_attribute_map(this->components[ind_com][ind_ver]).weight;
}
vertex_attribute_map(this->components[ind_com][ind_ver]).in_component = ind_com;
}
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
compValue[i_dim] = compValue[i_dim] / total_weight;
}
for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver)
{
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
vertex_attribute_map(this->components[ind_com][ind_ver]).value[i_dim] = compValue[i_dim];
}
}
return std::pair<std::vector<T>, T>(compValue, total_weight);
}
//=============================================================================================
//================================= COMPUTE_MERGE_GAIN =========================================
//=============================================================================================
virtual std::pair<std::vector<T>, T> compute_merge_gain(const VertexDescriptor<T> & comp1
, const VertexDescriptor<T> & comp2) override
{
VertexAttributeMap<T> reduced_vertex_attribute_map
= boost::get(boost::vertex_bundle, this->reduced_graph);
std::vector<T> merge_value(this->dim);
T gain = 0;
// compute the value obtained by mergeing the two connected components
for(uint32_t i_dim=0; i_dim<this->dim; i_dim++)
{
merge_value[i_dim] =
(reduced_vertex_attribute_map(comp1).weight *
reduced_vertex_attribute_map(comp1).value[i_dim]
+reduced_vertex_attribute_map(comp2).weight *
reduced_vertex_attribute_map(comp2).value[i_dim])
/(reduced_vertex_attribute_map(comp1).weight
+reduced_vertex_attribute_map(comp2).weight);
gain += 0.5 * (pow(merge_value[i_dim],2)
* (reduced_vertex_attribute_map(comp1).weight
+reduced_vertex_attribute_map(comp2).weight)
- pow(reduced_vertex_attribute_map(comp1).value[i_dim],2)
* reduced_vertex_attribute_map(comp1).weight
- pow(reduced_vertex_attribute_map(comp2).value[i_dim],2)
* reduced_vertex_attribute_map(comp2).weight);
}
return std::pair<std::vector<T>, T>(merge_value, gain);
}
};
}
|
boundary_mask_mex.c | #include <inttypes.h>
#include <omp.h>
#include "mex.h"
#include "boundary_mask_mex.h"
void boundary_mask(uint8_t *B, const uint8_t *G, const size_t *sz);
#ifdef BOUNDARY_MASK_MEX
void
mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    /* MEX entry point: boundary_mask_mex(B, G). B is modified in place;
       an optional scalar 1.0 is returned as a success flag. */
    if (nrhs != 2 || nlhs > 1) {
        mexErrMsgTxt("Usage: boundary_mask_mex(B, G);");
    }
    uint8_t *mask = (uint8_t *)mxGetData(prhs[0]);
    const uint8_t *grid = (const uint8_t *)mxGetData(prhs[1]);
    const size_t *dims = (const size_t *)mxGetDimensions(prhs[0]);
    boundary_mask(mask, grid, dims);
    if (nlhs == 1) {
        plhs[0] = mxCreateDoubleScalar(1.0);
    }
}
#endif
void
mx_boundary_mask(mxArray *mxB, const mxArray *mxG)
{
    /* Library-style entry: run boundary_mask on the data of mxB/mxG,
       taking the grid dimensions from mxB. */
    uint8_t *mask = (uint8_t *)mxGetData(mxB);
    const uint8_t *grid = (const uint8_t *)mxGetData(mxG);
    const size_t *dims = (const size_t *)mxGetDimensions(mxB);
    boundary_mask(mask, grid, dims);
}
void
boundary_mask(uint8_t *B, const uint8_t *G, const size_t *sz)
{
    /*
     * Compute a boundary-band mask B of the 3-D grid G (dimensions sz[0..2],
     * x-fastest storage). A temporary band b holds the grid-boundary voxels of G
     * plus every 2x2x2 interior block that is not completely inside G; a voxel of
     * B is then set when it belongs to G and touches b in the 6-neighbourhood
     * (or is in b itself). Only interior voxels (1..n-2 per axis) are written.
     */
    size_t i, j, k;
    size_t l;
    const size_t nx = sz[0];
    const size_t ny = sz[1];
    const size_t nz = sz[2];
    const size_t nxny = nx*ny;
    const size_t NX = nx-2;          /* last interior index along x */
    const size_t NY = nx*(ny-2);     /* last interior row offset */
    const size_t NZ = nxny*(nz-2);   /* last interior slice offset */
    const size_t nx2 = 2*nx;
    const size_t nxny2 = 2*nxny;
    /* offsets of the remaining corners of a 2x2x2 block */
    const size_t o110 = 1 + nx + 0;
    const size_t o101 = 1 + 0 + nxny;
    const size_t o011 = 0 + nx + nxny;
    const size_t o111 = 1 + nx + nxny;
    uint8_t *b = calloc(nx*ny*nz, sizeof *b);
    if (b == NULL) {
        /* allocation failure: leave B untouched rather than dereferencing NULL */
        return;
    }
    /* boundary of grid */
    #pragma omp parallel for private(i,j,k,l) schedule(static) \
        if (nxny*nz > 32*32*32)
    for(k = nxny; k <= NZ; k += nxny) {
        for(j = nx; j <= NY; j += nx) {
            l = 1 + j + k;
            for(i = 1; i <= NX; ++i, ++l) {
                if ((i == 1) || (j == nx) || (k == nxny) ||
                    (i == NX) || (j == NY) || (k == NZ)) {
                    b[l] = G[l];
                }
            }
        }
    }
    /* interior: flag 2x2x2 blocks not completely contained in G */
    #pragma omp parallel for private(i,j,k,l) schedule(static) \
        if (nxny*nz > 32*32*32)
    for(k = nxny; k <= NZ; k += nxny2) {
        for(j = nx; j <= NY; j += nx2) {
            l = 1 + j + k;
            for(i = 1; i <= NX; i += 2, l += 2) {
                if (!(G[l] && G[l+1] && G[l+nx] && G[l+nxny] &&
                      G[l+o110] && G[l+o101] && G[l+o011] && G[l+o111])) {
                    b[l] = G[l];
                    b[l+1] = G[l+1];
                    b[l+nx] = G[l+nx];
                    b[l+nxny] = G[l+nxny];
                    b[l+o110] = G[l+o110];
                    b[l+o101] = G[l+o101];
                    b[l+o011] = G[l+o011];
                    b[l+o111] = G[l+o111];
                }
            }
        }
    }
    /* grow boundary band by one voxel (6-neighbourhood) into B */
    #pragma omp parallel for private(i,j,k,l) schedule(static) \
        if (nxny*nz > 32*32*32)
    for(k = nxny; k <= NZ; k += nxny) {
        for(j = nx; j <= NY; j += nx) {
            l = 1 + j + k;
            for(i = 1; i <= NX; ++i, ++l) {
                if (G[l]) {
                    B[l] = b[l-nxny] || b[l-nx] || b[l-1] || b[l] ||
                           b[l+1] || b[l+nx] || b[l+nxny];
                }
            }
        }
    }
    free(b); /* free(NULL) would be a no-op anyway; the old guard was redundant */
}
|
DRB015-outofbounds-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The outermost loop is parallelized.
But the inner loop has an out-of-bound access on b[i][j-1] when j equals 0.
This will cause a memory access of the previous row's last element.
For example, an array of 4x4:
j=0 1 2 3
i=0 x x x x
1 x x x x
2 x x x x
3 x x x x
outer loop: i=2,
inner loop: j=0
array element accessed b[i][j-1] becomes b[2][-1], which in turn is b[1][3]
due to linearized row-major storage of the 2-D array.
This causes loop-carried data dependence between i=2 and i=1.
Data race pair: b[i][j]@80:7 vs. b[i][j-1]@80:15
*/
#include <stdlib.h>
int main(int argc, char* argv[])
{
  /* DataRaceBench kernel: the loop nest below INTENTIONALLY contains a
   * loop-carried dependence (b[i][j-1] with j==0 reads the previous row's last
   * element through the linearized 2-D array). Do not "fix" it: the data race
   * is the benchmark's purpose. */
  int i,j;
  int len=100;
  if (argc>1)
    len = atoi(argv[1]);
  int n=len, m=len;
  double b[n][m]; /* VLA on the stack; a large len may overflow the stack */
#pragma omp parallel for private(j) schedule(dynamic)
  for (i=1;i<n;i++)
    for (j=0;j<m;j++) // Note there will be out of bound access
      b[i][j]=b[i][j-1];
  return 0;
}
|
GB_unop__tan_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__tan_fc32_fc32
// op(A') function: GB_unop_tran__tan_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = ctanf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ctanf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = ctanf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TAN || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__tan_fc32_fc32   // Cx [p] = ctanf (Ax [p]) entrywise
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = ctanf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = ctanf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__tan_fc32_fc32    // C = ctanf (A'): transpose, then apply
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template below expands using the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
main.c | #include <stdio.h>
#include <omp.h>
#define N 100
void populate_vector(int vector[], int value, int length)
{
    /* Fill the first `length` entries of `vector` with `value`. */
    int *p = vector;
    int *const end = vector + length;
    while (p < end) {
        *p++ = value;
    }
}
void reduction_example_1()
{
    /* Sum a vector of N ones with an OpenMP '+' reduction and print the
       result (expected: N). */
    int vector[N] = {0};
    int sum = 0;
    populate_vector(vector, 1, N);
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < N; ++i)
        sum += vector[i];
    /* fixed typo in the output string: "reducion" -> "reduction" */
    printf("The reduction in example 1 is %d\n", sum);
}
void reduction_example_2()
{
    /* Dot product of a vector of 1s with a vector of 3s using an OpenMP '+'
       reduction over static chunks; prints the result (expected: 3*N). */
    int a[N], b[N];
    int i;
    int result = 0;       /* was "result = 0.0;": a double literal assigned to an int */
    const int chunk = 10; /* chunk size for the static schedule */
    populate_vector(a, 1, N);
    populate_vector(b, 3, N);
    #pragma omp parallel for \
        default(shared) private(i) \
        schedule(static,chunk) \
        reduction(+:result)
    for (i = 0; i < N; i++)
        result = result + (a[i] * b[i]);
    /* fixed typo in the output string: "reducion" -> "reduction" */
    printf("The reduction in example 2 is %d\n", result);
}
int main(void)
{
    /* Run both OpenMP reduction demos in sequence. */
    reduction_example_1();
    reduction_example_2();
    return 0;
}
|
PeptideIndexing.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2021.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Andreas Bertsch, Chris Bielow $
// --------------------------------------------------------------------------
#pragma once
#include <OpenMS/ANALYSIS/ID/AhoCorasickAmbiguous.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/DATASTRUCTURES/DefaultParamHandler.h>
#include <OpenMS/DATASTRUCTURES/FASTAContainer.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/DATASTRUCTURES/StringUtils.h>
#include <OpenMS/DATASTRUCTURES/SeqanIncludeWrapper.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/METADATA/PeptideEvidence.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <OpenMS/SYSTEM/SysInfo.h>
#include <atomic>
#include <algorithm>
#include <fstream>
namespace OpenMS
{
/**
@brief Refreshes the protein references for all peptide hits in a vector of PeptideIdentifications and adds target/decoy information.
All peptide and protein hits are annotated with target/decoy information, using the meta value "target_decoy". For proteins the possible values are "target" and "decoy",
depending on whether the protein accession contains the decoy pattern (parameter @p decoy_string) as a suffix or prefix, respectively (see parameter @p prefix).
For peptides, the possible values are "target", "decoy" and "target+decoy", depending on whether the peptide sequence is found only in target proteins,
only in decoy proteins, or in both. The target/decoy information is crucial for the @ref TOPP_FalseDiscoveryRate tool.
(For FDR calculations, "target+decoy" peptide hits count as target hits.)
@note Make sure that your protein names in the database contain a correctly formatted decoy string. This can be ensured by using @ref UTILS_DecoyDatabase.
If the decoy identifier is not recognized successfully all proteins will be assumed to stem from the target-part of the query.<br>
E.g., "sw|P33354_DECOY|YEHR_ECOLI Uncharacterized lipop..." is <b>invalid</b>, since the tool has no knowledge of how SwissProt entries are built up.
A correct identifier could be "DECOY_sw|P33354|YEHR_ECOLI Uncharacterized li ..." or "sw|P33354|YEHR_ECOLI_DECOY Uncharacterized li", depending on whether you are
using prefix or suffix annotation.<br>
Some helpful target/decoy statistics will be reported when done.
By default this tool will fail if an unmatched peptide occurs, i.e. if the database does not contain the corresponding protein.
You can force it to return successfully in this case by setting @p '-unmatched_action' to accept or even remove those hits.
Search engines (such as Mascot) will replace ambiguous amino acids ('B', 'J', 'Z' and 'X') in the protein database with unambiguous amino acids in the reported peptides, e.g. exchange 'X' with 'H'.
This will cause such peptides to not be found by exactly matching their sequences to the protein database.
However, we can recover these cases by using tolerant search for ambiguous amino acids in the protein sequence. This is done by default with up to four amino acids
per peptide hit. If you only want exact matches, set @p aaa_max to zero (but expect that unmatched peptides might occur)!
Leucine/Isoleucine:
Further complications can arise due to the presence of the isobaric amino acids isoleucine ('I') and leucine ('L') in protein sequences.
Since the two have the exact same chemical composition and mass, they generally cannot be distinguished by mass spectrometry.
If a peptide containing 'I' was reported as a match for a spectrum, a peptide containing 'L' instead would be an equally good match (and vice versa).
To account for this inherent ambiguity, setting the flag @p IL_equivalent causes 'I' and 'L' to be considered as indistinguishable.@n
For example, if the sequence "PEPTIDE" (matching "Protein1") was identified as a search hit,
but the database additionally contained "PEPTLDE" (matching "Protein2"), running PeptideIndexer with the @p IL_equivalent option would
report both "Protein1" and "Protein2" as accessions for "PEPTIDE".
(This is independent of ambiguous matching via @p aaa_max.)
Additionally, setting this flag will convert all 'J's in any protein sequence to 'I'. This way, no tolerant search is required for 'J' (but is still possible for all
the other ambiguous amino acids).
If @p write_protein_sequences is requested and @p IL_equivalent is set as well, both the I/L-version and unmodified protein sequences need to be stored internally.
This requires some extra memory, roughly equivalent to the size of the FASTA database file itself.
Enzyme specificity:
Once a peptide sequence is found in a protein sequence, this does <b>not</b> imply that the hit is valid! This is where enzyme specificity comes into play.
By default, the enzyme and the specificity used during search is derived from metadata in the idXML files ('auto' setting).
We make two exceptions to any specificity constraints:
1) peptides starting at the second or third position of a protein are still considered N-terminally specific,
since the residues can be cleaved off in vivo; X!Tandem reports these peptides. For example, the two peptides ABAR and LABAR would both match a protein starting with MLABAR.
2) adventitious cleavage at Asp|Pro (Aspartate/D | Proline/P) is allowed for all enzymes (as supported by X!Tandem), i.e. counts as a proper cleavage site (see http://www.thegpm.org/tandem/release.html).
You can relax the requirements further by choosing <tt>semi-tryptic</tt> (only one of two "internal" termini must match requirements)
or <tt>none</tt> (essentially allowing all hits, no matter their context). These settings should not be used (due to high risk of reporting false positives),
unless the search engine was instructed to search peptides in the same way (but then the default 'auto' setting will do the correct thing).
X!Tandem treats any occurrence of 'X' as stop codon (and thus as cleavage site). The resulting peptide will be non- or semi-tryptic.
Those hits will not be matched and need to be removed using @p '-unmatched_action' (do not use termini specificity to cheat around it! It adds more false hits!).
The FASTA file should not contain duplicate protein accessions (since accessions are not validated) if a correct unique-matching annotation is important (target/decoy annotation is still correct).
Threading:
This tool support multiple threads (@p threads option) to speed up computation, at the cost of little extra memory.
*/
class OPENMS_DLLAPI PeptideIndexing :
  public DefaultParamHandler, public ProgressLogger
{
public:
  /// name of enzyme/specificity which signals that the enzyme/specificity should be taken from meta information
  static char const* const AUTO_MODE; /* = 'auto' */

  /// Exit codes
  enum ExitCodes
  {
    EXECUTION_OK,
    DATABASE_EMPTY,
    PEPTIDE_IDS_EMPTY,
    ILLEGAL_PARAMETERS,
    UNEXPECTED_RESULT
  };

  /// Action to take when peptide hits could not be matched
  enum class Unmatched
  {
    IS_ERROR,   ///< throws an error (and returns no results)
    WARN,       ///< skips annotation with target/decoy but returns with 'success'
    REMOVE,     ///< removes unmatched hits entirely and returns with 'success'
    SIZE_OF_UNMATCHED
  };
  static const std::array<std::string, (Size)Unmatched::SIZE_OF_UNMATCHED> names_of_unmatched;

  /// Action to take when no decoy hits at all were found
  enum class MissingDecoy
  {
    IS_ERROR,
    WARN,
    SILENT,
    SIZE_OF_MISSING_DECOY
  };
  static const std::array<std::string, (Size)MissingDecoy::SIZE_OF_MISSING_DECOY> names_of_missing_decoy;

  /// Default constructor
  PeptideIndexing();

  /// Default destructor
  ~PeptideIndexing() override;

  /// forward for old interface and pyOpenMS; use run<T>() for more control
  inline ExitCodes run(std::vector<FASTAFile::FASTAEntry>& proteins, std::vector<ProteinIdentification>& prot_ids, std::vector<PeptideIdentification>& pep_ids)
  {
    FASTAContainer<TFI_Vector> protein_container(proteins);
    return run<TFI_Vector>(protein_container, prot_ids, pep_ids);
  }

  /**
    @brief Re-index peptide identifications honoring enzyme cutting rules, ambiguous amino acids and target/decoy hits.

    Template parameter 'T' can be either TFI_File or TFI_Vector. If the data is already available, use TFI_Vector and pass the vector.
    If the data is still in a FASTA file and it's not needed afterwards for additional processing, use TFI_File and pass the filename.

    PeptideIndexer refreshes target/decoy information and mapping of peptides to proteins.
    The target/decoy information is crucial for the @ref TOPP_FalseDiscoveryRate tool. (For FDR calculations, "target+decoy" peptide hits count as target hits.)

    PeptideIndexer allows for ambiguous amino acids (B|J|Z|X) in the protein database, but not in the peptide sequences.
    For the latter only I/L can be treated as equivalent (see 'IL_equivalent' flag), but 'J' is not allowed.

    Enzyme cutting rules and partial specificity can be specified.

    Resulting protein hits appear in the order of the FASTA file, except for orphaned proteins, which will appear first with an empty target_decoy metavalue.
    Duplicate protein accessions & sequences will not raise a warning, but create multiple hits (PeptideIndexer scans over the FASTA file once for efficiency
    reasons, and thus might not see all accessions & sequences at once).

    All peptide and protein hits are annotated with target/decoy information, using the meta value "target_decoy".
    For proteins the possible values are "target" and "decoy", depending on whether the protein accession contains the decoy pattern (parameter @p decoy_string)
    as a suffix or prefix, respectively (see parameter @p prefix).

    Peptide hits are annotated with metavalue 'protein_references', and if matched to at least one protein also with metavalue 'target_decoy'.
    The possible values for 'target_decoy' are "target", "decoy" and "target+decoy",
    depending on whether the peptide sequence is found only in target proteins, only in decoy proteins, or in both. The metavalue is not present, if the peptide is unmatched.

    Runtime: PeptideIndexer is usually very fast (loading and storing the data takes the most time) and search speed can be further improved (linearly) by using more threads.
    Avoid allowing too many (>=4) ambiguous amino acids if your database contains long stretches of 'X' (exponential search space).

    @param proteins A list of proteins -- either read piecewise from a FASTA file or as existing vector of FASTAEntries.
    @param prot_ids Resulting protein identifications associated to pep_ids (will be re-written completely)
    @param pep_ids Peptide identifications which should be searched within @p proteins and then linked to @p prot_ids
    @return Exit status codes.
  */
  template<typename T>
  ExitCodes run(FASTAContainer<T>& proteins, std::vector<ProteinIdentification>& prot_ids, std::vector<PeptideIdentification>& pep_ids)
  {
    if ((enzyme_name_ == "Chymotrypsin" || enzyme_name_ == "Chymotrypsin/P" || enzyme_name_ == "TrypChymo")
        && IL_equivalent_)
    {
      // note the leading space: enzyme_name_ is concatenated directly in front of it
      throw Exception::InvalidParameter(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION,
        "The used enzyme " + enzyme_name_ + " differentiates between I and L, therefore the IL_equivalent option cannot be used.");
    }

    // no decoy string provided? try to deduce from data
    if (decoy_string_.empty())
    {
      auto r = DecoyHelper::findDecoyString(proteins);
      proteins.reset();
      if (!r.success)
      { // deduction failed: fall back to the most common convention
        r.is_prefix = true;
        r.name = "DECOY_";
        OPENMS_LOG_WARN << "Unable to determine decoy string automatically (not enough decoys were detected)! Using default " << (r.is_prefix ? "prefix" : "suffix") << " decoy string '" << r.name << "'\n"
                        << "If you think that this is incorrect, please provide a decoy_string and its position manually!" << std::endl;
      }
      prefix_ = r.is_prefix;
      decoy_string_ = r.name;
      // decoy string and position was extracted successfully
      OPENMS_LOG_INFO << "Using " << (prefix_ ? "prefix" : "suffix") << " decoy string '" << decoy_string_ << "'" << std::endl;
    }

    //---------------------------------------------------------------
    // parsing parameters, correcting xtandem and MSGFPlus parameters
    //---------------------------------------------------------------
    ProteaseDigestion enzyme;
    if (!enzyme_name_.empty() && (enzyme_name_.compare(AUTO_MODE) != 0))
    { // use param (not empty, not 'auto')
      enzyme.setEnzyme(enzyme_name_);
    }
    else if (!prot_ids.empty() && prot_ids[0].getSearchParameters().digestion_enzyme.getName() != "unknown_enzyme")
    { // take from meta (this assumes all runs used the same enzyme)
      OPENMS_LOG_INFO << "Info: using '" << prot_ids[0].getSearchParameters().digestion_enzyme.getName() << "' as enzyme (obtained from idXML) for digestion." << std::endl;
      enzyme.setEnzyme(&prot_ids[0].getSearchParameters().digestion_enzyme);
    }
    else
    { // fallback
      OPENMS_LOG_WARN << "Warning: Enzyme name neither given nor deduceable from input. Defaulting to Trypsin!" << std::endl;
      enzyme.setEnzyme("Trypsin");
    }

    bool xtandem_fix_parameters = false;
    bool msgfplus_fix_parameters = false;
    // determine if at least one search engine was xtandem or MSGFPlus to enable special rules
    for (const auto& prot_id : prot_ids)
    {
      String search_engine = prot_id.getOriginalSearchEngineName();
      StringUtils::toUpper(search_engine);
      OPENMS_LOG_INFO << "Peptide identification engine: " << search_engine << std::endl;
      if (search_engine == "XTANDEM" || prot_id.getSearchParameters().metaValueExists("SE:XTandem")) { xtandem_fix_parameters = true; }
      if (search_engine == "MS-GF+" || search_engine == "MSGFPLUS" || prot_id.getSearchParameters().metaValueExists("SE:MS-GF+")) { msgfplus_fix_parameters = true; }
    }

    // including MSGFPlus -> Trypsin/P as enzyme
    if (msgfplus_fix_parameters && enzyme.getEnzymeName() == "Trypsin")
    {
      OPENMS_LOG_WARN << "MSGFPlus detected but enzyme cutting rules were set to Trypsin. Correcting to Trypsin/P to cope with special cutting rule in MSGFPlus." << std::endl;
      enzyme.setEnzyme("Trypsin/P");
    }
    OPENMS_LOG_INFO << "Enzyme: " << enzyme.getEnzymeName() << std::endl;

    if (!enzyme_specificity_.empty() && (enzyme_specificity_.compare(AUTO_MODE) != 0))
    { // use param (not empty and not 'auto')
      enzyme.setSpecificity(ProteaseDigestion::getSpecificityByName(enzyme_specificity_));
    }
    else if (!prot_ids.empty() && prot_ids[0].getSearchParameters().enzyme_term_specificity != ProteaseDigestion::SPEC_UNKNOWN)
    { // deduce from data ('auto')
      enzyme.setSpecificity(prot_ids[0].getSearchParameters().enzyme_term_specificity);
      OPENMS_LOG_INFO << "Info: using '" << EnzymaticDigestion::NamesOfSpecificity[prot_ids[0].getSearchParameters().enzyme_term_specificity] << "' as enzyme specificity (obtained from idXML) for digestion." << std::endl;
    }
    else
    { // fallback
      OPENMS_LOG_WARN << "Warning: Enzyme specificity neither given nor present in the input file. Defaulting to 'full'!" << std::endl;
      enzyme.setSpecificity(ProteaseDigestion::SPEC_FULL);
    }

    //-------------------------------------------------------------
    // calculations
    //-------------------------------------------------------------
    // cache the first proteins
    const size_t PROTEIN_CACHE_SIZE = 4e5; // 400k should be enough for most DB's and is not too hard on memory either (~200 MB FASTA)

    this->startProgress(0, 1, "Load first DB chunk");
    proteins.cacheChunk(PROTEIN_CACHE_SIZE);
    this->endProgress();

    if (proteins.empty()) // we do not allow an empty database
    {
      OPENMS_LOG_ERROR << "Error: An empty database was provided. Mapping makes no sense. Aborting..." << std::endl;
      return DATABASE_EMPTY;
    }

    if (pep_ids.empty()) // Aho-Corasick requires non-empty input; but we allow this case, since the TOPP tool should not crash when encountering a bad raw file (with no PSMs)
    {
      OPENMS_LOG_WARN << "Warning: An empty set of peptide identifications was provided. Output will be empty as well." << std::endl;
      if (!keep_unreferenced_proteins_)
      {
        // delete only protein hits, not whole ID runs incl. meta data:
        for (std::vector<ProteinIdentification>::iterator it = prot_ids.begin();
             it != prot_ids.end(); ++it)
        {
          it->getHits().clear();
        }
      }
      return PEPTIDE_IDS_EMPTY;
    }

    FoundProteinFunctor func(enzyme, xtandem_fix_parameters); // store the matches
    Map<String, Size> acc_to_prot; // map: accessions --> FASTA protein index
    std::vector<bool> protein_is_decoy; // protein index -> is decoy?
    std::vector<std::string> protein_accessions; // protein index -> accession
    bool invalid_protein_sequence = false; // check for proteins with modifications, i.e. '[' or '(', and throw an exception

    { // new scope - forget data after search

      /*
        BUILD Peptide DB
      */
      bool has_illegal_AAs(false);
      AhoCorasickAmbiguous::PeptideDB pep_DB;
      for (std::vector<PeptideIdentification>::const_iterator it1 = pep_ids.begin(); it1 != pep_ids.end(); ++it1)
      {
        //String run_id = it1->getIdentifier();
        const std::vector<PeptideHit>& hits = it1->getHits();
        for (std::vector<PeptideHit>::const_iterator it2 = hits.begin(); it2 != hits.end(); ++it2)
        {
          //
          // Warning:
          // do not skip over peptides here, since the results are iterated in the same way
          //
          String seq = it2->getSequence().toUnmodifiedString().remove('*'); // make a copy, i.e. do NOT change the peptide sequence!
          if (seqan::isAmbiguous(seqan::AAString(seq.c_str())))
          { // do not quit here, to show the user all sequences .. only quit after loop
            OPENMS_LOG_ERROR << "Peptide sequence '" << it2->getSequence() << "' contains one or more ambiguous amino acids (B|J|Z|X).\n";
            has_illegal_AAs = true;
          }
          if (IL_equivalent_) // convert L to I;
          {
            seq.substitute('L', 'I');
          }
          appendValue(pep_DB, seq.c_str());
        }
      }
      if (has_illegal_AAs)
      {
        OPENMS_LOG_ERROR << "One or more peptides contained illegal amino acids. This is not allowed!"
                         << "\nPlease either remove the peptide or replace it with one of the unambiguous ones (while allowing for ambiguous AA's to match the protein)." << std::endl;
      }

      OPENMS_LOG_INFO << "Mapping " << length(pep_DB) << " peptides to " << (proteins.size() == PROTEIN_CACHE_SIZE ? "? (unknown number of)" : String(proteins.size())) << " proteins." << std::endl;

      if (length(pep_DB) == 0)
      { // Aho-Corasick will crash if given empty needles as input
        OPENMS_LOG_WARN << "Warning: Peptide identifications have no hits inside! Output will be empty as well." << std::endl;
        return PEPTIDE_IDS_EMPTY;
      }

      /*
        Aho Corasick (fast)
      */
      OPENMS_LOG_INFO << "Searching with up to " << aaa_max_ << " ambiguous amino acid(s) and " << mm_max_ << " mismatch(es)!" << std::endl;
      SysInfo::MemUsage mu;
      OPENMS_LOG_INFO << "Building trie ...";
      StopWatch s;
      s.start();
      AhoCorasickAmbiguous::FuzzyACPattern pattern;
      AhoCorasickAmbiguous::initPattern(pep_DB, aaa_max_, mm_max_, pattern);
      s.stop();
      OPENMS_LOG_INFO << " done (" << int(s.getClockTime()) << "s)" << std::endl;
      s.reset();

      uint16_t count_j_proteins(0);
      bool has_active_data = true; // becomes false if end of FASTA file is reached
      const std::string jumpX(aaa_max_ + mm_max_ + 1, 'X'); // jump over stretches of 'X' which cost a lot of time; +1 because AXXA is a valid hit for aaa_max == 2 (cannot split it)
      // use very large target value for progress if DB size is unknown (did not fit into first chunk)
      this->startProgress(0, proteins.size() == PROTEIN_CACHE_SIZE ? std::numeric_limits<SignedSize>::max() : proteins.size(), "Aho-Corasick");
      std::atomic<int> progress_prots(0);
#ifdef _OPENMP
#pragma omp parallel
#endif
      {
        FoundProteinFunctor func_threads(enzyme, xtandem_fix_parameters);
        Map<String, Size> acc_to_prot_thread; // map: accessions --> FASTA protein index
        AhoCorasickAmbiguous fuzzyAC;
        String prot;

        while (true)
        {
          #pragma omp barrier // all threads need to be here, since we are about to swap protein data
          #pragma omp single
          {
            DEBUG_ONLY std::cerr << " activating cache ...\n";
            has_active_data = proteins.activateCache(); // swap in last cache
            protein_accessions.resize(proteins.getChunkOffset() + proteins.chunkSize());
          } // implicit barrier here

          if (!has_active_data) break; // leave while-loop
          SignedSize prot_count = (SignedSize)proteins.chunkSize();
          #pragma omp master
          {
            DEBUG_ONLY std::cerr << "Filling Protein Cache ...";
            proteins.cacheChunk(PROTEIN_CACHE_SIZE);
            protein_is_decoy.resize(proteins.getChunkOffset() + prot_count);
            for (SignedSize i = 0; i < prot_count; ++i)
            { // do this in master only, to avoid false sharing
              const String& acc = proteins.chunkAt(i).identifier;
              protein_is_decoy[i + proteins.getChunkOffset()] = (prefix_ ? acc.hasPrefix(decoy_string_) : acc.hasSuffix(decoy_string_));
            }
            DEBUG_ONLY std::cerr << " done" << std::endl;
          }
          DEBUG_ONLY std::cerr << " starting for loop \n";
          // search all peptides in each protein
          #pragma omp for schedule(dynamic, 100) nowait
          for (SignedSize i = 0; i < prot_count; ++i)
          {
            ++progress_prots; // atomic
            if (omp_get_thread_num() == 0)
            {
              this->setProgress(progress_prots);
            }

            prot = proteins.chunkAt(i).sequence;
            prot.remove('*');

            // check for invalid sequences with modifications
            if (prot.has('[') || prot.has('('))
            {
              invalid_protein_sequence = true; // not omp-critical because its write-only
              // we cannot throw an exception here, since we'd need to catch it within the parallel region
            }

            // convert L/J to I; also replace 'J' in proteins
            if (IL_equivalent_)
            {
              prot.substitute('L', 'I');
              prot.substitute('J', 'I');
            }
            else
            { // warn if 'J' is found (it eats into aaa_max)
              if (prot.has('J'))
              {
                #pragma omp atomic
                ++count_j_proteins;
              }
            }

            Size prot_idx = i + proteins.getChunkOffset();

            // test if protein was a hit
            Size hits_total = func_threads.filter_passed + func_threads.filter_rejected;

            // check if there are stretches of 'X'
            if (prot.has('X'))
            {
              // create chunks of the protein (splitting it at stretches of 'X..X') and feed them to AC one by one
              size_t offset = -1, start = 0; // unsigned wraparound is intended: first find() starts at offset + 1 == 0
              while ((offset = prot.find(jumpX, offset + 1)) != std::string::npos)
              {
                //std::cout << "found X..X at " << offset << " in protein " << proteins[i].identifier << "\n";
                addHits_(fuzzyAC, pattern, pep_DB, prot.substr(start, offset + jumpX.size() - start), prot, prot_idx, (int)start, func_threads);
                // skip ahead while we encounter more X...
                while (offset + jumpX.size() < prot.size() && prot[offset + jumpX.size()] == 'X') ++offset;
                start = offset;
                //std::cout << "  new start: " << start << "\n";
              }
              // last chunk
              if (start < prot.size())
              {
                addHits_(fuzzyAC, pattern, pep_DB, prot.substr(start), prot, prot_idx, (int)start, func_threads);
              }
            }
            else
            {
              addHits_(fuzzyAC, pattern, pep_DB, prot, prot, prot_idx, 0, func_threads);
            }

            // was protein found?
            if (hits_total < func_threads.filter_passed + func_threads.filter_rejected)
            {
              protein_accessions[prot_idx] = proteins.chunkAt(i).identifier;
              acc_to_prot_thread[protein_accessions[prot_idx]] = prot_idx;
            }
          } // end parallel FOR

          // join results again
          DEBUG_ONLY std::cerr << " critical now \n";
#ifdef _OPENMP
#pragma omp critical(PeptideIndexer_joinAC)
#endif
          {
            s.start();
            // hits
            func.merge(func_threads);
            // accession -> index
            acc_to_prot.insert(acc_to_prot_thread.begin(), acc_to_prot_thread.end());
            acc_to_prot_thread.clear();
            s.stop();
          } // OMP end critical
        } // end readChunk
      } // OMP end parallel
      this->endProgress();
      std::cout << "Merge took: " << s.toString() << "\n";
      mu.after();
      std::cout << mu.delta("Aho-Corasick") << "\n\n";

      OPENMS_LOG_INFO << "\nAho-Corasick done:\n  found " << func.filter_passed << " hits for " << func.pep_to_prot.size() << " of " << length(pep_DB) << " peptides.\n";

      // write some stats
      OPENMS_LOG_INFO << "Peptide hits passing enzyme filter: " << func.filter_passed << "\n"
                      << "     ... rejected by enzyme filter: " << func.filter_rejected << std::endl;

      if (count_j_proteins)
      {
        OPENMS_LOG_WARN << "PeptideIndexer found " << count_j_proteins << " protein sequences in your database containing the amino acid 'J'."
                        << "To match 'J' in a protein, an ambiguous amino acid placeholder for I/L will be used.\n"
                        << "This costs runtime and eats into the 'aaa_max' limit, leaving less opportunity for B/Z/X matches.\n"
                        << "If you want 'J' to be treated as unambiguous, enable '-IL_equivalent'!" << std::endl;
      }

    } // end local scope

    //
    //   do mapping
    //
    // index existing proteins
    Map<String, Size> runid_to_runidx; // identifier to index
    for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
    {
      runid_to_runidx[prot_ids[run_idx].getIdentifier()] = run_idx;
    }

    // for peptides --> proteins
    Size stats_matched_unique(0);
    Size stats_matched_multi(0);
    Size stats_unmatched(0);    // no match to DB
    Size stats_count_m_t(0);    // match to Target DB
    Size stats_count_m_d(0);    // match to Decoy DB
    Size stats_count_m_td(0);   // match to T+D DB
    Map<Size, std::set<Size> > runidx_to_protidx; // in which protID do appear which proteins (according to mapped peptides)

    Size pep_idx(0);
    for (std::vector<PeptideIdentification>::iterator it1 = pep_ids.begin(); it1 != pep_ids.end(); ++it1)
    {
      // which ProteinIdentification does the peptide belong to?
      Size run_idx = runid_to_runidx[it1->getIdentifier()];

      std::vector<PeptideHit>& hits = it1->getHits();

      for (std::vector<PeptideHit>::iterator it_hit = hits.begin(); it_hit != hits.end(); /* no increase here! we might need to skip it; see below */)
      {
        // clear protein accessions
        it_hit->setPeptideEvidences(std::vector<PeptideEvidence>());

        //
        // is this a decoy hit?
        //
        bool matches_target(false);
        bool matches_decoy(false);

        std::set<Size> prot_indices; /// protein hits of this peptide
        // add new protein references
        for (std::set<PeptideProteinMatchInformation>::const_iterator it_i = func.pep_to_prot[pep_idx].begin();
             it_i != func.pep_to_prot[pep_idx].end(); ++it_i)
        {
          prot_indices.insert(it_i->protein_index);
          const String& accession = protein_accessions[it_i->protein_index];
          PeptideEvidence pe(accession, it_i->position, it_i->position + (int)it_hit->getSequence().size() - 1, it_i->AABefore, it_i->AAAfter);
          it_hit->addPeptideEvidence(pe);

          runidx_to_protidx[run_idx].insert(it_i->protein_index); // fill protein hits

          if (protein_is_decoy[it_i->protein_index])
          {
            matches_decoy = true;
          }
          else
          {
            matches_target = true;
          }
        }
        ++pep_idx; // next hit

        if (matches_decoy && matches_target)
        {
          it_hit->setMetaValue("target_decoy", "target+decoy");
          ++stats_count_m_td;
        }
        else if (matches_target)
        {
          it_hit->setMetaValue("target_decoy", "target");
          ++stats_count_m_t;
        }
        else if (matches_decoy)
        {
          it_hit->setMetaValue("target_decoy", "decoy");
          ++stats_count_m_d;
        } // else: could match to no protein (i.e. both are false)
        //else ... // not required (handled below; see stats_unmatched);

        if (prot_indices.size() == 1)
        {
          it_hit->setMetaValue("protein_references", "unique");
          ++stats_matched_unique;
        }
        else if (prot_indices.size() > 1)
        {
          it_hit->setMetaValue("protein_references", "non-unique");
          ++stats_matched_multi;
        }
        else
        {
          ++stats_unmatched;
          if (stats_unmatched < 15) OPENMS_LOG_INFO << "Unmatched peptide: " << it_hit->getSequence() << "\n";
          else if (stats_unmatched == 15) OPENMS_LOG_INFO << "Unmatched peptide: ...\n";
          if (unmatched_action_ == Unmatched::REMOVE)
          {
            it_hit = hits.erase(it_hit);
            continue; // already points to the next hit
          }
          else
          {
            it_hit->setMetaValue("protein_references", "unmatched");
          }
        }
        ++it_hit; // next hit
      } // all hits
    } // next PepID

    Size total_peptides = stats_count_m_t + stats_count_m_d + stats_count_m_td + stats_unmatched;
    OPENMS_LOG_INFO << "-----------------------------------\n";
    OPENMS_LOG_INFO << "Peptide statistics\n";
    OPENMS_LOG_INFO << "\n";
    OPENMS_LOG_INFO << "  unmatched                : " << stats_unmatched << " (" << stats_unmatched * 100 / total_peptides << " %)\n";
    OPENMS_LOG_INFO << "  target/decoy:\n";
    OPENMS_LOG_INFO << "    match to target DB only: " << stats_count_m_t << " (" << stats_count_m_t * 100 / total_peptides << " %)\n";
    OPENMS_LOG_INFO << "    match to decoy DB only : " << stats_count_m_d << " (" << stats_count_m_d * 100 / total_peptides << " %)\n";
    OPENMS_LOG_INFO << "    match to both          : " << stats_count_m_td << " (" << stats_count_m_td * 100 / total_peptides << " %)\n";
    OPENMS_LOG_INFO << "\n";
    OPENMS_LOG_INFO << "  mapping to proteins:\n";
    OPENMS_LOG_INFO << "    no match (to 0 protein)         : " << stats_unmatched << "\n";
    OPENMS_LOG_INFO << "    unique match (to 1 protein)     : " << stats_matched_unique << "\n";
    OPENMS_LOG_INFO << "    non-unique match (to >1 protein): " << stats_matched_multi << std::endl;

    /// for proteins --> peptides
    Size stats_matched_proteins(0), stats_matched_new_proteins(0), stats_orphaned_proteins(0), stats_proteins_target(0), stats_proteins_decoy(0);

    // all peptides contain the correct protein hit references, now update the protein hits
    for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
    {
      std::set<Size> masterset = runidx_to_protidx[run_idx]; // all protein matches from above

      std::vector<ProteinHit>& phits = prot_ids[run_idx].getHits();
      {
        // go through existing protein hits and count orphaned proteins (with no peptide hits)
        std::vector<ProteinHit> orphaned_hits;
        for (std::vector<ProteinHit>::iterator p_hit = phits.begin(); p_hit != phits.end(); ++p_hit)
        {
          const String& acc = p_hit->getAccession();
          if (!acc_to_prot.has(acc)) // acc_to_prot only contains found proteins from current run
          { // old hit is orphaned
            ++stats_orphaned_proteins;
            if (keep_unreferenced_proteins_)
            {
              p_hit->setMetaValue("target_decoy", "");
              orphaned_hits.push_back(*p_hit);
            }
          }
        }
        // only keep orphaned hits (if any)
        phits = orphaned_hits;
      }

      // add new protein hits
      FASTAFile::FASTAEntry fe;
      phits.reserve(phits.size() + masterset.size());
      for (std::set<Size>::const_iterator it = masterset.begin(); it != masterset.end(); ++it)
      {
        ProteinHit hit;
        hit.setAccession(protein_accessions[*it]);
        if (write_protein_sequence_ || write_protein_description_)
        {
          proteins.readAt(fe, *it);
          if (write_protein_sequence_)
          {
            hit.setSequence(fe.sequence);
          } // no else, since sequence is empty by default
          if (write_protein_description_)
          {
            hit.setDescription(fe.description);
          } // no else, since description is empty by default
        }
        if (protein_is_decoy[*it])
        {
          hit.setMetaValue("target_decoy", "decoy");
          ++stats_proteins_decoy;
        }
        else
        {
          hit.setMetaValue("target_decoy", "target");
          ++stats_proteins_target;
        }
        phits.push_back(hit);
        ++stats_matched_new_proteins;
      }
      stats_matched_proteins += phits.size();
    }

    OPENMS_LOG_INFO << "-----------------------------------\n";
    OPENMS_LOG_INFO << "Protein statistics\n";
    OPENMS_LOG_INFO << "\n";
    OPENMS_LOG_INFO << "  total proteins searched: " << proteins.size() << "\n";
    OPENMS_LOG_INFO << "  matched proteins       : " << stats_matched_proteins << " (" << stats_matched_new_proteins << " new)\n";
    if (stats_matched_proteins)
    { // prevent Division-by-0 Exception
      OPENMS_LOG_INFO << "  matched target proteins: " << stats_proteins_target << " (" << stats_proteins_target * 100 / stats_matched_proteins << " %)\n";
      OPENMS_LOG_INFO << "  matched decoy proteins : " << stats_proteins_decoy << " (" << stats_proteins_decoy * 100 / stats_matched_proteins << " %)\n";
    }
    // note: both branches carry the trailing '\n' so the following separator starts on its own line
    OPENMS_LOG_INFO << "  orphaned proteins      : " << stats_orphaned_proteins << (keep_unreferenced_proteins_ ? " (all kept)\n" : " (all removed)\n");
    OPENMS_LOG_INFO << "-----------------------------------" << std::endl;

    /// exit if no peptides were matched to decoy
    bool has_error = false;

    if (invalid_protein_sequence)
    {
      OPENMS_LOG_ERROR << "Error: One or more protein sequences contained the characters '[' or '(', which are illegal in protein sequences."
                       << "\nPeptide hits might be masked by these characters (which usually indicate presence of modifications).\n";
      has_error = true;
    }

    if ((stats_count_m_d + stats_count_m_td) == 0)
    {
      String msg("No peptides were matched to the decoy portion of the database! Did you provide the correct concatenated database? Are your 'decoy_string' (=" + decoy_string_ + ") and 'decoy_string_position' (=" + std::string(param_.getValue("decoy_string_position")) + ") settings correct?");
      if (missing_decoy_action_ == MissingDecoy::IS_ERROR)
      {
        OPENMS_LOG_ERROR << "Error: " << msg << "\nSet 'missing_decoy_action' to 'warn' if you are sure this is ok!\nAborting ..." << std::endl;
        has_error = true;
      }
      else if (missing_decoy_action_ == MissingDecoy::WARN)
      {
        OPENMS_LOG_WARN << "Warn: " << msg << "\nSet 'missing_decoy_action' to 'error' if you want to elevate this to an error!" << std::endl;
      }
      else // silent
      {
      }
    }

    if (stats_unmatched > 0)
    {
      OPENMS_LOG_ERROR << "PeptideIndexer found unmatched peptides, which could not be associated to a protein.\n";
      if (unmatched_action_ == Unmatched::IS_ERROR)
      {
        OPENMS_LOG_ERROR
          << "Potential solutions:\n"
          << "   - check your FASTA database is identical to the search DB (or use 'auto')\n"
          << "   - set 'enzyme:specificity' and 'enzyme:name' to 'auto' to match the parameters of the search engine\n"
          << "   - increase 'aaa_max' to allow more ambiguous amino acids\n"
          << "   - as a last resort: use the 'unmatched_action' option to accept or even remove unmatched peptides\n"
          << "     (note that unmatched peptides cannot be used for FDR calculation or quantification)\n";
        has_error = true;
      }
      else if (unmatched_action_ == Unmatched::WARN)
      {
        OPENMS_LOG_ERROR << "  Warning: " << stats_unmatched << " unmatched hits have been found, but were not removed!\n"
                         << "These are not annotated with target/decoy information and might lead to issues with downstream tools (such as FDR).\n"
                         << "Switch to '" << names_of_unmatched[(Size)Unmatched::REMOVE] << "' if you want to avoid these problems.\n";
      }
      else if (unmatched_action_ == Unmatched::REMOVE)
      {
        OPENMS_LOG_ERROR << "  Warning: " << stats_unmatched <<" unmatched hits have been removed!\n"
                         << "Make sure that these hits are actually a violation of the cutting rules by inspecting the database!\n";
        if (xtandem_fix_parameters) OPENMS_LOG_ERROR << "Since the results are from X!Tandem, this is probably ok (check anyways).\n";
      }
      else
      {
        throw Exception::NotImplemented(__FILE__, __LINE__, OPENMS_PRETTY_FUNCTION);
      }
    }

    if (has_error)
    {
      OPENMS_LOG_ERROR << "Result files will be written, but PeptideIndexer will exit with an error code." << std::endl;
      return UNEXPECTED_RESULT;
    }
    return EXECUTION_OK;
  }

  /// the (possibly auto-deduced) decoy string used during the last run
  const String& getDecoyString() const;
  /// was the decoy string interpreted as a prefix (true) or suffix (false)?
  bool isPrefix() const;

protected:
  /// a single peptide-in-protein match, as produced by FoundProteinFunctor::addHit()
  struct PeptideProteinMatchInformation
  {
    OpenMS::Size protein_index; ///< index of the protein the peptide is contained in
    OpenMS::Int position;       ///< the position of the peptide in the protein
    char AABefore;              ///< the amino acid before the peptide in the protein
    char AAAfter;               ///< the amino acid after the peptide in the protein

    const std::tuple<const Size&, const Int&, const char&, const char&> tie() const
    {
      return std::tie(protein_index, position, AABefore, AAAfter);
    }

    bool operator<(const PeptideProteinMatchInformation& other) const
    {
      return tie() < other.tie();
    }

    bool operator==(const PeptideProteinMatchInformation& other) const
    {
      return tie() == other.tie();
    }
  };

  /// collects peptide hits (per thread) and filters them by enzymatic digestion rules
  struct FoundProteinFunctor
  {
public:
    typedef std::map<OpenMS::Size, std::set<PeptideProteinMatchInformation> > MapType;
    MapType pep_to_prot;            ///< peptide index --> protein indices
    OpenMS::Size filter_passed;     ///< number of accepted hits (passing addHit() constraints)
    OpenMS::Size filter_rejected;   ///< number of rejected hits (not passing addHit())

private:
    ProteaseDigestion enzyme_;
    bool xtandem_;                  ///< are we checking xtandem cleavage rules?

public:
    explicit FoundProteinFunctor(const ProteaseDigestion& enzyme, bool xtandem) :
      pep_to_prot(), filter_passed(0), filter_rejected(0), enzyme_(enzyme), xtandem_(xtandem)
    {
    }

    /// absorb the hits of @p other into this functor (called once per thread in an OMP critical section);
    /// @p other is emptied in the process
    void merge(FoundProteinFunctor& other)
    {
      if (pep_to_prot.empty())
      { // first merge is easy
        pep_to_prot.swap(other.pep_to_prot);
      }
      else
      {
        for (FoundProteinFunctor::MapType::const_iterator it = other.pep_to_prot.begin(); it != other.pep_to_prot.end(); ++it)
        { // augment set
          this->pep_to_prot[it->first].insert(other.pep_to_prot[it->first].begin(), other.pep_to_prot[it->first].end());
        }
        other.pep_to_prot.clear();
      }
      // cheap members
      this->filter_passed += other.filter_passed;
      other.filter_passed = 0;
      this->filter_rejected += other.filter_rejected;
      other.filter_rejected = 0;
    }

    /// record a candidate match of peptide @p idx_pep (length @p len_pep) at @p position in protein
    /// @p idx_prot (full sequence @p seq_prot), if it satisfies the enzyme's cleavage rules
    void addHit(const OpenMS::Size idx_pep,
                const OpenMS::Size idx_prot,
                const OpenMS::Size len_pep,
                const OpenMS::String& seq_prot,
                OpenMS::Int position)
    {
      //TODO we could read and double-check missed cleavages as well
      if (enzyme_.isValidProduct(seq_prot, position, len_pep, true, true, xtandem_))
      {
        PeptideProteinMatchInformation match
        {
          idx_prot,
          position,
          (position == 0) ? PeptideEvidence::N_TERMINAL_AA : seq_prot[position - 1],
          (position + len_pep >= seq_prot.size()) ?
              PeptideEvidence::C_TERMINAL_AA :
              seq_prot[position + len_pep]
        };
        pep_to_prot[idx_pep].insert(match);
        ++filter_passed;
      }
      else
      {
        //std::cerr << "REJECTED Peptide " << seq_pep << " with hit to protein "
        //  << seq_prot << " at position " << position << std::endl;
        ++filter_rejected;
      }
    }
  };

  /// run the fuzzy Aho-Corasick search of all peptides on (a chunk of) protein @p prot;
  /// @p offset translates chunk-local hit positions back to positions within @p full_prot
  inline void addHits_(AhoCorasickAmbiguous& fuzzyAC, const AhoCorasickAmbiguous::FuzzyACPattern& pattern, const AhoCorasickAmbiguous::PeptideDB& pep_DB, const String& prot, const String& full_prot, SignedSize idx_prot, Int offset, FoundProteinFunctor& func_threads) const
  {
    fuzzyAC.setProtein(prot);
    while (fuzzyAC.findNext(pattern))
    {
      const seqan::Peptide& tmp_pep = pep_DB[fuzzyAC.getHitDBIndex()];
      func_threads.addHit(fuzzyAC.getHitDBIndex(), idx_prot, length(tmp_pep), full_prot, fuzzyAC.getHitProteinPosition() + offset);
    }
  }

  void updateMembers_() override;

  String decoy_string_{};                     ///< decoy pattern; empty --> auto-deduce from data
  bool prefix_{ false };                      ///< is the decoy string a prefix (true) or suffix (false)?
  MissingDecoy missing_decoy_action_ = MissingDecoy::IS_ERROR;
  String enzyme_name_{};                      ///< enzyme name or 'auto'
  String enzyme_specificity_{};               ///< specificity name or 'auto'
  bool write_protein_sequence_{ false };
  bool write_protein_description_{ false };
  bool keep_unreferenced_proteins_{ false };
  Unmatched unmatched_action_ = Unmatched::IS_ERROR;
  bool IL_equivalent_{ false };               ///< treat I and L as identical
  Int aaa_max_{0};                            ///< max. number of ambiguous amino acids per peptide match
  Int mm_max_{0};                             ///< max. number of mismatches per peptide match
};
}
|
omp_section_lastprivate.c | <ompts:test>
<ompts:testdescription>Test which checks the omp section lastprivate directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp section lastprivate</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"
/* Verifies 'lastprivate(i0)' on an OMP sections construct: after the construct,
 * i0 must hold the value assigned by the lexically LAST section (its loop ends
 * at i == 999), regardless of which thread executed it. The crosscheck variant
 * uses plain 'private(i0)', which must make the final test fail.
 * Note: logFile is unused; the testsuite requires this signature. */
int <ompts:testcode:functionname>omp_section_lastprivate</ompts:testcode:functionname>(FILE * logFile){
<ompts:orphan:vars>
int i0 = -1;
int sum = 0;
int i;
int sum0 = 0;
</ompts:orphan:vars>
int known_sum;
i0 = -1;
sum = 0;
#pragma omp parallel
{
<ompts:orphan>
/* each section accumulates a disjoint sub-range of 1..999 into the shared sum */
#pragma omp sections <ompts:check>lastprivate(i0)</ompts:check><ompts:crosscheck>private(i0)</ompts:crosscheck> private(i,sum0)
{
#pragma omp section
{
sum0 = 0;
for (i = 1; i < 400; i++)
{
sum0 = sum0 + i;
i0 = i;
}
/* shared 'sum' must be updated atomically across sections */
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical*/
} /* end of section */
#pragma omp section
{
sum0 = 0;
for(i = 400; i < 700; i++)
{
sum0 = sum0 + i;
i0 = i;
}
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical*/
}
/* lexically last section: lastprivate must propagate i0 == 999 */
#pragma omp section
{
sum0 = 0;
for(i = 700; i < 1000; i++)
{
sum0 = sum0 + i;
i0 = i;
}
#pragma omp critical
{
sum = sum + sum0;
} /*end of critical*/
}
} /* end of sections*/
</ompts:orphan>
} /* end of parallel*/
/* Gauss sum of 1..999 */
known_sum = (999 * 1000) / 2;
/* pass iff both the reduction result and the lastprivate value are correct */
return ((known_sum == sum) && (i0 == 999) );
}
</ompts:testcode>
</ompts:test>
|
GB_binop__iseq_fc64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__iseq_fc64
// A.*B function (eWiseMult): GB_AemultB__iseq_fc64
// A*D function (colscale): (none)
// D*A function (rowscale):   (none)
// C+=B function (dense accum): GB_Cdense_accumB__iseq_fc64
// C+=b function (dense accum): GB_Cdense_accumb__iseq_fc64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_fc64
// C=scalar+B GB_bind1st__iseq_fc64
// C=scalar+B' GB_bind1st_tran__iseq_fc64
// C=A+scalar GB_bind2nd__iseq_fc64
// C=A'+scalar GB_bind2nd_tran__iseq_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// B,b type: GxB_FC64_t
// BinaryOp: cij = GB_FC64_iseq (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_FC64_iseq (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_FC64 || GxB_NO_ISEQ_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled stub: ISEQ is not one of the supported accumulators, so no
// dense ewise3-accum kernel is generated for this operator/type pair.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop body comes from
// the shared template, specialized by the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__iseq_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // kernel compiled out via GxB_NO_* controls
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into an all-dense matrix C.
// The *_slice arrays partition B's entries across ntasks parallel tasks.
GrB_Info GB_Cdense_accumB__iseq_fc64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into an all-dense matrix C.
// Fix: the original had a second `return (GrB_SUCCESS) ;` after the
// braced block that already returns — unreachable dead code, removed.
GrB_Info GB_Cdense_accumb__iseq_fc64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC64_t
        GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no colscale (C = A*D) kernel is generated for the ISEQ
// operator; the generic kernel is used instead.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no rowscale (C = D*B) kernel is generated for the ISEQ
// operator; the generic kernel is used instead.
// Fix: placeholder name was misspelled "(node)"; every other disabled
// kernel in this generated family uses "(none)".
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, via the shared add template.
GrB_Info GB_AaddB__iseq_fc64
(
    GrB_Matrix C,
    const GrB_Matrix M,                        // optional mask (may be NULL)
    const bool Mask_struct,                    // if true, use only M's structure
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, via the shared emult template.
GrB_Info GB_AemultB__iseq_fc64
(
    GrB_Matrix C,
    const GrB_Matrix M,                        // optional mask (may be NULL)
    const bool Mask_struct,                    // if true, use only M's structure
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = iseq (x, Bx [k]) for every entry of B, with the scalar x
// bound as the first operand.  Cx and Bx may be aliased: each element
// is read before the same slot is written.
GrB_Info GB_bind1st__iseq_fc64
(
    GB_void *Cx_output,          // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    const GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
    GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = GB_FC64_iseq (x, Bx [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = iseq (Ax [k], y) for every entry of A, with the scalar y
// bound as the second operand.  Cx and Ax may be aliased: each element
// is read before the same slot is written.
GrB_Info GB_bind2nd__iseq_fc64
(
    GB_void *Cx_output,          // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
    GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
    const GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = GB_FC64_iseq (Ax [k], y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    GxB_FC64_t aij = Ax [pA] ; \
    Cx [pC] = GB_FC64_iseq (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first,
// via the shared unary-op transpose template and GB_CAST_OP above.
GrB_Info GB_bind1st_tran__iseq_fc64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    GxB_FC64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    GxB_FC64_t aij = Ax [pA] ; \
    Cx [pC] = GB_FC64_iseq (aij, y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second,
// via the shared unary-op transpose template and GB_CAST_OP above.
GrB_Info GB_bind2nd_tran__iseq_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
CorotationalStrainMapping.h | /******************************************************************************
* SOFA, Simulation Open-Framework Architecture, development version *
* (c) 2006-2017 INRIA, USTL, UJF, CNRS, MGH *
* *
* This program is free software; you can redistribute it and/or modify it *
* under the terms of the GNU Lesser General Public License as published by *
* the Free Software Foundation; either version 2.1 of the License, or (at *
* your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, but WITHOUT *
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License *
* for more details. *
* *
* You should have received a copy of the GNU Lesser General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/>. *
*******************************************************************************
* Authors: The SOFA Team and external contributors (see Authors.txt) *
* *
* Contact information: contact@sofa-framework.org *
******************************************************************************/
#ifndef SOFA_COMPONENT_MAPPING_CorotationalStrainMAPPING_H
#define SOFA_COMPONENT_MAPPING_CorotationalStrainMAPPING_H
#include <Flexible/config.h>
#include "../strainMapping/BaseStrainMapping.h"
#include "../strainMapping/CorotationalStrainJacobianBlock.inl"
#include <sofa/helper/OptionsGroup.h>
namespace sofa
{
namespace component
{
namespace mapping
{
/** Deformation Gradient to Corotational Lagrangian Strain mapping
*
* @author Matthieu Nesme
*
*/
template <class TIn, class TOut>
class CorotationalStrainMapping : public BaseStrainMappingT<defaulttype::CorotationalStrainJacobianBlock<TIn,TOut> >
{
public:
    typedef defaulttype::CorotationalStrainJacobianBlock<TIn,TOut> BlockType;
    typedef BaseStrainMappingT<BlockType > Inherit;
    SOFA_CLASS(SOFA_TEMPLATE2(CorotationalStrainMapping,TIn,TOut), SOFA_TEMPLATE(BaseStrainMappingT,BlockType ));

    /** @name Corotational methods
    SMALL = Cauchy strain
    QR = Nesme et al, 2005, "Efficient, Physically Plausible Finite Elements"
    POLAR = Etzmuß et al, 2003, "A Fast Finite Element Solution for Cloth Modelling" ; Muller et al, 2004 "Interactive Virtual Materials"
    SVD = Irving et al, 2004, "Invertible finite elements for robust simulation of large deformation"
    FROBENIUS = Muller et al, 2016, "A Robust Method to Extract the Rotational Part of Deformations"
    */
    //@{
    enum DecompositionMethod { POLAR=0, QR, SMALL, SVD, FROBENIUS, NB_DecompositionMethod };
    Data<helper::OptionsGroup> f_method;   ///< selected rotation-extraction method (see enum above)
    //@}

    Data<bool> f_geometricStiffness; ///< should geometricStiffness be considered?

    //Pierre-Luc : I added this function to use some functionalities of the mapping component without using it as a sofa graph component (protected)
    /// Initializes externally-owned jacobian blocks with the currently selected
    /// decomposition method (mirrors reinit(), but on a caller-supplied vector).
    virtual void initJacobianBlock( helper::vector<BlockType>& jacobianBlock )
    {
        if(this->f_printLog.getValue()==true)
            std::cout << SOFA_CLASS_METHOD << std::endl;

        switch( f_method.getValue().getSelectedId() )
        {
        case SMALL:
        {
            for( size_t i=0 ; i<jacobianBlock.size() ; i++ )
            {
                jacobianBlock[i].init_small();
            }
            break;
        }
        case QR:
        {
            for( size_t i=0 ; i<jacobianBlock.size() ; i++ )
            {
                jacobianBlock[i].init_qr( f_geometricStiffness.getValue() );
            }
            break;
        }
        case POLAR:
        {
            for( size_t i=0 ; i<jacobianBlock.size() ; i++ )
            {
                jacobianBlock[i].init_polar( f_geometricStiffness.getValue() );
            }
            break;
        }
        case SVD:
        {
            for( size_t i=0 ; i<jacobianBlock.size() ; i++ )
            {
                jacobianBlock[i].init_svd( f_geometricStiffness.getValue() );
            }
            break;
        }
        case FROBENIUS:
        {
            for( size_t i=0 ; i<jacobianBlock.size() ; i++ )
            {
                jacobianBlock[i].init_frobenius( f_geometricStiffness.getValue() );
            }
            break;
        }
        }
    }

    /// Re-initializes this mapping's own jacobian blocks after a method or
    /// geometricStiffness change.
    virtual void reinit()
    {
        Inherit::reinit();

        switch( f_method.getValue().getSelectedId() )
        {
        case SMALL:
        {
            for( size_t i=0 ; i<this->jacobian.size() ; i++ )
            {
                this->jacobian[i].init_small();
            }
            break;
        }
        case QR:
        {
            for( size_t i=0 ; i<this->jacobian.size() ; i++ )
            {
                this->jacobian[i].init_qr( f_geometricStiffness.getValue() );
            }
            break;
        }
        case POLAR:
        {
            for( size_t i=0 ; i<this->jacobian.size() ; i++ )
            {
                this->jacobian[i].init_polar( f_geometricStiffness.getValue() );
            }
            break;
        }
        case SVD:
        {
            for( size_t i=0 ; i<this->jacobian.size() ; i++ )
            {
                this->jacobian[i].init_svd( f_geometricStiffness.getValue() );
            }
            break;
        }
        case FROBENIUS:
        {
            for( size_t i=0 ; i<this->jacobian.size() ; i++ )
            {
                this->jacobian[i].init_frobenius( f_geometricStiffness.getValue() );
            }
            break;
        }
        }
    }

protected:
    CorotationalStrainMapping (core::State<TIn>* from = NULL, core::State<TOut>* to= NULL)
        : Inherit ( from, to )
        , f_method( initData( &f_method, "method", "Decomposition method" ) )
        , f_geometricStiffness( initData( &f_geometricStiffness, false, "geometricStiffness", "Should geometricStiffness be considered?" ) )
    {
        helper::OptionsGroup Options;
        Options.setNbItems( NB_DecompositionMethod );
        Options.setItemName( SMALL, "small" );
        Options.setItemName( QR, "qr" );
        Options.setItemName( POLAR, "polar" );
        Options.setItemName( SVD, "svd" );
        Options.setItemName( FROBENIUS, "frobenius" );
        Options.setSelectedItem( SVD );   // SVD is the default method
        f_method.setValue( Options );
    }

    virtual ~CorotationalStrainMapping() { }

    /// Applies the strain mapping on caller-supplied jacobian blocks
    /// (companion of initJacobianBlock; out[i] is reset then accumulated into).
    // NOTE(review): the log tag says "apply" although this is applyBlock.
    virtual void applyBlock(Data<typename Inherit::OutVecCoord>& dOut, const Data<typename Inherit::InVecCoord>& dIn, helper::vector<BlockType>& jacobianBlock)
    {
        if(this->f_printLog.getValue()) std::cout<<this->getName()<<":apply"<<std::endl;

        typename Inherit::OutVecCoord& out = *dOut.beginWriteOnly();
        const typename Inherit::InVecCoord& in = dIn.getValue();

        switch( f_method.getValue().getSelectedId() )
        {
        case SMALL:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for( int i=0 ; i < static_cast<int>(jacobianBlock.size()) ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                jacobianBlock[i].addapply_small( out[i], in[i] );
            }
            break;
        }
        case QR:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for( int i=0 ; i < static_cast<int>(jacobianBlock.size()) ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                jacobianBlock[i].addapply_qr( out[i], in[i] );
            }
            break;
        }
        case POLAR:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for( int i=0 ; i < static_cast<int>(jacobianBlock.size()) ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                //std::cout << "applyBlock_polar : Index : " << i << std::endl;
                jacobianBlock[i].addapply_polar( out[i], in[i] );
            }
            break;
        }
        case SVD:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for( int i=0 ; i < static_cast<int>(jacobianBlock.size()) ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                jacobianBlock[i].addapply_svd( out[i], in[i] );
            }
            break;
        }
        case FROBENIUS:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for( int i=0 ; i < static_cast<int>(jacobianBlock.size()) ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                jacobianBlock[i].addapply_frobenius( out[i], in[i] );
            }
            break;
        }
        }

        dOut.endEdit();
    }

    /// Maps positions: out = J(in), dispatching on the selected method.
    virtual void apply( const core::MechanicalParams * /*mparams*/ , Data<typename Inherit::OutVecCoord>& dOut, const Data<typename Inherit::InVecCoord>& dIn )
    {
        if(this->f_printLog.getValue()) std::cout<<this->getName()<<":apply"<<std::endl;

        helper::ReadAccessor<Data<typename Inherit::InVecCoord> > inpos (*this->fromModel->read(core::ConstVecCoordId::position()));
        helper::ReadAccessor<Data<typename Inherit::OutVecCoord> > outpos (*this->toModel->read(core::ConstVecCoordId::position()));
        // keep output state sized consistently with the input state
        if(inpos.size()!=outpos.size()) this->resizeOut();

        typename Inherit::OutVecCoord& out = *dOut.beginWriteOnly();
        const typename Inherit::InVecCoord& in = dIn.getValue();

        switch( f_method.getValue().getSelectedId() )
        {
        case SMALL:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for( int i=0 ; i < static_cast<int>(this->jacobian.size()) ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                this->jacobian[i].addapply_small( out[i], in[i] );
            }
            break;
        }
        case QR:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for( int i=0 ; i < static_cast<int>(this->jacobian.size()) ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                this->jacobian[i].addapply_qr( out[i], in[i] );
            }
            break;
        }
        case POLAR:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for( int i=0 ; i < static_cast<int>(this->jacobian.size()) ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                this->jacobian[i].addapply_polar( out[i], in[i] );
            }
            break;
        }
        case SVD:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for( int i=0 ; i < static_cast<int>(this->jacobian.size()) ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                this->jacobian[i].addapply_svd( out[i], in[i] );
            }
            break;
        }
        case FROBENIUS:
        {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
            for( int i=0 ; i < static_cast<int>(this->jacobian.size()) ; i++ )
            {
                out[i] = typename Inherit::OutCoord();
                this->jacobian[i].addapply_frobenius( out[i], in[i] );
            }
            break;
        }
        }

        dOut.endEdit();

        // non-constant blocks invalidate the assembled jacobian
        if(!BlockType::constant && this->assemble.getValue()) this->updateJ();
    }

    /// Adds the geometric-stiffness contribution df += K * dx to the parent
    /// force, either via the assembled K or per-block, per selected method.
    virtual void applyDJT(const core::MechanicalParams* mparams, core::MultiVecDerivId parentDfId, core::ConstMultiVecDerivId )
    {
        if( !f_geometricStiffness.getValue() ) return;
        if(BlockType::constant) return;   // constant J => no geometric stiffness

        Data<typename Inherit::InVecDeriv>& parentForceData = *parentDfId[this->fromModel.get(mparams)].write();
        const Data<typename Inherit::InVecDeriv>& parentDisplacementData = *mparams->readDx(this->fromModel);
        const Data<typename Inherit::OutVecDeriv>& childForceData = *mparams->readF(this->toModel);

        helper::WriteAccessor<Data<typename Inherit::InVecDeriv> > parentForce (parentForceData);
        helper::ReadAccessor<Data<typename Inherit::InVecDeriv> > parentDisplacement (parentDisplacementData);
        helper::ReadAccessor<Data<typename Inherit::OutVecDeriv> > childForce (childForceData);

        if(this->assemble.getValue())
        {
            this->K.addMult(parentForceData,parentDisplacementData,mparams->kFactor());
        }
        else
        {
            switch( f_method.getValue().getSelectedId() )
            {
            case SMALL:
            {
                // Cauchy (linear) strain has no geometric stiffness term
                break;
            }
            case QR:
            {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
                for( int i=0 ; i < static_cast<int>(this->jacobian.size()) ; i++ )
                {
                    this->jacobian[i].addDForce_qr( parentForce[i], parentDisplacement[i], childForce[i], mparams->kFactor() );
                }
                break;
            }
            case POLAR:
            {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
                for( int i=0 ; i < static_cast<int>(this->jacobian.size()) ; i++ )
                {
                    this->jacobian[i].addDForce_polar( parentForce[i], parentDisplacement[i], childForce[i], mparams->kFactor() );
                }
                break;
            }
            case SVD:
            {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
                for( int i=0 ; i < static_cast<int>(this->jacobian.size()) ; i++ )
                {
                    this->jacobian[i].addDForce_svd( parentForce[i], parentDisplacement[i], childForce[i], mparams->kFactor() );
                }
                break;
            }
            case FROBENIUS:
            {
#ifdef _OPENMP
#pragma omp parallel for if (this->d_parallel.getValue())
#endif
                for( int i=0 ; i < static_cast<int>(this->jacobian.size()) ; i++ )
                {
                    this->jacobian[i].addDForce_frobenius( parentForce[i], parentDisplacement[i], childForce[i], mparams->kFactor() );
                }
                break;
            }
            }
        }
    }
};
} // namespace mapping
} // namespace component
} // namespace sofa
#endif
|
TaskBeginLink.c | int x;
/* Appears to be a compiler/tool test fixture (e.g. for linking task-begin
 * events to their source constructs) — TODO confirm against the harness.
 * The bare expression statements are intentional markers with no effect. */
int main() {
    #pragma omp task if (1)
    {
        11;      /* marker statement, no effect by design */
    }
    #pragma omp task
    {
        12;      /* marker statement, no effect by design */
    }
    #pragma omp task
    {
        int x;   /* shadows the global x; intentionally unused */
    }
}
|
par_gsmg.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Geometrically smooth interpolation multigrid
*
*****************************************************************************/
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "_hypre_lapack.h"
#ifndef ABS
#define ABS(x) ((x)>0 ? (x) : -(x))
#endif
#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif
/* Euclidean norm of the n-vector x: sqrt(sum_i x[i]^2). */
static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real *x)
{
   HYPRE_Int k;
   HYPRE_Real sumsq = 0.;

   for (k = 0; k < n; k++)
   {
      sumsq += x[k] * x[k];
   }

   return sqrt(sumsq);
}
/* Scale the n-vector x in place by the scalar a. */
static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real *x)
{
   HYPRE_Int k;

   for (k = 0; k < n; k++)
   {
      x[k] *= a;
   }
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFillSmooth
* - fill in smooth matrix
* - this function will scale the smooth vectors
*--------------------------------------------------------------------------*/
/* Fill the entries of the strength matrix S from the smooth test vectors
 * in `samples` (nsamples vectors of length n, stored contiguously):
 * S(i,j) becomes 1 / sum_k |v_k[i] - v_k[j]|, with 0 for cross-function
 * couplings and for explicit zeros of A.  NOTE: the sample vectors are
 * normalized (scaled) in place.  Off-processor vector values and, when
 * num_functions > 1, off-processor dof_func values are fetched via the
 * ParCSR communication package of A.  Returns 0. */
HYPRE_Int
hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real *samples,
                             hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A,
                             HYPRE_Int num_functions, HYPRE_Int *dof_func)
{
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int i, j, k, ii, index, start;
   HYPRE_Int num_cols_offd;
   HYPRE_Int num_sends;
   HYPRE_Int *dof_func_offd;      /* dof_func values for offd columns */
   HYPRE_Int *int_buf_data;
   HYPRE_Real temp;
   HYPRE_Real *p;                 /* walks the sample vectors */
   HYPRE_Real *p_offd;            /* walks the received offd sample values */
   HYPRE_Real *p_ptr;             /* base of the offd sample block (for free) */
   HYPRE_Real *buf_data;
   HYPRE_Real nm;
#if 0
   HYPRE_Real mx = 0., my = 1.e+10;
#endif

   /* normalize each sample vector and divide by number of samples */
   for (k=0; k<nsamples; k++)
   {
      nm = mydnrm2(n, samples+k*n);
      nm = 1./nm/nsamples;
      mydscal(n, nm, samples+k*n);
   }

   /* exchange the sample-vector values needed for offd columns:
    * one communication round per sample vector */
   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                        num_sends), HYPRE_MEMORY_HOST);
   p_offd = hypre_CTAlloc(HYPRE_Real, nsamples*num_cols_offd, HYPRE_MEMORY_HOST);
   p_ptr = p_offd;
   p = samples;
   for (k = 0; k < nsamples; k++)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            buf_data[index++]
               = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data,
                                                  p_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      p = p+n;
      p_offd = p_offd+num_cols_offd;
   }
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);

   /* with multiple functions, also fetch dof_func for offd columns */
   if (num_functions > 1)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                              num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   for (i = 0; i < n; i++)
   {
      /* diag part; the first entry of each row (the diagonal) is skipped */
      for (j = S_diag_i[i]+1; j < S_diag_i[i+1]; j++)
      {
         ii = S_diag_j[j];

         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func[ii])
         {
            S_diag_data[j] = 0.;
            continue;
         }

         /* explicit zeros */
         if (A_diag_data[j] == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }

         temp = 0.;
         p = samples;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p[ii]);
            p = p + n;
         }

         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }

         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_diag_data[j] = temp;
      }
      /* offd part: same computation against the received offd values */
      for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
      {
         ii = S_offd_j[j];

         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func_offd[ii])
         {
            S_offd_data[j] = 0.;
            continue;
         }

         /* explicit zeros */
         if (A_offd_data[j] == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }

         temp = 0.;
         p = samples;
         p_offd = p_ptr;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p_offd[ii]);
            p = p + n;
            p_offd = p_offd + num_cols_offd;
         }

         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }

         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_offd_data[j] = temp;
      }
   }
#if 0
   hypre_printf("MIN, MAX: %f %f\n", my, mx);
#endif

   hypre_TFree(p_ptr, HYPRE_MEMORY_HOST);
   if (num_functions > 1)
      hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);

   return 0;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixChooseThresh
 *
 * Pick a global dropping threshold for S: for every row take the largest
 * entry (over both the diag and offd parts), then return the smallest of
 * these row maxima across all rows and all processes (a "min-max").
 * Rows whose maximum is zero (empty rows) do not participate.
 *--------------------------------------------------------------------------*/
HYPRE_Real
hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix *S)
{
   MPI_Comm         comm        = hypre_ParCSRMatrixComm(S);
   hypre_CSRMatrix *S_diag      = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix *S_offd      = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_diag_i    = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_offd_i    = hypre_CSRMatrixI(S_offd);
   HYPRE_Real      *S_diag_data = hypre_CSRMatrixData(S_diag);
   HYPRE_Real      *S_offd_data = hypre_CSRMatrixData(S_offd);
   HYPRE_Int        num_rows    = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int        row, k;
   HYPRE_Real       row_max;
   HYPRE_Real       local_minimax = 1.e+10;
   HYPRE_Real       global_minimax;

   for (row = 0; row < num_rows; row++)
   {
      /* largest coupling in this row, over local and off-processor columns */
      row_max = 0.;
      for (k = S_diag_i[row]; k < S_diag_i[row+1]; k++)
      {
         row_max = hypre_max(row_max, S_diag_data[k]);
      }
      for (k = S_offd_i[row]; k < S_offd_i[row+1]; k++)
      {
         row_max = hypre_max(row_max, S_offd_data[k]);
      }

      /* rows with no nonzero coupling are excluded from the minimum */
      if (row_max != 0.)
      {
         local_minimax = hypre_min(local_minimax, row_max);
      }
   }

   /* reduce to the global minimum over all ranks */
   hypre_MPI_Allreduce(&local_minimax, &global_minimax, 1, HYPRE_MPI_REAL,
                       hypre_MPI_MIN, comm);

   return global_minimax;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixThreshold
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix *A, HYPRE_Real thresh)
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_nonzeros_diag = A_diag_i[n];
HYPRE_Int num_nonzeros_offd = A_offd_i[n];
HYPRE_Int *S_diag_i;
HYPRE_Int *S_diag_j;
HYPRE_Real *S_diag_data;
HYPRE_Int *S_offd_i;
HYPRE_Int *S_offd_j;
HYPRE_Real *S_offd_data;
HYPRE_Int count, i, jS, jA;
/* first count the number of nonzeros we will need */
count = 0;
for (i=0; i<num_nonzeros_diag; i++)
if (A_diag_data[i] >= thresh)
count++;
/* allocate vectors */
S_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);
jS = 0;
for (i = 0; i < n; i++)
{
S_diag_i[i] = jS;
for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] >= thresh)
{
S_diag_data[jS] = A_diag_data[jA];
S_diag_j[jS] = A_diag_j[jA];
jS++;
}
}
}
S_diag_i[n] = jS;
hypre_CSRMatrixNumNonzeros(A_diag) = jS;
/* free the vectors we don't need */
hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST);
/* assign the new vectors */
hypre_CSRMatrixI(A_diag) = S_diag_i;
hypre_CSRMatrixJ(A_diag) = S_diag_j;
hypre_CSRMatrixData(A_diag) = S_diag_data;
/*
* Offd part
*/
/* first count the number of nonzeros we will need */
count = 0;
for (i=0; i<num_nonzeros_offd; i++)
if (A_offd_data[i] >= thresh)
count++;
/* allocate vectors */
S_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
S_offd_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);
jS = 0;
for (i = 0; i < n; i++)
{
S_offd_i[i] = jS;
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] >= thresh)
{
S_offd_data[jS] = A_offd_data[jA];
S_offd_j[jS] = A_offd_j[jA];
jS++;
}
}
}
S_offd_i[n] = jS;
hypre_CSRMatrixNumNonzeros(A_offd) = jS;
/* free the vectors we don't need */
hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST);
hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST);
/* assign the new vectors */
hypre_CSRMatrixI(A_offd) = S_offd_i;
hypre_CSRMatrixJ(A_offd) = S_offd_j;
hypre_CSRMatrixData(A_offd) = S_offd_data;
return 0;
}
/*--------------------------------------------------------------------------
 * CreateSmoothVecs
 * - smoother depends on the level being used
 *
 * Generate nsamples "smooth" (near-kernel) vectors by relaxing on A u = 0
 * starting from random initial guesses.  The vectors are returned through
 * *SmoothVecs_p as one contiguous array of length nsamples * n_local;
 * vector k occupies entries [k*n_local, (k+1)*n_local).  The caller owns
 * the returned array and must free it.
 *
 * data       - hypre_ParAMGData*, read for nsamples, relax/smooth options
 * A          - matrix to relax with
 * num_sweeps - relaxation sweeps per sample (overridden by the AMG smooth
 *              settings when smooth_num_levels > level)
 * level      - current AMG level, selects whether the complex smoother
 *              (e.g. Schwarz) applies
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateSmoothVecs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Int num_sweeps,
                                HYPRE_Int level,
                                HYPRE_Real **SmoothVecs_p)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_ParVector *Zero;
   hypre_ParVector *Temp;
   hypre_ParVector *U;
   hypre_ParVector *Qtemp = NULL;
   HYPRE_Int i;
   HYPRE_BigInt n = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int n_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt *starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int sample;
   HYPRE_Int nsamples = hypre_ParAMGDataNumSamples(amg_data);
   HYPRE_Int ret;
   HYPRE_Real *datax, *bp, *p;
   HYPRE_Int rlx_type;
   HYPRE_Int smooth_type;
   HYPRE_Int smooth_option = 0;
   HYPRE_Int smooth_num_levels;
   HYPRE_Solver *smoother;
   HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
   HYPRE_Int num_threads;
   num_threads = hypre_NumThreads();
   /* relaxation needs a comm package; build one if A has none yet */
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   if (debug_flag >= 1)
      hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n", num_sweeps,
                   nsamples);
   /* if a complex smoother is configured for this level, use its settings;
      note: `smoother` is only read below when smooth_option == 6, which can
      only happen when this branch was taken, so it is never uninitialized */
   smooth_type = hypre_ParAMGDataSmoothType(amg_data);
   smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data);
   if (smooth_num_levels > level)
   {
      smooth_option = smooth_type;
      smoother = hypre_ParAMGDataSmoother(amg_data);
      num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data);
   }
   rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0];
   /* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */
   /* omega = hypre_ParAMGDataOmega(amg_data)[level]; */
   /* generate par vectors: Zero = rhs (all zeros), Temp = relaxation
      workspace, U = iterate holding the current sample vector */
   Zero = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(Zero,0);
   hypre_ParVectorInitialize(Zero);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero));
   for (i=0; i<n_local; i++)
      datax[i] = 0.;
   Temp = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(Temp,0);
   hypre_ParVectorInitialize(Temp);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp));
   for (i=0; i<n_local; i++)
      datax[i] = 0.;
   U = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(U,0);
   hypre_ParVectorInitialize(U);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(U));
   /* extra workspace required by threaded relaxation */
   if (num_threads > 1)
   {
      Qtemp = hypre_ParVectorCreate(comm, n, starts);
      hypre_ParVectorInitialize(Qtemp);
      hypre_ParVectorSetPartitioningOwner(Qtemp,0);
   }
   /* allocate space for the vectors (all samples in one flat array) */
   bp = hypre_CTAlloc(HYPRE_Real, nsamples*n_local, HYPRE_MEMORY_HOST);
   p = bp;
   /* generate random vectors, smooth each one, and copy it out */
   for (sample=0; sample<nsamples; sample++)
   {
      /* random initial guess in [-0.5, 0.5) */
      for (i=0; i<n_local; i++)
         datax[i] = hypre_Rand() - .5;
      for (i=0; i<num_sweeps; i++)
      {
         if (smooth_option == 6)
         {
            /* Schwarz smoother configured for this level */
            HYPRE_SchwarzSolve(smoother[level],
                               (HYPRE_ParCSRMatrix) A,
                               (HYPRE_ParVector) Zero,
                               (HYPRE_ParVector) U);
         }
         else
         {
            /* plain relaxation with zero rhs (weight/omega fixed at 1.0) */
            ret = hypre_BoomerAMGRelax(A, Zero, NULL /*CFmarker*/,
                                       rlx_type , 0 /*rel pts*/, 1.0 /*weight*/,
                                       1.0 /*omega*/, NULL, U, Temp,
                                       Qtemp);
            hypre_assert(ret == 0);
         }
      }
      /* copy out the solution */
      for (i=0; i<n_local; i++)
         *p++ = datax[i];
   }
   hypre_ParVectorDestroy(Zero);
   hypre_ParVectorDestroy(Temp);
   hypre_ParVectorDestroy(U);
   if (num_threads > 1)
      hypre_ParVectorDestroy(Qtemp);
   /* ownership of bp transfers to the caller */
   *SmoothVecs_p = bp;
   return 0;
}
/*--------------------------------------------------------------------------
 * CreateSmoothDirs replaces CreateS in AMG
 * - smoother depends on the level being used
 * - in this version, CreateSmoothVecs must be called prior to this function
 *
 * Builds the strength matrix S from the precomputed smooth vectors:
 * clone A's sparsity, fill it with smoothness differences, pick a global
 * min-max threshold, and drop entries below thresh * minimax.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateSmoothDirs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Real *SmoothVecs,
                                HYPRE_Real thresh,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                hypre_ParCSRMatrix **S_ptr)
{
   hypre_ParAMGData   *amg_data   = (hypre_ParAMGData*) data;
   HYPRE_Int           debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
   hypre_ParCSRMatrix *S;
   HYPRE_Real          minimax;

   /* S gets A's sparsity pattern (values not copied) */
   S = hypre_ParCSRMatrixClone(A, 0);

   /* traverse S and fill in smooth-vector differences */
   hypre_ParCSRMatrixFillSmooth(hypre_ParAMGDataNumSamples(amg_data),
                                SmoothVecs, S, A, num_functions, dof_func);

   minimax = hypre_ParCSRMatrixChooseThresh(S);
   if (debug_flag >= 1)
   {
      hypre_printf("Minimax chosen: %f\n", minimax);
   }

   /* threshold and compress */
   hypre_ParCSRMatrixThreshold(S, thresh*minimax);

   *S_ptr = S;
   return 0;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGNormalizeVecs
 *
 * Overwrite the first smooth vector with the constant vector, then scale
 * every vector to unit Euclidean norm.
 *
 * inputs:
 * n   = length of smooth vectors
 * num = number of smooth vectors
 * V   = smooth vectors (array of length n*num), also an output
 *
 * output:
 * V   = adjusted smooth vectors
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real *V)
{
   HYPRE_Int  k, vec;
   HYPRE_Real norm;

   /* replace the first vector by the constant vector */
   for (k = 0; k < n; k++)
   {
      V[k] = 1.0;
   }

   /* normalize each vector to unit 2-norm */
   for (vec = 0; vec < num; vec++)
   {
      norm = mydnrm2(n, &V[vec*n]);
      mydscal(n, 1./norm, &V[vec*n]);
   }

   return 0;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGFitVectors
 *
 * Construct interpolation weights based on fitting smooth vectors
 *
 * Solves the least-squares problem min || a x - b || with LAPACK dgels,
 * where column j of a holds the smooth-vector values at coarse point
 * ind[j] and b holds the smooth-vector values at fine point ip.
 *
 * inputs:
 * ip  = row number of row in P being processed (0-based)
 * n   = length of smooth vectors
 * num = number of smooth vectors
 * V   = smooth vectors (array of length n*num), also an output
 * nc  = number of coarse grid points
 * ind = indices of coarse grid points (0-based)
 *
 * output:
 * val = interpolation weights for the coarse grid points
 * V   = smooth vectors; first one has been changed to constant vector;
 *       vectors have also been normalized; this is also an input
 *
 * returns the dgels INFO code (0 on success)
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num, const HYPRE_Real *V,
                          HYPRE_Int nc, const HYPRE_Int *ind, HYPRE_Real *val)
{
   HYPRE_Real *a, *b;
   HYPRE_Real *ap;
   HYPRE_Int i, j;
   HYPRE_Real *work;
   HYPRE_Int work_size;
   HYPRE_Int info;
   HYPRE_Int temp;
   /*
   hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc);
   for (i=0; i<nc; i++)
      hypre_printf("%d ", ind[i]);
   hypre_printf("\n");
   */

   /* no coarse points: nothing to fit, val is left untouched */
   if (nc == 0)
      return 0;

   /* fixed LAPACK workspace size; ample for the small nc/num used here */
   work_size = 2000*64;
   work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST);

   /* a = num-by-nc column-major matrix; column j holds the smooth-vector
      values at coarse point ind[j] */
   a = hypre_CTAlloc(HYPRE_Real, num*nc, HYPRE_MEMORY_HOST);
   ap = a;
   for (j=0; j<nc; j++)
   {
      for (i=0; i<num; i++)
      {
         *ap = V[i*n+ind[j]];
         ap++;
      }
   }

   /* right-hand side; length max(nc,num) because dgels overwrites b with
      the solution, whose leading dimension must be >= max(m,n) */
   temp = MAX(nc, num);
   b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST);
   for (i=0; i<num; i++)
      b[i] = V[i*n+ip];

   {
      char trans = 'N';
      HYPRE_Int one = 1;
      hypre_dgels(&trans, &num, &nc, &one, a, &num,
                  b, &temp, work, &work_size, &info);
      if (info != 0)
      {
         /* BUG FIX: the old message contained "%d" but hypre_error_w_msg
            receives no variadic argument, so the dgels status was never
            reported -- format the INFO code into the message explicitly */
         char msg[80];
         hypre_sprintf(msg, "par_gsmg: dgels returned %d\n", info);
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, msg);
      }

      /* the first nc entries of b now hold the least-squares solution */
      for (j=0; j<nc; j++)
         val[j] = b[j];
   }

   hypre_TFree(b, HYPRE_MEMORY_HOST);
   hypre_TFree(a, HYPRE_MEMORY_HOST);
   hypre_TFree(work, HYPRE_MEMORY_HOST);

   return info;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpLS
 *
 * Interpolation built from fitting smooth vectors
 * - sequential version only
 *
 * C-points interpolate as the identity; each F-point gets weights to its
 * strongly-influencing C-neighbors computed by least-squares fitting of
 * the smooth vectors (hypre_BoomerAMGFitVectors).  The off-processor
 * (offd) part of P is NOT built here (marked "undone" below), so P_offd
 * always ends up empty.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpLS( hypre_ParCSRMatrix *A,
                              HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S,
                              HYPRE_BigInt *num_cpts_global,
                              HYPRE_Int num_functions,
                              HYPRE_Int *dof_func,
                              HYPRE_Int debug_flag,
                              HYPRE_Real trunc_factor,
                              HYPRE_Int num_smooth,
                              HYPRE_Real *SmoothVecs,
                              hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   /* HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); */
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   /* HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); */
   HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
   /* HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */
   hypre_ParCSRMatrix *P;
   /* BUG FIX: initialize to NULL.  The offd part of P is never built here,
      so P_offd_size is always 0 and the branch that would assign these
      never runs; they were previously passed *uninitialized* to
      hypre_GetCommPkgRTFromCommPkgA and hypre_TFree (undefined behavior). */
   HYPRE_BigInt *col_map_offd_P = NULL;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_Int *CF_marker_offd;
   HYPRE_Int *dof_func_offd = NULL;
   /* only created (and destroyed) when num_procs > 1; NULL otherwise */
   hypre_CSRMatrix *S_ext = NULL;
   //HYPRE_Real *S_ext_data;
   //HYPRE_Int *S_ext_i;
   //HYPRE_BigInt *S_ext_j;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   HYPRE_Int *P_marker;
   /* HYPRE_Int *P_marker_offd; */
   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   /* HYPRE_Int jj_begin_row,jj_begin_row_offd;
   HYPRE_Int jj_end_row,jj_end_row_offd; */
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int *fine_to_coarse;
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int i,i1;
   HYPRE_Int j,jl,jj;
   HYPRE_Int start;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;
   //HYPRE_BigInt *big_buf_data;
   HYPRE_Real wall_time;  /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_S_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(S);
      comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                num_sends), HYPRE_MEMORY_HOST);

   /* exchange CF_marker for ghost columns */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* exchange dof_func for ghost columns (multi-function systems only) */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of S
    *---------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_procs > 1)
   {
      S_ext      = hypre_ParCSRMatrixExtractBExt(S,S,1);
      //S_ext_i    = hypre_CSRMatrixI(S_ext);
      //S_ext_j    = hypre_CSRMatrixBigJ(S_ext);
      //S_ext_data = hypre_CSRMatrixData(S_ext);
   }
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d  Interp: Comm 2   Get S_ext =  %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid: each thread counts the C-points and the P_diag
    * entries in its contiguous slice of rows.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               /* removed -- offd part of P is not built (see header) */
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.  Prefix-sum the per-thread counters so each thread
    * knows its starting offset in the shared arrays.
    *-----------------------------------------------------------------------*/
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_i[n_fine] = jj_counter;

   /* P_offd_size is always 0 here: jj_count_offd is never incremented */
   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_S_offd, HYPRE_MEMORY_HOST);
   big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                num_sends), HYPRE_MEMORY_HOST);*/

   /* shift each thread's local coarse numbering to a global-local one */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         big_buf_data[index++]
            = my_first_cpt+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data,
                                               fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   if (debug_flag==4) wall_time = time_getWallclockSeconds();*/

   /*-----------------------------------------------------------------------
    * Loop over fine grid points: fill the rows of P.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            HYPRE_Int kk;
            HYPRE_Int indices[1000]; /* kludge */

            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            kk = 0;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  jj_counter++;
                  indices[kk] = i1;
                  kk++;
               }
            }

            /* least-squares fit of the smooth vectors supplies the weights */
            hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs,
                                      kk, indices, &P_diag_data[P_diag_i[i]]);

            /* Off-Diagonal part of P */
            /* undone */
         }
      }
   }
   P_diag_i[i] = jj_counter; /* check that this is in right place for threads */

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(S),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(S),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* compress the offd column space of P (never entered here, since
      P_offd_size is always 0 -- kept for symmetry with the other builders) */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_marker[i] = P_offd_j[i];

      hypre_qsort0(P_marker, 0, P_offd_size-1);

      num_cols_P_offd = 1;
      index = P_marker[0];
      for (i=1; i < P_offd_size; i++)
      {
         if (P_marker[i] > index)
         {
            index = P_marker[i];
            P_marker[num_cols_P_offd++] = index;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_P_offd; i++)
         tmp_map_offd[i] = P_marker[i];

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   /* tmp_map_offd is NULL when P_offd is empty (the usual case here) */
   hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   //hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);

   return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpGSMG
*
* Difference with hypre_BoomerAMGBuildInterp is that S contains values
* and is used to build interpolation weights. Matrix A is not used.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpGSMG( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S);
HYPRE_Int *tmp_map_offd = NULL;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *CF_marker_offd;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *S_ext;
HYPRE_Real *S_ext_data;
HYPRE_Int *S_ext_i;
HYPRE_BigInt *S_ext_j;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *coarse_counter;
//HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_BigInt big_i2;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int start;
HYPRE_Int c_num;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
total_global_cpts = 0; /* we will set this later for the matrix in the setup */
/* if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);*/
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_S_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(S);
comm_pkg = hypre_ParCSRMatrixCommPkg(S);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of S
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixBigJ(S_ext);
S_ext_data = hypre_CSRMatrixData(S_ext);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_S_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
jj_end_row_offd = jj_counter_offd;
/* Loop over ith row of S. First, the diagonal part of S */
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += S_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of S for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
{
i2 = S_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row)
sum += S_diag_data[jj1];
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
{
i2 = S_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd)
sum += S_offd_data[jj1];
}
}
if (sum != 0)
{
distribute = S_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of S for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
{
i2 = S_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row)
P_diag_data[P_marker[i2]]
+= distribute * S_diag_data[jj1];
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
{
i2 = S_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i2]]
+= distribute * S_offd_data[jj1];
}
}
}
else
{
/* do nothing */
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*--------------------------------------------------------------*/
else
{
/* do nothing */
}
}
/*----------------------------------------------------------------
* Still looping over ith row of S. Next, loop over the
* off-diagonal part of S
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += S_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of S_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = S_offd_j[jj];
for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++)
{
big_i2 = S_ext_j[jj1];
if (big_i2 >= col_1 && big_i2 < col_n)
{
/* in the diagonal block */
if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row)
sum += S_ext_data[jj1];
}
else
{
/* in the off_diagonal block */
j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd);
if (j != -1)
{
if (P_marker_offd[j] >= jj_begin_row_offd)
sum += S_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = S_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of S_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++)
{
big_i2 = S_ext_j[jj1];
if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */
{
if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row)
P_diag_data[P_marker[(HYPRE_Int)(big_i2-col_1)]]
+= distribute * S_ext_data[jj1];
}
else
{
/* check to see if it is in the off_diagonal block */
j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd);
if (j != -1)
{
if (P_marker_offd[j] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[j]]
+= distribute * S_ext_data[jj1];
}
}
}
}
else
{
/* do nothing */
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else
{
/* do nothing */
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
sum = 0.;
for (jj = jj_begin_row; jj < jj_end_row; jj++)
sum += P_diag_data[jj];
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
sum += P_offd_data[jj];
for (jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= sum;
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= sum;
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(S),
total_global_cpts,
hypre_ParCSRMatrixColStarts(S),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_marker[i] = P_offd_j[i];
hypre_qsort0(P_marker, 0, P_offd_size-1);
num_cols_P_offd = 1;
index = P_marker[0];
for (i=1; i < P_offd_size; i++)
{
if (P_marker[i] > index)
{
index = P_marker[i];
P_marker[num_cols_P_offd++] = index;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_P_offd; i++)
tmp_map_offd[i] = P_marker[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);
return(0);
}
|
beamform.h | /*
* @Author: Philippe Dales
* @Date: 2018-07-26 14:26:23
* @Last Modified by: Philippe Dales
* @Last Modified time: 2018-07-26 14:26:23
*/
/*
Beamforming functions.
*/
#ifndef BEAMFORM_H
#define BEAMFORM_H
#include "xseis/process.h"
#include "xseis/structures.h"
#include <random>
#include <omp.h>
namespace beamform {
void InterLocBlocks(Array2D<float>& data_cc, Array2D<uint16_t>& ckeys, Array2D<uint16_t>& ttable, Vector<float>& output, uint32_t blocksize=1024 * 8, float scale_pwr=100)
{
	// Migrate stacked cross-correlations onto the travel-time grid in chunks:
	// each OpenMP task owns a disjoint [iblock, iblock+blocklen) slice of
	// `output`, so threads never write to shared cache lines (see Ben Baker
	// migrate). Uses less memory than the per-thread-buffer variant; faster
	// when grid size >> number of ccfs on many cores.
	// NOTE: ttable rows are deliberately NOT alignment-checked -- they may be
	// python-owned memory (asserts don't fire there anyway).
	assert((uintptr_t) data_cc.row(1) % MEM_ALIGNMENT == 0);
	assert((uintptr_t) &output[0] % MEM_ALIGNMENT == 0);
	const uint16_t hlen = data_cc.ncol_ / 2;   // ccf midpoint (zero-lag sample)
	const size_t ncc = data_cc.nrow_;          // number of channel pairs
	const uint32_t ngrid = ttable.ncol_;       // number of grid points
	#pragma omp parallel for
	for (uint32_t iblock = 0; iblock < ngrid; iblock += blocksize) {
		const uint32_t blocklen = std::min(ngrid - iblock, blocksize);
		float *out_ptr = output.data_ + iblock;
		std::fill(out_ptr, out_ptr + blocklen, 0);
		for (size_t i = 0; i < ncc; ++i) {
			uint16_t *tts_sta1 = ttable.row(ckeys(i, 0)) + iblock;
			uint16_t *tts_sta2 = ttable.row(ckeys(i, 1)) + iblock;
			float *cc_ptr = data_cc.row(i);
			// Sample each ccf at the differential travel time of the pair.
			#pragma omp simd aligned(out_ptr, cc_ptr: MEM_ALIGNMENT)
			for (size_t j = 0; j < blocklen; ++j) {
				out_ptr[j] += cc_ptr[hlen + tts_sta2[j] - tts_sta1[j]];
			}
		}
	}
	// Normalize the stacked power by the number of contributing ccfs.
	const float norm = scale_pwr / static_cast<float>(ncc);
	for (size_t i = 0; i < output.size_; ++i) output[i] *= norm;
}
Vector<float> InterLoc(Array2D<float>& data_cc, Array2D<uint16_t>& ckeys, Array2D<uint16_t>& ttable, float scale_pwr=100)
{
	// Migrate all cross-correlations onto the travel-time grid. Each thread
	// accumulates into its own full-grid scratch row (prevents cache-line
	// invalidations between writers); rows are summed at the end.
	const size_t half = data_cc.ncol_ / 2;   // ccf midpoint (zero-lag sample)
	const size_t npairs = data_cc.nrow_;
	const size_t npts = ttable.ncol_;
	const uint nthreads = omp_get_max_threads();
	std::cout << "nthreads: " << nthreads << '\n';
	auto scratch = Array2D<float>(nthreads, npts);
	#pragma omp parallel num_threads(nthreads)
	{
		float *acc = scratch.row(omp_get_thread_num());
		std::fill(acc, acc + npts, 0);
		#pragma omp for
		for (size_t i = 0; i < npairs; ++i)
		{
			uint16_t *t1 = ttable.row(ckeys(i, 0));
			uint16_t *t2 = ttable.row(ckeys(i, 1));
			float *cc = data_cc.row(i);
			// Sample the ccf at the differential travel time of the pair.
			for (size_t j = 0; j < npts; ++j) {
				acc[j] += cc[half + t2[j] - t1[j]];
			}
		}
	}
	// Reduce the per-thread buffers and normalize by the ccf count.
	auto result = scratch.sum_rows();
	const float norm = scale_pwr / static_cast<float>(npairs);
	for (size_t i = 0; i < result.size_; ++i) {
		result[i] *= norm;
	}
	return result;
}
// Migrate a subset (`ix_patch`) of cross-correlation rows onto the grid,
// accumulating into `output` (zeroed first), then normalize by the patch size.
// Single-threaded; the omp simd pragma only vectorizes the inner loop.
void InterLocPatch(Array2D<float>& data_cc, Array2D<uint16_t>& ckeys, std::vector<uint>& ix_patch, Array2D<uint16_t>& ttable, Vector<float>& output, float scale_pwr=100)
{
assert((uintptr_t) output.data_ % MEM_ALIGNMENT == 0);
assert((uintptr_t) data_cc.row(1) % MEM_ALIGNMENT == 0);
assert((uintptr_t) ttable.row(1) % MEM_ALIGNMENT == 0);
const size_t hlen = data_cc.ncol_ / 2;
// const size_t ngrid = ttable.ncol_;
const size_t ngrid = output.size_;
output.fill(0);
float *out_ptr = output.begin();
float *cc_ptr = nullptr;
uint16_t *tts_sta1, *tts_sta2;
size_t ix;
for(size_t i = 0; i < ix_patch.size(); ++i) {
ix = ix_patch[i];
tts_sta1 = ttable.row(ckeys(ix, 0));
tts_sta2 = ttable.row(ckeys(ix, 1));
cc_ptr = data_cc.row(ix);
// Migrate single ccf on to grid based on tt difference
// NOTE(review): the aligned clause below promises MEM_ALIGNMENT for *every*
// ttable row, but only row(1) is asserted above; whether arbitrary rows are
// aligned depends on the row stride -- confirm, or use InterLocPatchNoAlign.
// #pragma omp simd aligned(out_ptr, cc_ptr: MEM_ALIGNMENT)
#pragma omp simd aligned(out_ptr, cc_ptr, tts_sta1, tts_sta2: MEM_ALIGNMENT)
for (size_t j = 0; j < ngrid; ++j) {
out_ptr[j] += cc_ptr[hlen + tts_sta2[j] - tts_sta1[j]];
}
}
// Normalize the stack by the number of contributing ccfs in the patch.
float norm = scale_pwr / static_cast<float>(ix_patch.size());
for(size_t i = 0; i < output.size_; ++i) {
output[i] *= norm;
}
}
void InterLocPatchNoAlign(Array2D<float>& data_cc, Array2D<uint16_t>& ckeys, std::vector<uint>& ix_patch, Array2D<uint16_t>& ttable, Vector<float>& output, float scale_pwr=100)
{
	// Migrate a subset (`ix_patch`) of ccf rows onto the grid. Identical to
	// InterLocPatch except the simd aligned clause omits the travel-time row
	// pointers, so ttable does not have to be MEM_ALIGNMENT-aligned.
	assert((uintptr_t) output.data_ % MEM_ALIGNMENT == 0);
	assert((uintptr_t) data_cc.row(1) % MEM_ALIGNMENT == 0);
	const size_t hlen = data_cc.ncol_ / 2;   // ccf midpoint (zero-lag sample)
	const size_t ngrid = output.size_;
	output.fill(0);
	float *out_ptr = output.begin();
	for (const auto ix : ix_patch) {
		uint16_t *tts_sta1 = ttable.row(ckeys(ix, 0));
		uint16_t *tts_sta2 = ttable.row(ckeys(ix, 1));
		float *cc_ptr = data_cc.row(ix);
		// Sample the ccf at the differential travel time of the pair.
		#pragma omp simd aligned(out_ptr, cc_ptr: MEM_ALIGNMENT)
		for (size_t j = 0; j < ngrid; ++j) {
			out_ptr[j] += cc_ptr[hlen + tts_sta2[j] - tts_sta1[j]];
		}
	}
	// Normalize by the number of contributing ccfs in the patch.
	const float norm = scale_pwr / static_cast<float>(ix_patch.size());
	for (size_t i = 0; i < output.size_; ++i) {
		output[i] *= norm;
	}
}
// Vector<float> InterLocBlocks(Array2D<float>& data_cc, Array2D<uint16_t>& ckeys, Array2D<uint16_t>& ttable, size_t blocksize=1024 * 5, float scale_pwr=100)
// {
// // Divide grid into chunks to prevent cache invalidations during writing (see Ben Baker migrate)
// // This uses less memory but was a bit slower atleast in my typical grid/ccfs sizes
// // UPdate: When grid sizes >> nccfs and using more than 15 cores faster than InterLoc above
// // const size_t cclen = data_cc.ncol_;
// const size_t hlen = data_cc.ncol_ / 2;
// const size_t ncc = data_cc.nrow_;
// const size_t ngrid = ttable.ncol_;
// size_t blocklen;
// uint16_t *tts_sta1, *tts_sta2;
// float *cc_ptr = nullptr;
// float *out_ptr = nullptr;
// auto output = Vector<float>(ngrid);
// output.fill(0);
// // printf("blocksize %lu, ngrid %lu \n", blocksize, ngrid);
// #pragma omp parallel for private(tts_sta1, tts_sta2, cc_ptr, out_ptr, blocklen)
// for(size_t iblock = 0; iblock < ngrid; iblock += blocksize) {
// blocklen = std::min(ngrid - iblock, blocksize);
// out_ptr = output.data_ + iblock;
// // out_ptr = output.data_ + iblock * blocklen;
// // std::fill(out_ptr, out_ptr + blocklen, 0);
// for (size_t i = 0; i < ncc; ++i)
// {
// tts_sta1 = ttable.row(ckeys(i, 0)) + iblock;
// tts_sta2 = ttable.row(ckeys(i, 1)) + iblock;
// cc_ptr = data_cc.row(i);
// // Migrate single ccf on to grid based on tt difference
// #pragma omp simd \
// aligned(tts_sta1, tts_sta2, out_ptr, cc_ptr: MEM_ALIGNMENT)
// for (size_t j = 0; j < blocklen; ++j) {
// out_ptr[j] += cc_ptr[hlen + tts_sta2[j] - tts_sta1[j]];
// }
// }
// }
// float norm = scale_pwr / static_cast<float>(ncc);
// for(size_t i = 0; i < output.size_; ++i) {
// output[i] *= norm;
// }
// // printf("completed\n");
// return output;
// }
void TTCheckValid(Array2D<uint16_t>& ckeys, Array2D<uint16_t>& ttable, uint32_t const wlen)
{
	// Sanity check: for every channel pair and grid point, the differential
	// travel-time index (hlen + tt2 - tt1) must fall inside a cc window of
	// wlen samples. Debug-build only (asserts).
	//
	// Fix: the original computed `hlen + tts_sta2[j] - tts_sta1[j]` with a
	// size_t hlen, so a negative int difference wrapped to a huge unsigned
	// value before the implementation-defined narrowing back to int. Do the
	// arithmetic entirely in int so out-of-window offsets are well-defined.
	const int hlen = static_cast<int>(wlen / 2);
	const size_t ncc = ckeys.nrow_;
	const size_t ngrid = ttable.ncol_;
	for (size_t i = 0; i < ncc; ++i)
	{
		uint16_t *tts_sta1 = ttable.row(ckeys(i, 0));
		uint16_t *tts_sta2 = ttable.row(ckeys(i, 1));
		for (size_t j = 0; j < ngrid; ++j) {
			int cix = hlen + static_cast<int>(tts_sta2[j]) - static_cast<int>(tts_sta1[j]);
			assert(cix >= 0);
			// Signed comparison (original compared int against uint32_t).
			assert(cix < static_cast<int>(wlen));
		}
	}
}
Vector<float> InterLocBlocksPatch(Array2D<float>& data_cc, Array2D<uint16_t>& ckeys, std::vector<uint>& ix_patch, Array2D<uint16_t>& ttable, size_t blocksize=1024 * 5, float scale_pwr=100)
{
	// Chunked migration of a subset (`ix_patch`) of ccf rows onto the grid:
	// each OpenMP task owns a disjoint [iblock, iblock+blocklen) slice of the
	// output, so threads never write to shared cache lines (see Ben Baker
	// migrate). Returns the normalized stacked grid.
	const size_t hlen = data_cc.ncol_ / 2;   // ccf midpoint (zero-lag sample)
	const size_t ngrid = ttable.ncol_;
	size_t blocklen;
	uint16_t *tts_sta1, *tts_sta2;
	float *cc_ptr = nullptr;
	float *out_ptr = nullptr;
	auto output = Vector<float>(ngrid);
	output.fill(0);
	#pragma omp parallel for private(tts_sta1, tts_sta2, cc_ptr, out_ptr, blocklen)
	for(size_t iblock = 0; iblock < ngrid; iblock += blocksize) {
		blocklen = std::min(ngrid - iblock, blocksize);
		out_ptr = output.data_ + iblock;
		for(auto&& i : ix_patch) {
			tts_sta1 = ttable.row(ckeys(i, 0)) + iblock;
			tts_sta2 = ttable.row(ckeys(i, 1)) + iblock;
			cc_ptr = data_cc.row(i);
			// Migrate single ccf on to grid based on tt difference.
			// Fix: the disabled pragma below originally ended its '//' comment
			// with a trailing backslash, which spliced the following source line
			// into the comment (a -Wcomment hazard and easy to mis-edit). Both
			// lines are now commented out explicitly, with no continuation.
			// #pragma omp simd
			// aligned(tts_sta1, tts_sta2, out_ptr, cc_ptr: MEM_ALIGNMENT)
			for (size_t j = 0; j < blocklen; ++j) {
				out_ptr[j] += cc_ptr[hlen + tts_sta2[j] - tts_sta1[j]];
			}
		}
	}
	// Normalize by the number of contributing ccfs in the patch.
	float norm = scale_pwr / static_cast<float>(ix_patch.size());
	for(size_t i = 0; i < output.size_; ++i) {
		output[i] *= norm;
	}
	return output;
}
// Delay and summing raw waveforms for all gridlocs for all possible starttimes
// requires transposed ttable (one row of travel times per grid point).
// For each grid point: shift every channel by its travel time, stack over
// channels for each window start in [tmin, tmax), and keep, per start time,
// the stack value with the largest magnitude together with the grid index
// that produced it (written into wpower / wlocs respectively).
void NaiveSearch(Array2D<float>& data, Array2D<uint16_t>& ttable, size_t tmin, size_t tmax, Vector<float>& wpower, Vector<size_t>& wlocs)
{
// std::cout << data.nrow_ << '\n';
// std::cout << ttable.ncol_ << '\n';
size_t nt = tmax - tmin;    // number of candidate window start times
size_t nsig = data.nrow_;   // number of channels
size_t ngrid = ttable.nrow_;   // number of grid points
// auto tt_ixs = Vector<size_t>(nsig);
auto tmp_stack = Vector<float>(nt);   // stacked trace for the current grid point
process::Fill(wpower, 0);
float* best_vals = wpower.data_;   // best (largest |.|) stack value per start time
size_t* best_locs = wlocs.data_;   // grid index that produced that best value
// auto win_val = Vector<float>(nt);
// auto win_loc = Vector<size_t>(nt);
float *dptr = nullptr;
uint16_t *tt_ixs = nullptr;
// printf("Searching grid points: %lu to %lu\n", gix_start, gix_end);
std::cout << "nt: " << nt << '\n';
std::cout << "ngrid: " << ngrid << '\n';
std::cout << "nsig: " << nsig << '\n';
for (size_t ipt = 0; ipt < ngrid; ++ipt)
{
tmp_stack.fill(0);
tt_ixs = ttable.row(ipt);   // per-channel travel time (samples) to this grid point
if (ipt % 1000 == 0) {
printf("Progress: %.2f \n", ((float)(ipt) / (ngrid) * 100));
}
// For each channel add time comb values to output
for (size_t i = 0; i < nsig; ++i)
{
// NOTE(review): no bounds check -- assumes each data row extends to at
// least tmax + max travel time samples; confirm against the caller.
dptr = data.row(i) + tt_ixs[i] + tmin;
for (size_t j = 0; j < nt; ++j)
{
tmp_stack[j] += dptr[j];
}
}
// #pragma omp simd aligned(tts_sta1, tts_sta2, out_ptr, cc_ptr: MEM_ALIGNMENT)
// Keep the largest-magnitude stack per start time (signed value retained).
for (size_t j = 0; j < nt; ++j)
{
if (std::abs(tmp_stack[j]) > std::abs(best_vals[j])) {
best_vals[j] = tmp_stack[j];
best_locs[j] = ipt;
}
}
}
}
// // Delay and summing raw waveforms for all gridlocs for all possible starttimes
// // requires transposed ttable
// void MFPSum(Array2D<float>& data, Array2D<uint16_t>& ttable, uint32_t wlen, std::vector<size_t> otimes, fftwf_plan& plan_fwd, Vector<float>& wpower, Vector<size_t>& wlocs)
// {
// // std::cout << data.nrow_ << '\n';
// // std::cout << ttable.ncol_ << '\n';
// size_t nt = otimes.size();
// size_t nsig = data.nrow_;
// size_t ngrid = ttable.nrow_;
// uint32_t hlen = wlen / 2;
// // auto tt_ixs = Vector<size_t>(nsig);
// auto tmp_stack = Vector<float>(nt);
// process::Fill(wpower, 0);
// float* best_vals = wpower.data_;
// size_t* best_locs = wlocs.data_;
// // size_t nfreq = wlen / 2 + 1;
// // auto fstack = Vector<fftwf_complex>(nfreq);
// // // auto fsig = Vector<fftwf_complex>(nfreq);
// // // auto fptr = fsig.data_;
// // auto fptr = fftwf_alloc_complex(nfreq);
// auto fstack = Vector<float>(nfreq);
// // printf("Searching grid points: %lu to %lu\n", gix_start, gix_end);
// std::cout << "nt: " << nt << '\n';
// std::cout << "ngrid: " << ngrid << '\n';
// std::cout << "nsig: " << nsig << '\n';
// for (size_t ipt = 0; ipt < ngrid; ++ipt)
// {
// tmp_stack.fill(0);
// uint16_t *tt_ixs = ttable.row(ipt);
// if (ipt % 1000 == 0) {
// printf("Progress: %.2f \n", ((float)(ipt) / (ngrid) * 100));
// }
// for (size_t i = 0; i < nt; ++i)
// {
// size_t ot = otimes[i];
// process::Fill(fstack, 0);
// printf("Ot: %lu \n", ot);
// for (size_t j = 0; j < nsig; ++j)
// {
// // std::cout << "j: " << j << '\n';
// // float *dptr = data.row(j) + ot + tt_ixs[j] - hlen;
// float *dptr = data.row(j) + tt_ixs[j];
// fftwf_execute_dft_r2c(plan_fwd, dptr, fptr);
// std::cout << "descrip: " << ot + tt_ixs[j] - hlen << '\n';
// process::Accumulate(fptr, fstack.data_, nfreq);
// }
// tmp_stack[i] = process::Energy(fstack.data_, nfreq);
// }
// // #pragma omp simd aligned(tts_sta1, tts_sta2, out_ptr, cc_ptr: MEM_ALIGNMENT)
// for (size_t j = 0; j < nt; ++j)
// {
// if (std::abs(tmp_stack[j]) > std::abs(best_vals[j])) {
// best_vals[j] = tmp_stack[j];
// best_locs[j] = ipt;
// }
// }
// }
// }
// Uses constant velocity medium, introduce random errors
// Builds a station x gridpoint travel-time table (in samples), drawing the
// velocity for each entry uniformly from [vel - perturb, vel + perturb].
Array2D<uint16_t> BuildTTablePerturbVel(Array2D<float>& stalocs, Array2D<float>& gridlocs, float vel, float sr, float perturb)
{
	auto ttable = Array2D<uint16_t>(stalocs.nrow_, gridlocs.nrow_);
	std::mt19937::result_type seed = time(0);
	#pragma omp parallel
	{
		// Fix: the original shared one std::mt19937 (via std::bind) across all
		// OpenMP threads, so concurrent calls raced on the engine state (UB).
		// Each thread now owns its own engine, seeded distinctly per thread.
		std::mt19937 eng(seed + static_cast<std::mt19937::result_type>(omp_get_thread_num()));
		std::uniform_real_distribution<float> vel_dist(vel - perturb, vel + perturb);
		#pragma omp for
		for (size_t i = 0; i < ttable.nrow_; ++i)
		{
			float *sloc = stalocs.row(i);
			uint16_t *tt_row = ttable.row(i);
			for (size_t j = 0; j < ttable.ncol_; ++j)
			{
				float dist = process::DistCartesian(sloc, gridlocs.row(j));
				// Samples = distance * sr / velocity, rounded to nearest.
				tt_row[j] = static_cast<uint16_t>(dist * sr / vel_dist(eng) + 0.5);
			}
		}
	}
	return ttable;
}
// void FillTravelTimeTable(Array2D<float>& stalocs, Array2D<float>& gridlocs, float vel, float sr, Array2D<uint16_t>& ttable)
// {
// float vsr = sr / vel;
// float dist;
// float *sloc = nullptr;
// uint16_t *tt_row = nullptr;
// #pragma omp parallel for private(sloc, tt_row, dist)
// for (size_t i = 0; i < ttable.nrow_; ++i)
// {
// sloc = stalocs.row(i);
// tt_row = ttable.row(i);
// for (size_t j = 0; j < ttable.ncol_; ++j)
// {
// dist = process::DistCartesian(sloc, gridlocs.row(j));
// tt_row[j] = static_cast<uint16_t>(dist * vsr + 0.5);
// }
// }
// }
void FillTravelTimeTable(Array2D<float>& locs1, Array2D<float>& locs2, float vel, float sr, Array2D<uint16_t>& ttable)
{
	// Fill `ttable` with straight-ray travel times (in samples, rounded to
	// nearest) from every row of locs1 to every row of locs2 at constant
	// velocity `vel`, given sampling rate `sr`.
	const float vsr = sr / vel;   // samples per unit distance
	#pragma omp parallel for
	for (size_t i = 0; i < ttable.nrow_; ++i)
	{
		float *src = locs1.row(i);
		uint16_t *tt_row = ttable.row(i);
		for (size_t j = 0; j < ttable.ncol_; ++j)
		{
			const float d = process::DistCartesian(src, locs2.row(j));
			tt_row[j] = static_cast<uint16_t>(d * vsr + 0.5);
		}
	}
}
// Uses constant velocity medium
// Returns a station x gridpoint table of straight-ray travel times in
// samples (rounded to nearest), at constant velocity `vel` and sampling
// rate `sr`.
Array2D<uint16_t> BuildTravelTimeTable(Array2D<float>& stalocs, Array2D<float>& gridlocs, float vel, float sr)
{
	auto ttable = Array2D<uint16_t>(stalocs.nrow_, gridlocs.nrow_);
	const float vsr = sr / vel;   // samples per unit distance
	#pragma omp parallel for
	for (size_t i = 0; i < ttable.nrow_; ++i)
	{
		float *src = stalocs.row(i);
		uint16_t *tt_row = ttable.row(i);
		for (size_t j = 0; j < ttable.ncol_; ++j)
		{
			const float d = process::DistCartesian(src, gridlocs.row(j));
			tt_row[j] = static_cast<uint16_t>(d * vsr + 0.5);
		}
	}
	return ttable;
}
// Uses 1D effective velocity model (1 value per meter)
// Returns a station x gridpoint table of travel times (in samples, rounded to
// nearest), where each grid point's velocity is looked up from `vel_effective`
// by the point's (clamped) z coordinate.
Array2D<uint16_t> BuildTravelTimeTable(Array2D<float>& stalocs, Array2D<float>& gridlocs, Vector<float>& vel_effective, float sr)
{
	size_t ngrid = gridlocs.nrow_;
	size_t nsta = stalocs.nrow_;
	auto ttable = Array2D<uint16_t>(nsta, ngrid);
	// Precompute "samples per unit distance" for each depth entry of the model.
	auto vsr = Vector<float>(vel_effective.size_);
	for (size_t i = 0; i < vsr.size_; ++i)
	{
		vsr[i] = sr / vel_effective[i];
	}
	// Per-grid-point sampling factor, indexed by the point's depth.
	// Assumes gridlocs stores flat (x, y, z) triplets -- TODO confirm.
	auto vsr_grid = Vector<float>(ngrid);
	const float zmax = static_cast<float>(vel_effective.size_ - 1);
	for (size_t i = 0; i < ngrid; ++i){
		float zval = gridlocs[i * 3 + 2];
		// Fix: clamp to BOTH ends of the model. The original only clamped
		// negative depths, so points deeper than the model read past the end
		// of vel_effective (and the uint16_t cast truncated large depths).
		if (zval < 0) {
			zval = 0;
		}
		else if (zval > zmax) {
			zval = zmax;
		}
		vsr_grid[i] = vsr[static_cast<size_t>(zval)];
	}
	float dist;
	float *sloc = nullptr;
	uint16_t *tt_row = nullptr;
	#pragma omp parallel for private(sloc, tt_row, dist)
	for (size_t i = 0; i < nsta; ++i)
	{
		sloc = stalocs.row(i);
		tt_row = ttable.row(i);
		for (size_t j = 0; j < ngrid; ++j)
		{
			dist = process::DistCartesian(sloc, gridlocs.row(j));
			tt_row[j] = static_cast<uint16_t>(dist * vsr_grid[j] + 0.5);
		}
	}
	return ttable;
}
Vector<uint16_t> GetTTOneToMany(float* loc_src, Array2D<float>& locs, float vel, float sr)
{
	// Straight-ray travel times (in samples, rounded to nearest) from one
	// source location to every row of `locs`, at constant velocity `vel`.
	const size_t nlocs = locs.nrow_;
	auto tts = Vector<uint16_t>(nlocs);
	const float vsr = sr / vel;   // samples per unit distance
	for (size_t j = 0; j < nlocs; ++j)
	{
		const float d = process::DistCartesian(loc_src, locs.row(j));
		tts[j] = static_cast<uint16_t>(d * vsr + 0.5);
	}
	return tts;
}
// Number of unordered pairs drawable from n items: n * (n - 1) / 2.
// (n * (n - 1) is always even, so the division is exact; yields 0 for n < 2.)
size_t NChoose2(size_t n)
{
    return n * (n - 1) / 2;
}
// Enumerates all unordered index pairs (i, j) with i < j for nsig signals.
// Returns an (NChoose2(nsig) x 2) array; row k holds one pair.
Array2D<uint16_t> unique_pairs(uint nsig)
{
    auto ckeys = Array2D<uint16_t>(NChoose2(nsig), 2);
    size_t row_ix = 0;
    for (uint i = 0; i < nsig; ++i)
    {
        for (uint j = i + 1; j < nsig; ++j)
        {
            ckeys(row_ix, 0) = i;
            ckeys(row_ix, 1) = j;
            row_ix += 1;
        }
    }
    // FIX: removed leftover std::cout debug prints of row_ix / nrow_ that
    // polluted stdout on every call.
    return ckeys;
}
// Enumerates all unordered pairs of the given keys.
// Returns an (NChoose2(n) x 2) array; row k holds (keys[i], keys[j]), i < j.
Array2D<uint16_t> unique_pairs(Vector<uint16_t>& keys)
{
    // FIX: the pair count is exactly n-choose-2; the previous O(n^2)
    // counting loop computed the same value the slow way.
    size_t npair = NChoose2(keys.size_);
    auto ckeys = Array2D<uint16_t>(npair, 2);
    size_t row_ix = 0;
    for (uint i = 0; i < keys.size_; ++i)
    {
        for (uint j = i + 1; j < keys.size_; ++j)
        {
            ckeys(row_ix, 0) = keys[i];
            ckeys(row_ix, 1) = keys[j];
            row_ix += 1;
        }
    }
    return ckeys;
}
// Flattened list of all unordered key pairs: {k0,k1, k0,k2, ...},
// two entries per pair.
std::vector<uint16_t> UniquePairsFlat(Vector<uint16_t>& keys)
{
    std::vector<uint16_t> pairs;
    const size_t n = keys.size_;
    if (n >= 2) {
        pairs.reserve(n * (n - 1));  // 2 entries for each of n*(n-1)/2 pairs
    }
    for (uint a = 0; a < n; ++a)
    {
        for (uint b = a + 1; b < n; ++b)
        {
            pairs.push_back(keys[a]);
            pairs.push_back(keys[b]);
        }
    }
    return pairs;
}
// Flattened list of station-key pairs whose inter-station distance lies in
// (min_dist, max_dist). When ang_filt is set, pairs whose angle falls inside
// the open band (min_ang, max_ang) are additionally rejected (hard-coded
// band; presumably suppresses a noisy direction -- TODO confirm).
std::vector<uint16_t> AllPairsFilt(Vector<uint16_t>& keys, Array2D<float>& stalocs, float min_dist, float max_dist, bool ang_filt=true)
{
    const float min_ang = -0.14;
    const float max_ang = -0.10;
    std::vector<uint16_t> out;

    for (uint a = 0; a < keys.size_; ++a)
    {
        float* loc1 = stalocs.row(keys[a]);
        for (uint b = a + 1; b < keys.size_; ++b)
        {
            float* loc2 = stalocs.row(keys[b]);
            float d = process::DistCartesian(loc1, loc2);
            if (d <= min_dist || d >= max_dist) {
                continue;  // outside the accepted distance band
            }
            if (ang_filt)
            {
                float ang = process::AngleBetweenPoints(loc1, loc2);
                if (min_ang < ang && ang < max_ang) {
                    continue;  // inside the excluded angle band
                }
            }
            out.push_back(keys[a]);
            out.push_back(keys[b]);
        }
    }
    return out;
}
// All station-key pairs whose inter-station distance lies in
// (min_dist, max_dist). Returns an (npair x 2) array of surviving pairs.
Array2D<uint16_t> AllPairsDistFilt(Vector<uint16_t>& keys, Array2D<float>& stalocs, float min_dist, float max_dist)
{
    size_t npair_max = NChoose2(keys.size_);
    // FIX: %zu is the portable conversion for size_t (%lu is wrong on LLP64).
    printf("max pairs %zu\n", npair_max);
    auto ckeys = Array2D<uint16_t>(npair_max, 2);
    size_t row_ix = 0;
    for (uint i = 0; i < keys.size_; ++i)
    {
        float* loc1 = stalocs.row(keys[i]);  // invariant over j
        for (uint j = i + 1; j < keys.size_; ++j)
        {
            float* loc2 = stalocs.row(keys[j]);
            float dist = process::DistCartesian(loc1, loc2);
            if (dist > min_dist && dist < max_dist)
            {
                ckeys(row_ix, 0) = keys[i];
                ckeys(row_ix, 1) = keys[j];
                row_ix += 1;
            }
        }
    }
    // Shrink to the rows actually written. FIX: use size_t for the copy
    // index so element counts beyond UINT_MAX are not truncated.
    auto ckeys2 = Array2D<uint16_t>(row_ix, 2);
    for (size_t i = 0; i < ckeys2.size_; ++i) {
        ckeys2[i] = ckeys[i];
    }
    return ckeys2;
}
// All station-key pairs passing both a distance band (min_dist, max_dist)
// and an angle rejection band: pairs whose angle lies in [-0.14, -0.10]
// are discarded (hard-coded band -- TODO confirm intent).
Array2D<uint16_t> AllPairsDistAngleFilt(Vector<uint16_t>& keys, Array2D<float>& stalocs, float min_dist, float max_dist)
{
    size_t npair_max = NChoose2(keys.size_);
    // FIX: %zu is the portable conversion for size_t (%lu is wrong on LLP64).
    printf("max pairs %zu\n", npair_max);
    auto ckeys = Array2D<uint16_t>(npair_max, 2);
    size_t row_ix = 0;
    // Named constants instead of duplicated magic numbers (double, to keep
    // the original comparison semantics exactly).
    const double kAngMin = -0.14;
    const double kAngMax = -0.10;
    for (uint i = 0; i < keys.size_; ++i)
    {
        float* loc1 = stalocs.row(keys[i]);  // invariant over j
        for (uint j = i + 1; j < keys.size_; ++j)
        {
            float* loc2 = stalocs.row(keys[j]);
            float dist = process::DistCartesian(loc1, loc2);
            if (dist > min_dist && dist < max_dist)
            {
                float angle = process::AngleBetweenPoints(loc1, loc2);
                if (angle < kAngMin || angle > kAngMax) {
                    ckeys(row_ix, 0) = keys[i];
                    ckeys(row_ix, 1) = keys[j];
                    row_ix += 1;
                }
            }
        }
    }
    // Shrink to the rows actually written. FIX: size_t copy index avoids
    // truncation for very large pair counts.
    auto ckeys2 = Array2D<uint16_t>(row_ix, 2);
    for (size_t i = 0; i < ckeys2.size_; ++i) {
        ckeys2[i] = ckeys[i];
    }
    return ckeys2;
}
// Randomly samples station-key pairs until `ncc` pairs satisfy the distance
// band (min_dist, max_dist). Returns an (ncc x 2) array of key pairs.
// NOTE(review): loops forever if no admissible pair exists; assumes keys is
// non-empty and the filter is satisfiable.
Array2D<uint16_t> BuildNPairsDistFilt(Vector<uint16_t>& keys, Array2D<float>& stalocs, float min_dist, float max_dist, uint ncc)
{
    auto ckeys = Array2D<uint16_t>(ncc, 2);
    std::mt19937::result_type seed = time(0);
    // FIX: uniform_int_distribution bounds are INCLUSIVE; the previous upper
    // bound of keys.size_ allowed an out-of-bounds read of keys.
    auto rand_int = std::bind(std::uniform_int_distribution<uint>(0, keys.size_ - 1), std::mt19937(seed));
    uint16_t k1, k2;
    float dist;
    float* loc1 = nullptr;
    float* loc2 = nullptr;
    uint i = 0;
    while (i < ncc) {
        k1 = rand_int();
        k2 = rand_int();
        if (k1 != k2) {
            loc1 = stalocs.row(keys[k1]);
            loc2 = stalocs.row(keys[k2]);
            dist = process::DistCartesian(loc1, loc2);
            if (dist > min_dist && dist < max_dist)
            {
                ckeys.row(i)[0] = keys[k1];
                ckeys.row(i)[1] = keys[k2];
                i++;
            }
        }
    }
    return ckeys;
}
// Randomly samples station-key pairs until `ncc` pairs satisfy both the
// distance band (min_dist, max_dist) and the hard-coded angle rejection band
// ([-0.14, -0.10] excluded). Returns an (ncc x 2) array of key pairs.
// NOTE(review): loops forever if no admissible pair exists; assumes keys is
// non-empty and the filters are satisfiable.
Array2D<uint16_t> BuildNPairsDistAngleFilt(Vector<uint16_t>& keys, Array2D<float>& stalocs, float min_dist, float max_dist, uint ncc)
{
    auto ckeys = Array2D<uint16_t>(ncc, 2);
    std::mt19937::result_type seed = time(0);
    // FIX: uniform_int_distribution bounds are INCLUSIVE; the previous upper
    // bound of keys.size_ allowed an out-of-bounds read of keys.
    auto rand_int = std::bind(std::uniform_int_distribution<uint>(0, keys.size_ - 1), std::mt19937(seed));
    uint16_t k1, k2;
    float dist, angle;
    float* loc1 = nullptr;
    float* loc2 = nullptr;
    uint i = 0;
    while (i < ncc) {
        k1 = rand_int();
        k2 = rand_int();
        if (k1 != k2) {
            loc1 = stalocs.row(keys[k1]);
            loc2 = stalocs.row(keys[k2]);
            dist = process::DistCartesian(loc1, loc2);
            if (dist > min_dist && dist < max_dist)
            {
                angle = process::AngleBetweenPoints(loc1, loc2);
                // Same rejection band as AllPairsDistAngleFilt.
                if (angle < -0.14 || angle > -0.10) {
                    ckeys.row(i)[0] = keys[k1];
                    ckeys.row(i)[1] = keys[k2];
                    i++;
                }
            }
        }
    }
    return ckeys;
}
// Counts the station-key pairs that survive both the distance band
// (min_dist, max_dist) and the hard-coded angle rejection band
// ([-0.14, -0.10] excluded). Also prints the total pair count examined.
uint TotalNPairsDistAngleFilt(Vector<uint16_t>& keys, Array2D<float>& stalocs, float min_dist, float max_dist)
{
    // FIX: removed unused locals (k1, k2) and dead commented-out RNG setup.
    uint ncc = 0;
    uint ntot = 0;
    for (size_t i = 0; i < keys.size_; ++i) {
        // FIX: loc1 depends only on i -- hoisted out of the inner loop.
        float* loc1 = stalocs.row(keys[i]);
        for (size_t j = i + 1; j < keys.size_; ++j) {
            ntot++;
            float* loc2 = stalocs.row(keys[j]);
            float dist = process::DistCartesian(loc1, loc2);
            if (dist > min_dist && dist < max_dist)
            {
                float angle = process::AngleBetweenPoints(loc1, loc2);
                // Same rejection band as AllPairsDistAngleFilt.
                if (angle < -0.14 || angle > -0.10) {
                    ncc++;
                }
            }
        }
    }
    std::cout << "nt: " << ntot << '\n';
    return ncc;
}
// Vector<uint16_t> GetStationKeysNear(Vector<float>& loc, Array2D<float>& stalocs, float max_dist) {
// std::vector<uint16_t> stakeep;
// float dist;
// for(size_t i = 0; i < stalocs.nrow_; ++i) {
// dist = process::DistCartesian2D(loc.data_, stalocs.row(i));
// if(dist < max_dist) {
// stakeep.push_back(i);
// }
// }
// auto out = Vector<uint16_t>(stakeep);
// return out;
// }
// Vector<float> DistDiffFromCkeys(Array2D<uint16_t>& ckeys, Array2D<float>& stalocs, float sr) {
// auto dist_diff = Vector<float>(ckeys.nrow_);
// uint16_t *ckp = nullptr;
// for(size_t i = 0; i < ckeys.nrow_; ++i) {
// ckp = ckeys.row(i);
// dist_diff[i] = process::DistCartesian(stalocs.row(ckp[0]), stalocs.row(ckp[1]));
// }
// return dist_diff;
// }
// // Build groups of ckeys for stas within radius of mid_stas
// std::vector<std::vector<uint16_t>> CkeyPatchesFromStations(std::vector<uint>& mid_stas, Array2D<float>& stalocs, float radius, float cdist_min, float cdist_max, bool ang_filt=true)
// {
// // std::vector<uint16_t> ckeys_vec;
// std::vector<std::vector<uint16_t>> patches;
// for(auto&& ix : mid_stas) {
// auto loc_patch = stalocs.row_view(ix);
// auto pkeys = GetStationKeysNear(loc_patch, stalocs, radius);
// patches.push_back(AllPairsFilt(pkeys, stalocs, cdist_min, cdist_max, ang_filt));
// }
// return patches;
// }
// // Builds ckey index from groups of ckeys
// std::vector<std::vector<uint>> IndexesFromCkeyPatches(std::vector<std::vector<uint16_t>>& patches)
// {
// std::vector<std::vector<uint>> ipatches;
// size_t csum = 0;
// for(auto&& patch : patches) {
// size_t ncc = patch.size() / 2;
// std::vector<uint> ipatch;
// ipatch.reserve(ncc);
// for(size_t i = 0; i < ncc; ++i) {
// ipatch.push_back(i + csum);
// }
// ipatches.push_back(ipatch);
// csum += ncc;
// }
// return ipatches;
// }
// // Builds ckey index from groups of ckeys
// Array2D<uint16_t> CkeysFromPatches(std::vector<std::vector<uint16_t>>& patches)
// {
// // std::vector<std::vector<size_t>> ipatches;
// size_t ncc = 0;
// for(auto&& patch : patches) {ncc += patch.size() / 2;}
// auto ckeys = Array2D<uint16_t>(ncc, 2);
// size_t csum = 0;
// auto ptr = ckeys.data_;
// for(auto&& vec : patches) {
// std::copy(vec.begin(), vec.end(), ptr);
// ptr += vec.size();
// }
// return ckeys;
// }
// ckeys_vec.insert(ckeys_vec.end(), ck_patch.begin(), ck_patch.end());
// uint patch_len = ck_patch.size() / 2;
// std::vector<uint> ipatch(patch_len);
// for(size_t i = 0; i < patch_len; ++i) {
// ipatch.push_back(i + csum);
// }
// ipatches.push_back(ipatch);
// csum += patch_len;
// std::cout << "nkeys: " << patch_len << '\n';
// Peak statistic of a grid search: {max power, x, y, z} where (x, y, z) is
// the location row of the argmax grid point.
std::vector<float> MaxAndLoc(Vector<float>& power, Array2D<float>& gridlocs) {
    size_t imax = static_cast<size_t>(
        std::max_element(power.begin(), power.end()) - power.data_);
    float* loc = gridlocs.row(imax);
    return {power[imax], loc[0], loc[1], loc[2]};
}
// {scaled max value, argmax index} of power, both as uint32_t.
std::vector<uint32_t> VAMax(Vector<float>& power, uint32_t scale=10000) {
    const uint32_t imax = process::argmax(power);
    const uint32_t vscaled = static_cast<uint32_t>(power[imax] * scale);
    return {vscaled, imax};
}
// {scaled z-score of the peak, argmax index}: (max - mean) / sd * scale,
// truncated to uint32_t.
std::vector<uint32_t> VASMax(Vector<float>& power, uint32_t scale=10000) {
    const float sd = process::standard_deviation(power.data_, power.size_);
    const float mu = process::mean(power.data_, power.size_);
    const uint32_t imax = process::argmax(power);
    const uint32_t zscaled = static_cast<uint32_t>((power[imax] - mu) / sd * scale);
    return {zscaled, imax};
}
// Peak statistic based on the median absolute deviation (MAD):
// returns {scaled (max - median) / MAD, argmax index}.
// WARNING: destroys the input -- power is partially sorted in place and then
// overwritten with absolute deviations from the median.
std::vector<uint32_t> MADMax(Vector<float>& power, uint32_t scale=10000) {
// destroys input
size_t npts = power.size_;
auto ptr = power.data_;
uint32_t amax = process::argmax(power);
float vmax = power[amax];
size_t half = npts / 2;
// nth_element places the element at index `half` as if fully sorted
// (upper median for even npts); cheaper than a full sort.
std::nth_element(ptr, ptr + half, ptr + npts);
float med = ptr[half];
for(size_t i = 0; i < npts; ++i) {
ptr[i] = std::abs(ptr[i] - med);
}
// median of the absolute deviations = MAD
std::nth_element(ptr, ptr + half, ptr + npts);
float mad = ptr[half];
// robust peak score: deviation of the max from the median, in MAD units,
// scaled and truncated to an integer
uint32_t mdev = static_cast<uint32_t>((vmax - med) / mad * scale) ;
std::vector<uint32_t> stats = {mdev, amax};
return stats;
}
// Returns (power[i] - median) / MAD for every sample, i.e. the input
// MAD-normalized. NOTE(review): despite the old "destroys input" comment,
// the input IS restored -- the final loop writes the saved copy back into
// `power` while emitting the normalized values. The `scale` parameter is
// accepted but never used here -- TODO confirm whether scaling was intended.
Vector<float> out2MAD(Vector<float>& power, uint32_t scale=10000) {
size_t npts = power.size_;
auto ptr = power.data_;
auto out = Vector<float>(npts);
// save the original samples (ptr is about to be permuted by nth_element)
process::Copy(ptr, npts, out.data_);
size_t half = npts / 2;
std::nth_element(ptr, ptr + half, ptr + npts);
float med = ptr[half];
for(size_t i = 0; i < npts; ++i) {
ptr[i] = std::abs(ptr[i] - med);
}
// median of absolute deviations = MAD
std::nth_element(ptr, ptr + half, ptr + npts);
float mad = ptr[half];
for(size_t i = 0; i < npts; ++i) {
float val = (out[i] - med) / mad;
// restore the original sample, emit the normalized one
ptr[i] = out[i];
out[i] = val;
}
return out;
}
// Summary statistics of power: {argmax index (as float), max, mean, sd}.
std::vector<float> VAMMax(Vector<float>& power) {
    const float sd = process::standard_deviation(power.data_, power.size_);
    const float mu = process::mean(power.data_, power.size_);
    const size_t imax = process::argmax(power);
    return {static_cast<float>(imax), power[imax], mu, sd};
}
// {max / sd, x, y, z}: the peak power normalized by the standard deviation,
// plus the location row of the argmax grid point.
std::vector<float> SDMaxAndLoc(Vector<float>& power, Array2D<float>& gridlocs) {
    size_t imax = static_cast<size_t>(
        std::max_element(power.begin(), power.end()) - power.data_);
    float* loc = gridlocs.row(imax);
    float sd = process::standard_deviation(power.data_, power.size_);
    return {power[imax] / sd, loc[0], loc[1], loc[2]};
}
// Array2D<float> InterLocOld(Array2D<float>& data_cc, Array2D<uint16_t>& ckeys, Array2D<uint16_t>& ttable, uint16_t nthreads)
// {
// // Each thread given own output buffer to prevent cache invalidations
// const size_t cclen = data_cc.ncol_;
// const size_t ncc = data_cc.nrow_;
// const size_t ngrid = ttable.ncol_;
// uint16_t *tts_sta1, *tts_sta2;
// float *cc_ptr = nullptr;
// auto output = Array2D<float>(nthreads, ngrid);
// size_t niter = 0;
// #pragma omp parallel private(tts_sta1, tts_sta2, cc_ptr) num_threads(nthreads)
// {
// float *out_ptr = output.row(omp_get_thread_num());
// std::fill(out_ptr, out_ptr + ngrid, 0);
// // play around with omp loop scheduling here
// #pragma omp for
// for (size_t i = 0; i < ncc; ++i)
// {
// // if (i % 10000 == 0) {
// // printf("Prog: %.2f \r", ((float) i / ncc * 100));
// // std::cout.flush();
// // }
// tts_sta1 = ttable.row(ckeys(i, 0));
// tts_sta2 = ttable.row(ckeys(i, 1));
// cc_ptr = data_cc.row(i);
// // Migrate single ccf on to grid based on tt difference
// #pragma omp simd \
// aligned(tts_sta1, tts_sta2, out_ptr, cc_ptr: MEM_ALIGNMENT)
// for (size_t j = 0; j < ngrid; ++j)
// {
// // Get appropriate ix of unrolled ccfs (same as mod_floor)
// // by wrapping negative traveltime differences
// // if-else much faster than more elegant mod function
// if (tts_sta2[j] >= tts_sta1[j])
// {
// out_ptr[j] += cc_ptr[tts_sta2[j] - tts_sta1[j]];
// }
// else
// {
// out_ptr[j] += cc_ptr[cclen - tts_sta1[j] + tts_sta2[j]];
// }
// }
// }
// }
// return output;
// }
// std::vector<uint16_t> GetStationKeysNear(Vector<float>& loc, Array2D<float>& stalocs, float max_dist) {
// std::vector<uint16_t> stakeep;
// float dist;
// for(size_t i = 0; i < stalocs.nrow_; ++i) {
// dist = process::DistCartesian2D(loc.data_, stalocs.row(i));
// if(dist < max_dist) {
// stakeep.push_back(i);
// }
// }
// // auto out = Vector<uint16_t>(stakeep);
// return stakeep;
// }
// Array2D<float> EnergyCC(Array2D<float>& data_cc) {
// /* code */
// }
// auto energy_cc = Array2D<float>(data_cc.nrow_, 4);
// float e1;
// float e2;
// float enoise1;
// float enoise2;
// float *sig = nullptr;
// uint ixp;
// uint hlen = npts / 2;
// for(size_t i = 0; i < data_cc.nrow_; ++i) {
// sig = data_cc.row(i);
// ixp = ixphys[i];
// printf("%d\n", i);
// enoise1 = process::rms_energy(sig + ixp, hlen - ixp);
// enoise2 = process::rms_energy(sig + hlen, hlen - ixp);
// e1 = process::rms_energy(sig, ixphys[i]);
// e2 = process::rms_energy(sig + npts - ixphys[i], npts);
// energy_cc(i, 0) = e1;
// energy_cc(i, 1) = e2;
// energy_cc(i, 2) = enoise1;
// energy_cc(i, 3) = enoise2;
// }
// void tt_homo_ix(Array2D<float> &sta_locs, float *src_loc, float vsr, Vector<size_t> &tts)
// {
// float dist;
// for (size_t j = 0; j < tts.size_; ++j) {
// dist = beamform::process::DistCartesian(src_loc, sta_locs.row(j));
// tts[j] = static_cast<size_t>(dist * vsr + 0.5);
// }
// }
// void tt_homo(Vector<float> &tts, Array2D<float> &sta_locs, float *src_loc, float velocity)
// {
// float dist;
// for (int32_t j = 0; j < sta_locs.nrow_; ++j) {
// // dist = process::DistCartesian(src_loc, &sta_locs[j]);
// dist = process::DistCartesian(src_loc, sta_locs.row(j));
// tts[j] = dist / velocity;
// }
// }
// void tt_diff(float *tts, float *tts_cc, int *ckeys, int ncc)
// {
// int key1, key2;
// for (int i = 0; i < ncc; ++i) {
// key1 = ckeys[i * 2];
// key2 = ckeys[i * 2 + 1];
// // printf("%d_%d\n", key1, key2);
// tts_cc[i] = tts[key2] - tts[key1];
// }
// }
// float slant_stack(Array2D<float> &data, Vector<float> &tts, Vector<int> &ix_keep, float src_time, float sr)
// {
// float sum = 0;
// int nvalid = 0;
// int32_t col_ix;
// for (int32_t i = 0; i < ix_keep.size_; ++i) {
// col_ix = (src_time + tts[i]) * sr;
// if (0 <= col_ix && col_ix < data.ncol_) {
// sum += data(i, col_ix);
// // printf("i: %d col_ix: %d, val: %.2f\n", i, col_ix, sig_ptr[col_ix]);
// nvalid++;
// }
// }
// if (nvalid == 0){
// return 0;
// }
// else {
// return sum / nvalid;
// }
// }
// float slant_stack_no_check(Array2D<float> &data, Vector<float> &tts, Vector<int> &ix_keep, float src_time, float sr){
// // No bounds checking, care for segfaults
// float sum = 0;
// // float t_exp;
// // int32_t col_ix;
// int col_ix;
// for (int32_t i = 0; i < ix_keep.size_; ++i) {
// // col_ix = (int32_t) (sr * (src_time + tts[i]));
// // col_ix = (int32_t) (sr * (src_time + tts[i]));
// // col_ix = static_cast<int32_t>((src_time + tts[i]) * sr);
// col_ix = static_cast<int>((src_time + tts[i]) * sr);
// // col_ix = (int) ((src_time + tts[i]) * sr);
// sum += data(i, col_ix);
// // sum += data(i, col_ix);
// }
// return sum / ix_keep.size_;
// }
// void beampower_homo(Array2D<float> &points, Vector<float> &out, Array2D<float> &data, Vector<float> &tts, Array2D<float> &sta_locs, Vector<int> &ix_keep, float velocity, float src_time, float sr) {
// // Vector<float> &tts2 = Vector<float>(&&tts[0], tts.size_);
// for (int32_t i = 0; i < points.nrow_; ++i) {
// tt_homo(tts, sta_locs, points.row(i), velocity);
// // tts2.set_data(&tts[0]);
// // out[i] = slant_stack(data, tts, ix_keep, src_time, sr);
// out[i] = slant_stack_no_check(data, tts, ix_keep, src_time, sr);
// }
// }
// void search_grid(Array2D<float>& data, Array2D<float>& locs,
// Grid& grid, size_t gix_start, size_t gix_end,
// size_t nt_search, float vsr,
// float* win_val, size_t* win_loc)
// {
// size_t nchan = data.nrow_;
// auto tt_ixs = Vector<size_t>(nchan);
// auto output = Vector<float>(nt_search);
// float src_loc[3];
// float *dptr = nullptr;
// float dist;
// printf("Searching grid points: %lu to %lu\n", gix_start, gix_end);
// for (size_t ipt = gix_start; ipt < gix_end; ++ipt)
// {
// // if (ipt % 1000 == 0) {printf("Point: %d / %d\n", ipt, (int) gix_end);}
// // grid.get_point(ipt, src_loc);
// if (ipt % 1000 == 0) {
// printf("Progress: %.2f \n", ((float)(ipt - gix_start) / (gix_end - gix_start) * 100));
// }
// grid.get_point(ipt, src_loc);
// beamform::tt_homo_ix(locs, src_loc, vsr, tt_ixs);
// output.fill(0);
// // For each channel add time comb values to output
// for (size_t i = 0; i < nchan; ++i)
// {
// dptr = data.row(i) + tt_ixs[i];
// for (size_t j = 0; j < nt_search; j++)
// {
// output[j] += dptr[j];
// }
// }
// for (size_t j = 0; j < nt_search; ++j)
// {
// if (std::abs(output[j]) > std::abs(win_val[j])) {
// win_val[j] = output[j];
// win_loc[j] = ipt;
// }
// }
// }
// }
// void search_grid_parallel(std::vector<size_t>& parts, Array2D<float>& data, Array2D<float>& locs, Grid& grid, size_t nt_search, float vsr, Array2D<float>& win_val, Array2D<size_t>& win_loc)
// {
// std::vector<std::thread> pool;
// for (size_t i = 0; i < parts.size() - 1; i++){
// pool.push_back(std::thread([=, &data, &locs, &grid, &win_val, &win_loc] {search_grid(data, locs, grid, parts[i], parts[i + 1], nt_search, vsr, win_val.row(i), win_loc.row(i));}));
// }
// for(auto& thread : pool) thread.join();
// }
}
#endif
|
GB_binop__plus_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__plus_uint32
// A.*B function (eWiseMult): GB_AemultB__plus_uint32
// A*D function (colscale): GB_AxD__plus_uint32
// D*A function (rowscale): GB_DxB__plus_uint32
// C+=B function (dense accum): GB_Cdense_accumB__plus_uint32
// C+=b function (dense accum): GB_Cdense_accumb__plus_uint32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_uint32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_uint32
// C=scalar+B GB_bind1st__plus_uint32
// C=scalar+B' GB_bind1st_tran__plus_uint32
// C=A+scalar GB_bind2nd__plus_uint32
// C=A'+scalar GB_bind2nd_tran__plus_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x + y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_UINT32 || GxB_NO_PLUS_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, B are all dense, using PLUS on uint32_t.
// The numeric loop comes from the included template; the GB_* macros defined
// earlier in this file bind the types and cij = aij + bij.
// Note: returns void -- unlike its siblings, this kernel has no GB_DISABLE path.
void GB_Cdense_ewise3_accum__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, B are all dense (PLUS, uint32_t).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_Cdense_ewise3_noaccum__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C
// (PLUS, uint32_t). The *_slice arrays describe the per-task partition of
// B's entries for the parallel template (see GB_ek_slice.h).
GrB_Info GB_Cdense_accumB__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b into the dense matrix C (PLUS, uint32_t).
GrB_Info GB_Cdense_accumb__plus_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the block above already returned (harmless
// generator artifact).
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D (PLUS, uint32_t).
// The *_slice arrays partition the work across ntasks for the template.
GrB_Info GB_AxD__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the result values directly into C->x
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (PLUS, uint32_t).
GrB_Info GB_DxB__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the result values directly into C->x
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (PLUS, uint32_t); the result pattern is
// the set union of the patterns of A and B. The C_to_* maps and TaskList
// drive the parallel add template.
GrB_Info GB_AaddB__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (PLUS, uint32_t); the result pattern is
// the set intersection of the patterns of A and B.
GrB_Info GB_AemultB__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x + Bx [p] for all p < anz: apply PLUS with the scalar bound to
// the first argument. Cx and Bx may alias. Returns GrB_NO_VALUE when this
// operator/type kernel is disabled at compile time.
GrB_Info GB_bind1st__plus_uint32
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Bx = (uint32_t *) Bx_input ;
uint32_t x = (*((uint32_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
Cx [p] = x + Bx [p] ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] + y for all p < anz: apply PLUS with the scalar bound to
// the second argument. Cx and Ax may alias. Returns GrB_NO_VALUE when this
// operator/type kernel is disabled at compile time.
GrB_Info GB_bind2nd__plus_uint32
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
Cx [p] = Ax [p] + y ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x + aij (PLUS, uint32_t),
// with the scalar bound to the first argument. Uses GB_CAST_OP defined
// just above and the shared transpose template.
GrB_Info GB_bind1st_tran__plus_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A and apply cij = aij + y (PLUS, uint32_t),
// with the scalar bound to the second argument. Uses GB_CAST_OP defined
// just above and the shared transpose template.
GrB_Info GB_bind2nd_tran__plus_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__div_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__div_uint8
// A.*B function (eWiseMult): GB_AemultB__div_uint8
// A*D function (colscale): GB_AxD__div_uint8
// D*A function (rowscale): GB_DxB__div_uint8
// C+=B function (dense accum): GB_Cdense_accumB__div_uint8
// C+=b function (dense accum): GB_Cdense_accumb__div_uint8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_uint8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_uint8
// C=scalar+B GB_bind1st__div_uint8
// C=scalar+B' GB_bind1st_tran__div_uint8
// C=A+scalar GB_bind2nd__div_uint8
// C=A'+scalar GB_bind2nd_tran__div_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_UNSIGNED (x, y, 8) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT8 || GxB_NO_DIV_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; op is DIV on uint8_t.
// NOTE(review): unlike the other kernels in this family, this one has no
// GB_DISABLE guard and returns void -- presumably the caller checks
// GB_DISABLE before dispatching here; confirm against the dispatcher.
void GB_Cdense_ewise3_accum__div_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// the entire kernel body lives in the shared template file
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation), DIV uint8_t.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB_Cdense_ewise3_noaccum__div_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C (DIV, uint8_t).
// The *_slice arrays describe how B's entries are partitioned over ntasks
// parallel tasks; the actual work is in the included template.
GrB_Info GB_Cdense_accumB__div_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C (DIV, uint8_t).
// p_bwork points to the scalar, passed as an untyped GB_void*.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out.
GrB_Info GB_Cdense_accumb__div_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// unpack the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// fix: the original also returned GrB_SUCCESS inside the braces above,
// making this outer return unreachable dead code; a single return after
// the template matches the sibling kernel GB_Cdense_accumB.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D (DIV, uint8_t).
// The *_slice arrays partition A's entries over ntasks parallel tasks.
GrB_Info GB_AxD__div_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the scaled values directly into C's value array
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D (DIV, uint8_t).
GrB_Info GB_DxB__div_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template writes the scaled values directly into C's value array
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B over the set UNION of the patterns of
// A and B, with the DIV uint8_t operator applied where both are present.
GrB_Info GB_AaddB__div_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspaces; allocated inside the template, freed by GB_FREE_ALL
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the set INTERSECTION of the
// patterns of A and B, applying the DIV uint8_t operator entrywise.
GrB_Info GB_AemultB__div_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slice workspaces; allocated inside the template, freed by GB_FREE_ALL
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = op (x, Bx [k]) for every entry present in B (bind1st, DIV uint8).
// Bb is B's bitmap (NULL when B is full); anz is the number of slots.
GrB_Info GB_bind1st__div_uint8
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *out = (uint8_t *) Cx_output ;
uint8_t xval = (*((uint8_t *) x_input)) ;
uint8_t *in = (uint8_t *) Bx_input ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only operate on entries present in B's bitmap/full structure
if (GBB (Bb, k))
{
uint8_t bval = in [k] ;
out [k] = GB_IDIV_UNSIGNED (xval, bval, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = op (Ax [k], y) for every entry present in A (bind2nd, DIV uint8).
// Ab is A's bitmap (NULL when A is full); anz is the number of slots.
GrB_Info GB_bind2nd__div_uint8
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *out = (uint8_t *) Cx_output ;
uint8_t *in = (uint8_t *) Ax_input ;
uint8_t yval = (*((uint8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only operate on entries present in A's bitmap/full structure
if (GBB (Ab, k))
{
uint8_t aval = in [k] ;
out [k] = GB_IDIV_UNSIGNED (aval, yval, 8) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 8) ; \
}
// C = op (x, A'): transpose A and apply the DIV uint8 op with x bound first.
GrB_Info GB_bind1st_tran__div_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any generated code that follows this kernel
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 8) ; \
}
// C = op (A', y): transpose A and apply the DIV uint8 op with y bound second.
// The per-entry work is done by the GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__div_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
base_serialized.h | #include "callback.h"
#include <omp.h>
// Minimal OMPT smoke test: run a trivial one-iteration parallel for on a
// single thread and let FileCheck verify (via the CHECK lines below) that
// the parallel / implicit-task / loop begin+end callbacks fire in order.
// SCHEDULE is a macro supplied on the compile line by the test harness.
int main()
{
unsigned int i;
#pragma omp parallel for num_threads(1) schedule(SCHEDULE)
for (i = 0; i < 1; i++) {
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_loop_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_loop_end'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=1, parallel_function=0x{{[0-f]+}}, invoker={{.+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
return 0;
}
|
7b843bd01_so4.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
#include <stdio.h>
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
struct profiler
{
double section0;
double section1;
double section2;
};
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw);
// Devito-generated TTI (tilted transverse isotropy) forward wave propagator
// with time-tiled, wavefront-skewed blocking.  section0 precomputes the
// trig/anisotropy factor arrays r17..r21 once; section1 runs the blocked
// time stepping through bf0.  Wall-clock per section is accumulated into
// *timers.  Always returns 0.
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
// Reinterpret the opaque dataobj buffers as typed multi-dimensional arrays.
// NOTE(review): several of these casts (save_src_*, source_*, u, v, ...) are
// unused in this function -- the code generator emits them anyway; the data
// is consumed inside bf0 via the *_vec handles.
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
// Scratch arrays for the precomputed factors, one point of halo per side.
// NOTE(review): none of the posix_memalign calls in this function check the
// return value -- an allocation failure leaves the pointer indeterminate
// and leads to undefined behaviour on first use.
float(*r17)[y_size + 1][z_size + 1];
posix_memalign((void **)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r18)[y_size + 1][z_size + 1];
posix_memalign((void **)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r19)[y_size + 1][z_size + 1];
posix_memalign((void **)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r20)[y_size + 1][z_size + 1];
posix_memalign((void **)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r21)[y_size + 1][z_size + 1];
posix_memalign((void **)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
// Per-thread scratch block pointers for bf0, indexed by OpenMP thread id.
float **r47;
posix_memalign((void **)&r47, 64, sizeof(float *) * nthreads);
float **r48;
posix_memalign((void **)&r48, 64, sizeof(float *) * nthreads);
// Blocking parameters: [0]=xb tile, [1]=yb tile, [2]=x block, [3]=y block.
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int xb_size = block_sizes[0];
// sf is the time-skewing factor; t_blk_size is the temporal tile extent.
int sf = 2;
int t_blk_size = 2 * sf * (time_M - time_m);
// each thread allocates its own scratch blocks (first-touch placement)
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
posix_memalign((void **)&r47[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
posix_memalign((void **)&r48[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
}
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
// Precompute cos/sin of the tilt/azimuth angles and the delta scaling
// once per grid point; the physical arrays carry a 4-point halo, the
// scratch arrays a 1-point halo (hence the +4 vs +1 index shifts).
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(static, 1)
for (int x = x_m - 1; x <= x_M; x += 1)
{
for (int y = y_m - 1; y <= y_M; y += 1)
{
#pragma omp simd aligned(delta, phi, theta : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
// Time-tiled wavefront sweep: spatial tiles are iterated over a skewed
// range (upper bounds extended by sf*(time_M-time_m)) so that each tile
// covers the shifted coordinates of every time step in the temporal tile.
// t0/t1/t2 rotate through the 3 time buffers of u and v.
for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m-1 ; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
{
//printf(" Change of outer xblock %d \n", xb);
for (int yb = y_m-1 ; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
{
for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
{
int tw = ((time / sf) % (time_M - time_m + 1));
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
bf0(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x0_blk0_size, x_size, y0_blk0_size, y_size, z_size, t0, t1, t2, x_M , x_m, y_M , y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, (float **)r47, (float **)r48, time, tw);
// x_M - (x_M - x_m + 1)%(x0_blk0_size), x_m, y_M - (y_M - y_m + 1)%(y0_blk0_size), y_m,
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
}
}
}
// release per-thread scratch first, then the shared arrays
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
free(r47[tid]);
free(r48[tid]);
}
free(r17);
free(r18);
free(r19);
free(r20);
free(r21);
free(r47);
free(r48);
return 0;
}
// One skewed space-block update of the TTI stencil for a single time step.
// All spatial indices are shifted by `time` (wavefront skewing), so a block
// (xb,yb) holds different physical coordinates at each time step.  Phase 1
// fills the per-thread scratch blocks r34/r35 with rotated first-derivative
// terms of u and v; phase 2 applies the 4th-order update to u[t2]/v[t2];
// finally the sparse source terms for step tw are injected.
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw)
{
// typed views over the opaque buffers (same trick as in ForwardTTI)
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
float(*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r17_vec;
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
float **r47 = (float **)r47_vec;
float **r48 = (float **)r48_vec;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
// degenerate blocking: nothing to do (also avoids a zero-stride loop below)
if (x0_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
// per-thread scratch blocks allocated in ForwardTTI
float(*restrict r34)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r47[tid];
float(*restrict r35)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r48[tid];
#pragma omp for collapse(1) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
// Phase 1: rotated first derivatives of u and v into r34/r35.
// Starts one point early (x0_blk0 - 1) to provide the halo that
// the backward differences in phase 2 need.
for (int x = x0_blk0 - 1, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
{
for (int y = y0_blk0 - 1, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
{
//printf(" bf0 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 4, y - time + 4, xs, ys);
#pragma omp simd aligned(u, v : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
float r39 = -u[t0][x - time + 4][y - time + 4][z + 4];
r34[xs][ys][z + 1] = 1.0e-1F * (-(r39 + u[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r39 + u[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r39 + u[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
float r40 = -v[t0][x - time + 4][y - time + 4][z + 4];
r35[xs][ys][z + 1] = 1.0e-1F * (-(r40 + v[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r40 + v[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r40 + v[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
}
}
}
// Phase 2: leapfrog update of u[t2] and v[t2] from u/v[t0], u/v[t1],
// the rotated derivative blocks, damping, and the velocity model.
for (int x = x0_blk0, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
{
for (int y = y0_blk0, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 4, y - time + 4, xs, ys);
#pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
float r46 = 1.0 / dt;
float r45 = 1.0 / (dt * dt);
float r44 = r18[x - time + 1][y - time + 1][z] * r35[xs + 1][ys + 1][z] - r18[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r35[xs + 1][ys][z + 1] - r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r35[xs][ys + 1][z + 1] - r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1];
float r43 = 1.0 / (vp[x - time + 4][y - time + 4][z + 4] * vp[x - time + 4][y - time + 4][z + 4]);
float r42 = 1.0e-1F * (-r18[x - time + 1][y - time + 1][z] * r34[xs + 1][ys + 1][z] + r18[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r34[xs + 1][ys][z + 1] + r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r34[xs][ys + 1][z + 1] + r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1]) - 8.33333315e-4F * (u[t0][x - time + 2][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 2][z + 4] + u[t0][x - time + 4][y - time + 4][z + 2] + u[t0][x - time + 4][y - time + 4][z + 6] + u[t0][x - time + 4][y - time + 6][z + 4] + u[t0][x - time + 6][y - time + 4][z + 4]) + 1.3333333e-2F * (u[t0][x - time + 3][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 3][z + 4] + u[t0][x - time + 4][y - time + 4][z + 3] + u[t0][x - time + 4][y - time + 4][z + 5] + u[t0][x - time + 4][y - time + 5][z + 4] + u[t0][x - time + 5][y - time + 4][z + 4]) - 7.49999983e-2F * u[t0][x - time + 4][y - time + 4][z + 4];
float r41 = 1.0 / (r43 * r45 + r46 * damp[x - time + 1][y - time + 1][z + 1]);
float r32 = r45 * (-2.0F * u[t0][x - time + 4][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 4][z + 4]);
float r33 = r45 * (-2.0F * v[t0][x - time + 4][y - time + 4][z + 4] + v[t1][x - time + 4][y - time + 4][z + 4]);
u[t2][x - time + 4][y - time + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x - time + 4][y - time + 4][z + 4] + 1) + 1.0e-1F * r44 * r17[x - time + 1][y - time + 1][z + 1] + r46 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 4][y - time + 4][z + 4]));
v[t2][x - time + 4][y - time + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x - time + 1][y - time + 1][z + 1] + 1.0e-1F * r44 + r46 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 4][y - time + 4][z + 4]));
}
// Sparse source injection for this (x,y) column at step tw; the
// nnz/sp_source_mask pair compresses the z locations with sources.
// NOTE(review): the printf below runs inside the parallel region and
// will interleave across threads -- debug output only.
int sp_zi_M = nnz_sp_source_mask[x-time][y-time] - 1;
for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
{
int zind = sp_source_mask[x-time][y-time][sp_zi];
float r22 = save_src_u[tw][source_id[x-time][y-time][zind]] * source_mask[x-time][y-time][zind];
u[t2][x -time + 4][y -time + 4][zind + 4] += r22;
float r23 = save_src_v[tw][source_id[x-time][y-time][zind]] * source_mask[x-time][y-time][zind];
v[t2][x-time + 4][y-time + 4][zind + 4] += r23;
printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 4, y - time + 4, zind + 4, r22, r23);
}
}
}
}
}
}
}
|
vednnLaunch.h | #pragma once
#include "vednn.h"
#ifdef VEDNN_USE_OPENMP
#include <omp.h>
#endif
#include <algorithm>
/** TODO: Also 2d parallelize Linear. Be careful, there are calls that require
* the OC to be a even number! */
//------------------------------------------------------------------------------
/** Split the range [0,cnt) evenly across the OpenMP team and invoke
 *  func(begin, end) on each non-empty chunk.  Error codes returned by the
 *  chunks are OR-combined into the final vednnError_t. */
template<typename F>
inline vednnError_t vednn_launch_1d(const int cnt, F func) {
  if (cnt == 1)
    return func(0, cnt);          // trivial range: no need to spin up a team
  int rc = VEDNN_SUCCESS;
#pragma omp parallel reduction(|:rc)
  {
    const int nthr  = omp_get_num_threads();
    const int tid   = omp_get_thread_num();
    const int chunk = (cnt + nthr - 1) / nthr;   // ceil(cnt / nthr)
    const int begin = tid * chunk;
    const int end   = std::min(begin + chunk, cnt);
    if (begin < end)
      rc |= (int)func(begin, end);
  }
  return (vednnError_t)rc;
}
//------------------------------------------------------------------------------
/** Parallelize func over a 2-D index space [0,x) x [0,y).
 *  If x alone offers enough parallelism (x > nthreads/2) the x range is
 *  split 1-D; otherwise each thread column owns one x index and the y range
 *  is split among nthreads/x thread rows.  Error codes are OR-combined.
 *  NOTE(review): assumes x >= 1 when cnt != 1 (ythreads = nthreads / x). */
template<typename F>
inline vednnError_t vednn_launch_2d(const int x, const int y, F func) {
int rc = VEDNN_SUCCESS;
int cnt = x * y;
if(cnt == 1) {
return func(0, x, 0, y);
} else {
#pragma omp parallel reduction(|:rc)
{
int nthreads = omp_get_num_threads();
int tx = omp_get_thread_num();
if(x > (nthreads/2)) {
// 1-D split along x only; each thread takes a contiguous x chunk
int xcnt = (x + nthreads - 1) / nthreads;
int min_x = tx * xcnt;
int max_x = std::min((tx+1) * xcnt, x);
if(min_x < max_x)
rc |= func(min_x, max_x, 0, y);
} else {
// 2-D split: thread (ty, tx%x) handles x index tx%x and a y chunk.
// fix: the original reduced tx modulo xthreads twice in a row
// (tx = tx % xthreads; min_x = tx % xthreads;) -- once suffices.
int xthreads = x;
int ythreads = nthreads / x;
int ty = tx / xthreads;
int min_x = tx % xthreads;
int max_x = min_x + 1;
int ycnt = (y + ythreads - 1) / ythreads;
int min_y = ty * ycnt;
int max_y = std::min((ty+1) * ycnt, y);
if(min_y < max_y)
rc |= func(min_x, max_x, min_y, max_y);
}
}
}
return (vednnError_t)rc;
}
//------------------------------------------------------------------------------
|
op_a.c | /*
Compute A(X).
*/
#include <stdlib.h>
#include "declarations.h"
// Compute result[i] = <A_i, X> for i = 1..k, where each constraint matrix
// A_i is stored as a linked list of sparse blocks.  All indices here are
// 1-based (CSDP convention).  Old-style K&R definition; the prototype is
// assumed to live in declarations.h.
void op_a(k,constraints,X,result)
int k;
struct constraintmatrix *constraints;
struct blockmatrix X;
double *result;
{
int i,j;
int p,q;
int blk;
double ent;
double *mat;
double *vec;
int nume;
struct sparseblock *ptr;
double contrib;
// each constraint is independent, so the outer loop parallelizes cleanly
#pragma omp parallel for schedule(dynamic,64) default(none) private(i,contrib,ptr,blk,nume,vec,j,p,mat,ent,q) shared(k,constraints,result,X)
for (i=1; i<=k; i++)
{
result[i]=0.0;
contrib=0.0;
ptr=constraints[i].blocks;
// walk the sparse blocks of constraint i
while (ptr != NULL)
{
blk=ptr->blocknum;
nume=ptr->numentries;
if (X.blocks[blk].blockcategory == DIAG)
{
// diagonal block of X: stored as a vector of diagonal entries
vec=X.blocks[blk].data.vec;
for (j=1; j<=nume; j++)
{
ent=ptr->entries[j];
p=ptr->iindices[j];
contrib += ent*vec[p];
};
}
else
{
// dense block: presumably only one triangle of A_i is stored, so an
// off-diagonal entry picks up both (i,j) and (j,i) of symmetric X
// -- confirm against the sparseblock writer.
mat=X.blocks[blk].data.mat;
for (j=1; j<=nume; j++)
{
ent=ptr->entries[j];
p=ijtok(ptr->iindices[j],ptr->jindices[j],ptr->blocksize);
q=ijtok(ptr->jindices[j],ptr->iindices[j],ptr->blocksize);
if (p == q)
{
contrib += ent*mat[p];
}
else
{
contrib += ent*(mat[p]+mat[q]);
};
};
};
ptr=ptr->next;
};
result[i] += contrib;
};
}
|
gen_input.c | #include <time.h>
#include <stdlib.h>
#include <stdio.h>
#ifdef FP_NUMBER
typedef double FP_NUMBER;
#else
typedef float FP_NUMBER;
#endif
#define GET_RAND_FP \
((FP_NUMBER)rand() / ((FP_NUMBER)(RAND_MAX) + (FP_NUMBER)(1)))
char L_FNAME[32], U_FNAME[32], A_FNAME[32];
/*
 * Generate an LU-decomposition test problem of dimension argv[1]:
 *   l-<n>.dat -- unit lower-triangular L with random sub-diagonal entries
 *   u-<n>.dat -- upper-triangular U with random entries
 *   <n>.dat   -- A = L*U, with the dimension on the first line
 * Returns 0 on success, 1 on argument/allocation/file failure.
 */
int main(int argc, char** argv) {
    int i, j, k, MatrixDim;
    FP_NUMBER sum, **L, **U, **A;
    FILE *fl, *fu, *fa;

    if (argc < 2) {
        printf("./gen_input [Matrix_Dimension_size]\n");
        return 1;
    }
    MatrixDim = atoi(argv[1]);
    if (MatrixDim <= 0) { /* robustness: reject zero/negative/garbage sizes */
        printf("Invalid matrix dimension\n");
        return 1;
    }

    L = (FP_NUMBER**)malloc(sizeof(FP_NUMBER*) * MatrixDim);
    U = (FP_NUMBER**)malloc(sizeof(FP_NUMBER*) * MatrixDim);
    A = (FP_NUMBER**)malloc(sizeof(FP_NUMBER*) * MatrixDim);
    if (!L || !U || !A) {
        printf("Can not allocate memory\n");
        free(L); /* free(NULL) is a no-op, no guards needed */
        free(U);
        free(A);
        return 1;
    }
    /* allocate and CHECK each row (the original ignored these failures,
     * risking NULL dereference below; on failure we exit, so the earlier
     * rows are reclaimed by the OS) */
    for (i = 0; i < MatrixDim; i++) {
        L[i] = (FP_NUMBER*)malloc(sizeof(FP_NUMBER) * MatrixDim);
        U[i] = (FP_NUMBER*)malloc(sizeof(FP_NUMBER) * MatrixDim);
        A[i] = (FP_NUMBER*)malloc(sizeof(FP_NUMBER) * MatrixDim);
        if (!L[i] || !U[i] || !A[i]) {
            printf("Can not allocate memory\n");
            return 1;
        }
    }

    srand(time(NULL));
    sprintf(L_FNAME, "l-%d.dat", MatrixDim);
    fl = fopen(L_FNAME, "wb");
    if (fl == NULL) {
        printf("Cannot open file %s\n", L_FNAME);
        return 1;
    }
    sprintf(U_FNAME, "u-%d.dat", MatrixDim);
    fu = fopen(U_FNAME, "wb");
    if (fu == NULL) {
        printf("Cannot open file %s\n", U_FNAME);
        fclose(fl); /* fix: fl leaked on this path in the original */
        return 1;
    }
    sprintf(A_FNAME, "%d.dat", MatrixDim);
    fa = fopen(A_FNAME, "wb");
    if (!fa) {
        printf("Cannot open file %s\n", A_FNAME);
        fclose(fl); /* fix: fl and fu leaked on this path in the original */
        fclose(fu);
        return 1;
    }

    /* Fill L (unit lower-triangular) and U (upper-triangular).  Kept
     * serial on purpose: rand() is not thread-safe, so the original
     * "#pragma omp parallel for" here was a data race. */
    for (i = 0; i < MatrixDim; i++) {
        for (j = 0; j < MatrixDim; j++) {
            if (i == j) {
                L[i][j] = 1.0;
                U[i][j] = GET_RAND_FP;
            } else if (i < j) {
                L[i][j] = 0;
                U[i][j] = GET_RAND_FP;
            } else { // i > j
                L[i][j] = GET_RAND_FP;
                U[i][j] = 0;
            }
        }
    }

    /* A = L*U; deterministic given L and U, so safe to parallelize */
#pragma omp parallel for default(none) private(i, j, k, sum) shared(L, U, A, \
                                                                    MatrixDim)
    for (i = 0; i < MatrixDim; i++) {
        for (j = 0; j < MatrixDim; j++) {
            sum = 0;
            for (k = 0; k < MatrixDim; k++)
                sum += L[i][k] * U[k][j];
            A[i][j] = sum;
        }
    }

    for (i = 0; i < MatrixDim; i++) {
        for (j = 0; j < MatrixDim; j++)
            fprintf(fl, "%f ", L[i][j]);
        fprintf(fl, "\n");
    }
    fclose(fl);
    for (i = 0; i < MatrixDim; i++) {
        for (j = 0; j < MatrixDim; j++)
            fprintf(fu, "%f ", U[i][j]);
        fprintf(fu, "\n");
    }
    fclose(fu);
    fprintf(fa, "%d\n", MatrixDim);
    for (i = 0; i < MatrixDim; i++) {
        for (j = 0; j < MatrixDim; j++)
            fprintf(fa, "%f ", A[i][j]);
        fprintf(fa, "\n");
    }
    fclose(fa);

    for (i = 0; i < MatrixDim; i++) {
        free(L[i]);
        free(U[i]);
        free(A[i]);
    }
    free(L);
    free(U);
    free(A);
    return 0;
}
|
ex07.c | #include <stdio.h>
#include <omp.h>
static long num_steps = 1000000;
double step;
/*
 * Estimate pi = integral over [0,1] of 4/(1+x^2) with the midpoint rule,
 * parallelized via an OpenMP reduction, and report the elapsed wall time.
 * The original declared "int main(int argv, char* argc)" — a non-standard
 * signature with the names swapped and the wrong second type; the arguments
 * were never used, so the standard form main(void) is used instead. The
 * unused outer "double x" (shadowed by the parallel-region x) is removed.
 */
int main(void)
{
    int i;
    double pi, sum = 0.0;
    step = 1.0 / (double) num_steps;
    double startTime = omp_get_wtime();
#pragma omp parallel
    {
        double x; /* per-thread abscissa; the loop index i is made private by "omp for" */
#pragma omp for reduction (+:sum)
        for (i = 0; i < num_steps; i++)
        {
            x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
        }
    }
    pi = step * sum;
    double endTime = omp_get_wtime();
    printf ("Computed integral: %f\n", pi);
    printf ("Time elapsed: %f secs\n", (endTime - startTime));
    return 0;
}
simple_fft.h | /******************************************************************************
* Simple FFT (Cooley Tukey Radix-4)
*
* Copyright (c) 2015 OK Ojisan(Takuya OKAHISA)
* Released under the MIT license
* http://opensource.org/licenses/mit-license.php
******************************************************************************/
#ifndef simple_fft_h
#define simple_fft_h
#include <utility>
#include "otfft/otfft_misc.h"
namespace SimpleFFT { /////////////////////////////////////////////////////////
using namespace OTFFT_MISC;
#ifdef DO_SINGLE_THREAD
const int OMP_THRESHOLD = 1<<30;
#else
const int OMP_THRESHOLD = 1<<15;
#endif
//// One forward radix-4 DIF butterfly pass over the length-n sub-transform
//// starting at xq (n = 4*n1, n2 = 2*n1, n3 = 3*n1). Twiddle factors are
//// read from W with stride s. Extracted so the serial and OpenMP branches
//// of fwdbut share one body instead of duplicating it.
static inline void fwd_radix4_pass(int s, int n1, int n2, int n3,
                                   complex_t* const xq,
                                   const complex_t* const W)
{
    for (int p = 0; p < n1; p++) {
        const int sp = s*p;
        const complex_t w1p = W[1*sp];
        const complex_t w2p = W[2*sp];
        const complex_t w3p = W[3*sp];
        const complex_t a = xq[p];
        const complex_t b = xq[p+n1];
        const complex_t c = xq[p+n2];
        const complex_t d = xq[p+n3];
        const complex_t apc  = a + c;
        const complex_t amc  = a - c;
        const complex_t bpd  = b + d;
        const complex_t jbmd = jx(b - d);
        xq[p]    = apc + bpd;
        xq[p+n1] = (apc - bpd) * w2p;
        xq[p+n2] = (amc - jbmd) * w1p;
        xq[p+n3] = (amc + jbmd) * w3p;
    }
}
//// Forward butterfly stages of a radix-4 Cooley-Tukey FFT over x[0..N-1].
//// Large transforms (N >= OMP_THRESHOLD) parallelize across the independent
//// sub-transforms; a final radix-2 stage handles N that is 2 * power-of-4.
void fwdbut(int N, complex_t* const x, const complex_t* const W)
{
    int n = N;
    for (int s = 1; n > 2; n /= 4, s *= 4) {
        const int n1 = n / 4;
        const int n2 = n / 2;
        const int n3 = n1 + n2;
        if (N < OMP_THRESHOLD) {
            for (int q = 0; q < N; q += n)
                fwd_radix4_pass(s, n1, n2, n3, x + q, W);
        }
        else {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
            for (int q = 0; q < N; q += n)
                fwd_radix4_pass(s, n1, n2, n3, x + q, W);
        }
    }
    if (n == 2) {
        // Final radix-2 stage: plain sum/difference pairs, no twiddles.
        if (N < OMP_THRESHOLD) {
            for (int q = 0; q < N; q += 2) {
                complex_t* const xq = x + q;
                const complex_t a = xq[0];
                const complex_t b = xq[1];
                xq[0] = a + b;
                xq[1] = a - b;
            }
        }
        else {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
            for (int q = 0; q < N; q += 2) {
                complex_t* const xq = x + q;
                const complex_t a = xq[0];
                const complex_t b = xq[1];
                xq[0] = a + b;
                xq[1] = a - b;
            }
        }
    }
}
//// One inverse radix-4 butterfly pass over the length-n sub-transform at xq.
//// Conjugate twiddles are read from the top of the table (W[N - k]) and the
//// sign of the j*(b-d) term is flipped relative to the forward pass.
//// Extracted so the serial and OpenMP branches of invbut share one body.
static inline void inv_radix4_pass(int N, int s, int n1, int n2, int n3,
                                   complex_t* const xq,
                                   const complex_t* const W)
{
    for (int p = 0; p < n1; p++) {
        const int sp = s*p;
        const complex_t w1p = W[N-1*sp];
        const complex_t w2p = W[N-2*sp];
        const complex_t w3p = W[N-3*sp];
        const complex_t a = xq[p];
        const complex_t b = xq[p+n1];
        const complex_t c = xq[p+n2];
        const complex_t d = xq[p+n3];
        const complex_t apc  = a + c;
        const complex_t amc  = a - c;
        const complex_t bpd  = b + d;
        const complex_t jbmd = jx(b - d);
        xq[p]    = apc + bpd;
        xq[p+n1] = (apc - bpd) * w2p;
        xq[p+n2] = (amc + jbmd) * w1p;
        xq[p+n3] = (amc - jbmd) * w3p;
    }
}
//// Inverse butterfly stages of the radix-4 FFT over x[0..N-1]; mirrors
//// fwdbut but with conjugate twiddles. No 1/N scaling is applied here.
void invbut(int N, complex_t* const x, const complex_t* const W)
{
    int n = N;
    for (int s = 1; n > 2; n /= 4, s *= 4) {
        const int n1 = n / 4;
        const int n2 = n / 2;
        const int n3 = n1 + n2;
        if (N < OMP_THRESHOLD) {
            for (int q = 0; q < N; q += n)
                inv_radix4_pass(N, s, n1, n2, n3, x + q, W);
        }
        else {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
            for (int q = 0; q < N; q += n)
                inv_radix4_pass(N, s, n1, n2, n3, x + q, W);
        }
    }
    if (n == 2) {
        // Final radix-2 stage: plain sum/difference pairs, no twiddles.
        if (N < OMP_THRESHOLD) {
            for (int q = 0; q < N; q += 2) {
                complex_t* const xq = x + q;
                const complex_t a = xq[0];
                const complex_t b = xq[1];
                xq[0] = a + b;
                xq[1] = a - b;
            }
        }
        else {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
            for (int q = 0; q < N; q += 2) {
                complex_t* const xq = x + q;
                const complex_t a = xq[0];
                const complex_t b = xq[1];
                xq[0] = a + b;
                xq[1] = a - b;
            }
        }
    }
}
/**
 * Minimal radix-4 Cooley-Tukey FFT driver. Owns the twiddle table W
 * (N+1 entries, filled by init_W) and a bit-reversal permutation table.
 * The fwd/inv variants run the butterfly passes (fwdbut / invbut) followed
 * by the bit-reversal reordering; fwd and invn additionally scale by 1/N.
 */
struct FFT
{
    const int N;                  // transform length
    simd_array<complex_t> weight; // twiddle-factor storage (N+1 entries)
    simd_array<int> table;        // bit-reversal permutation storage
    complex_t* const W;           // raw pointer into weight
    int* bitrev;                  // raw pointer into table
    // NOTE(review): W(&weight) / bitrev(&table) assign the address of the
    // simd_array objects to element pointers; presumably simd_array
    // overloads operator& (or converts) — confirm in otfft_misc.h.
    FFT(int n) : N(n), weight(n+1), table(n), W(&weight), bitrev(&table)
    {
        init_W(N, W); // fill the twiddle table for length N
        // Build the bit-reversal permutation incrementally (Gold-Rader style
        // carry propagation from the top bit).
        bitrev[0] = 0; bitrev[N-1] = N-1;
        for (int i = 0, j = 1; j < N-1; j++) {
            for (int k = N >> 1; k > (i ^= k); k >>= 1);
            bitrev[j] = i;
        }
    }
    ///////////////////////////////////////////////////////////////////////////
    // Forward transform with 1/N scaling applied.
    void fwd(complex_t* const x) const
    {
        const double rN = 1.0/N;
        fwdbut(N, x, W);
        if (N < OMP_THRESHOLD) {
            // Scale-then-swap in one pass: each element is scaled exactly once
            // (at index p) before any swap that moves it.
            for (int p = 0; p < N; p++) {
                x[p] *= rN;
                const int q = bitrev[p];
                if (p > q) std::swap(x[p], x[q]);
            }
        }
        else
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
            // Parallel path splits the work: swap pass, then scale pass.
            // Each (p, q) pair is swapped only by the iteration with p > q;
            // this relies on bitrev being an involution so no element is
            // touched by two swaps.
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
            for (int p = 0; p < N; p++) {
                const int q = bitrev[p];
                if (p > q) std::swap(x[p], x[q]);
            }
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
            for (int p = 0; p < N; p++) x[p] *= rN;
        }
    }
    // Forward transform without scaling.
    void fwd0(complex_t* const x) const
    {
        fwdbut(N, x, W);
        if (N < OMP_THRESHOLD) {
            for (int p = 0; p < N; p++) {
                const int q = bitrev[p];
                if (p > q) std::swap(x[p], x[q]);
            }
        }
        else {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
            for (int p = 0; p < N; p++) {
                const int q = bitrev[p];
                if (p > q) std::swap(x[p], x[q]);
            }
        }
    }
    // Forward transform with 1/N scaling (alias of fwd).
    void fwdn(complex_t* const x) const { fwd(x); }
    ///////////////////////////////////////////////////////////////////////////
    // Inverse transform without scaling.
    void inv(complex_t* const x) const
    {
        invbut(N, x, W);
        if (N < OMP_THRESHOLD) {
            for (int p = 0; p < N; p++) {
                const int q = bitrev[p];
                if (p > q) std::swap(x[p], x[q]);
            }
        }
        else {
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif
            for (int p = 0; p < N; p++) {
                const int q = bitrev[p];
                if (p > q) std::swap(x[p], x[q]);
            }
        }
    }
    // Inverse transform without scaling (alias of inv).
    void inv0(complex_t* const x) const { inv(x); }
    // Inverse transform with 1/N scaling.
    void invn(complex_t* const x) const
    {
        const double rN = 1.0/N;
        invbut(N, x, W);
        if (N < OMP_THRESHOLD) {
            // Same scale-then-swap single pass as fwd().
            for (int p = 0; p < N; p++) {
                x[p] *= rN;
                const int q = bitrev[p];
                if (p > q) std::swap(x[p], x[q]);
            }
        }
        else
#ifdef _OPENMP
#pragma omp parallel
#endif
        {
#ifdef _OPENMP
#pragma omp for schedule(static)
#endif
            for (int p = 0; p < N; p++) {
                const int q = bitrev[p];
                if (p > q) std::swap(x[p], x[q]);
            }
#ifdef _OPENMP
#pragma omp for schedule(static) nowait
#endif
            for (int p = 0; p < N; p++) x[p] *= rN;
        }
    }
};
} /////////////////////////////////////////////////////////////////////////////
#endif // simple_fft_h
|
ordered.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7
#include "callback.h"
#include <omp.h>
// OMPT regression test: a stand-alone "ordered" construct is treated by the
// runtime as a mutex-like region. The FileCheck patterns below are the
// expected wait/acquired/released callback trace (with codeptr_ra return
// addresses) and are functionally part of the test — do not edit them.
int main()
{
#pragma omp ordered
  {
    print_current_address(1);
    print_ids(0);
  }
  print_current_address(2);
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_nest_lock'
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_wait_ordered: wait_id=[[WAIT_ID:[0-9]+]], hint={{[0-9]+}}, impl={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_acquired_ordered: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS]]
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_release_ordered: wait_id=[[WAIT_ID]], codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]
  // CHECK-NEXT: {{^}}[[MASTER_ID]]: current_address={{.*}}[[RETURN_ADDRESS]]
  return 0;
}
|
samples.h | #ifndef __SAMPLES_H
#define __SAMPLES_H
#include <iostream>
#include <fstream>
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include "distribution.h"
#include "mixture_model.h"
#define ENABLE_ALL 1
namespace jmm {
/**
 * Fixed-capacity structure-of-arrays buffer of t_dims-dimensional samples
 * and their per-sample statistics (pdfs, weights, normals, curvatures, ...),
 * plus running position/normal moment accumulators. Storage is Eigen
 * matrices; column/row i of each member describes sample i, valid entries
 * are [0, m_end). Serialized with boost::serialization; concurrent insertion
 * goes through push_back_synchronized.
 */
template<int t_dims, typename Scalar>
class Samples {
public:
    using Vectord = Eigen::Matrix<Scalar, t_dims, 1>;
    using Normal = Eigen::Matrix<Scalar, 3, 1>;
    using Color = Eigen::Matrix<Scalar, 3, 1>;
    Samples() : m_capacity(0), m_end(0) {}
    using SampleVector = Eigen::Matrix<Scalar, t_dims, Eigen::Dynamic>;
    using NormalsVector = Eigen::Matrix<Scalar, 3, Eigen::Dynamic>;
    using ColorVector = Eigen::Matrix<Scalar, 3, Eigen::Dynamic>;
    using ScalarVector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
    using BooleanVector = Eigen::Matrix<bool, Eigen::Dynamic, 1>;
    // Structure-of-arrays sample storage; one column (or row entry) per sample.
    SampleVector samples;
    ScalarVector samplingPdfs;
    ScalarVector learnedPdfs;
    ScalarVector heuristicPdfs;
    ScalarVector heuristicWeights;
    ScalarVector weights;
    ColorVector colorWeights;
    BooleanVector isDiffuse;
    NormalsVector normals;
    ScalarVector stateDensities;
    ScalarVector curvatures;
    ScalarVector rewards;
    ScalarVector discounts;
    // ColorVector functionValues;
    // ScalarVector bsdfWeights;
    // SampleVector nextSamples;
    // ScalarVector nextHeuristicPdfs;
    // ScalarVector nextHeuristicWeights;
    // From RenderingSamples:
    // Eigen::Matrix<Scalar, Eigen::Dynamic, 1> denoisedWeights;
    // std::vector<Point2> sensorPositions;
    // Eigen::Matrix<int, 1, Eigen::Dynamic> depths;
    // std::vector<Spectrum> throughputs;
    // Running (unnormalized) first and second moments over all accepted samples.
    Vectord meanPosition = Vectord::Zero();
    Vectord meanSquarePosition = Vectord::Zero();
    Normal meanNormal = Normal::Zero();
    Normal meanSquareNormal = Normal::Zero();
    Scalar nSamples = 0.f;
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW
    // Copy constructor. NOTE(review): stateDensities is NOT copied here (nor
    // in the move constructor or either assignment operator) even though
    // reserve() sizes it — confirm whether that omission is intentional.
    // The mutex is deliberately not copied (boost::mutex is non-copyable).
    Samples(const Samples& other) {
        meanPosition = other.meanPosition;
        meanSquarePosition = other.meanSquarePosition;
        meanNormal = other.meanNormal;
        meanSquareNormal = other.meanSquareNormal;
        nSamples = other.nSamples;
        m_end = other.m_end;
        m_capacity = other.m_capacity;
        m_totalSamplesCount = other.m_totalSamplesCount;
        samples = other.samples;
        samplingPdfs = other.samplingPdfs;
        learnedPdfs = other.learnedPdfs;
        heuristicPdfs = other.heuristicPdfs;
        heuristicWeights = other.heuristicWeights;
        weights = other.weights;
        isDiffuse = other.isDiffuse;
        normals = other.normals;
        curvatures = other.curvatures;
#if ENABLE_ALL == 1
        colorWeights = other.colorWeights;
        rewards = other.rewards;
        discounts = other.discounts;
#endif // ENABLE_ALL == 1
        // functionValues = std::move(other.functionValues);
        // bsdfWeights = std::move(other.bsdfWeights);
        // nextSamples = std::move(other.nextSamples);
        // nextHeuristicPdfs = std::move(other.nextHeuristicPdfs);
        // nextHeuristicWeights = std::move(other.nextHeuristicWeights);
    }
    // Move constructor: scalars/moments are copied, arrays are moved.
    // NOTE(review): stateDensities is not transferred — see copy constructor.
    Samples(Samples&& other) {
        meanPosition = other.meanPosition;
        meanSquarePosition = other.meanSquarePosition;
        meanNormal = other.meanNormal;
        meanSquareNormal = other.meanSquareNormal;
        nSamples = other.nSamples;
        m_end = other.m_end;
        m_capacity = other.m_capacity;
        m_totalSamplesCount = other.m_totalSamplesCount;
        samples = std::move(other.samples);
        samplingPdfs = std::move(other.samplingPdfs);
        learnedPdfs = std::move(other.learnedPdfs);
        heuristicPdfs = std::move(other.heuristicPdfs);
        heuristicWeights = std::move(other.heuristicWeights);
        weights = std::move(other.weights);
        colorWeights = std::move(other.colorWeights);
        isDiffuse = std::move(other.isDiffuse);
        normals = std::move(other.normals);
        curvatures = std::move(other.curvatures);
        rewards = std::move(other.rewards);
        discounts = std::move(other.discounts);
        // functionValues = std::move(other.functionValues);
        // bsdfWeights = std::move(other.bsdfWeights);
        // nextSamples = std::move(other.nextSamples);
        // nextHeuristicPdfs = std::move(other.nextHeuristicPdfs);
        // nextHeuristicWeights = std::move(other.nextHeuristicWeights);
    }
    // Copy assignment. NOTE(review): stateDensities is not copied here either.
    Samples& operator=(const Samples& other) {
        meanPosition = other.meanPosition;
        meanSquarePosition = other.meanSquarePosition;
        meanNormal = other.meanNormal;
        meanSquareNormal = other.meanSquareNormal;
        nSamples = other.nSamples;
        m_end = other.m_end;
        m_capacity = other.m_capacity;
        m_totalSamplesCount = other.m_totalSamplesCount;
        samples = other.samples;
        samplingPdfs = other.samplingPdfs;
        learnedPdfs = other.learnedPdfs;
        heuristicPdfs = other.heuristicPdfs;
        heuristicWeights = other.heuristicWeights;
        weights = other.weights;
        colorWeights = other.colorWeights;
        isDiffuse = other.isDiffuse;
        normals = other.normals;
        curvatures = other.curvatures;
        rewards = other.rewards;
        discounts = other.discounts;
        // functionValues = other.functionValues;
        // bsdfWeights = other.bsdfWeights;
        // nextSamples = other.nextSamples;
        // nextHeuristicPdfs = other.nextHeuristicPdfs;
        // nextHeuristicWeights = other.nextHeuristicWeights;
        return *this;
    }
    // Move assignment. NOTE(review): stateDensities is not transferred here either.
    Samples& operator=(Samples&& other) {
        meanPosition = other.meanPosition;
        meanSquarePosition = other.meanSquarePosition;
        meanNormal = other.meanNormal;
        meanSquareNormal = other.meanSquareNormal;
        nSamples = other.nSamples;
        m_end = other.m_end;
        m_capacity = other.m_capacity;
        m_totalSamplesCount = other.m_totalSamplesCount;
        samples = std::move(other.samples);
        samplingPdfs = std::move(other.samplingPdfs);
        learnedPdfs = std::move(other.learnedPdfs);
        heuristicPdfs = std::move(other.heuristicPdfs);
        heuristicWeights = std::move(other.heuristicWeights);
        weights = std::move(other.weights);
        colorWeights = std::move(other.colorWeights);
        isDiffuse = std::move(other.isDiffuse);
        normals = std::move(other.normals);
        curvatures = std::move(other.curvatures);
        rewards = std::move(other.rewards);
        discounts = std::move(other.discounts);
        // functionValues = std::move(other.functionValues);
        // bsdfWeights = std::move(other.bsdfWeights);
        // nextSamples = std::move(other.nextSamples);
        // nextHeuristicPdfs = std::move(other.nextHeuristicPdfs);
        // nextHeuristicWeights = std::move(other.nextHeuristicWeights);
        return *this;
    }
    // Resize every per-sample array to `size` entries (preserving existing
    // data via conservativeResize) and clamp m_end if the buffer shrank.
    virtual void reserve(int size) {
        samples.conservativeResize(Eigen::NoChange, size);
        samplingPdfs.conservativeResize(size);
        learnedPdfs.conservativeResize(size);
        heuristicPdfs.conservativeResize(size);
        heuristicWeights.conservativeResize(size);
        weights.conservativeResize(size);
        colorWeights.conservativeResize(Eigen::NoChange, size);
        isDiffuse.conservativeResize(size);
        normals.conservativeResize(Eigen::NoChange, size);
        stateDensities.conservativeResize(size);
        curvatures.conservativeResize(size);
        rewards.conservativeResize(size);
        discounts.conservativeResize(size);
        // functionValues.conservativeResize(Eigen::NoChange, size);
        // bsdfWeights.conservativeResize(size);
        // nextSamples.conservativeResize(Eigen::NoChange, size);
        // nextHeuristicPdfs.conservativeResize(size);
        // nextHeuristicWeights.conservativeResize(size);
        m_capacity = size;
        if(size < m_end) {
            m_end = size;
        }
    }
    // Rescale the stored weights so they sum to 1 over the valid range.
    void normalizeWeights() {
        weights.topRows(m_end) /= weights.topRows(m_end).sum();
    }
    // Clamp each stored weight from above by `clamping`.
    void clampWeights(Scalar clamping) {
        weights.topRows(m_end) = weights.topRows(m_end).cwiseMin(clamping);
    }
    // Logically empty the buffer; capacity and moment accumulators are kept.
    void clear() {
        m_end = 0;
        m_totalSamplesCount = 0;
        // meanPosition = Vectord::Zero();
        // meanSquarePosition = Vectord::Zero();
        // nSamples = 0;
    }
    // Mutex-guarded forwarding wrapper around push_back for concurrent producers.
    template<typename ...FwdArgs>
    void push_back_synchronized(
        FwdArgs&& ...fwdArgs
    ) {
        boost::unique_lock<boost::mutex> lock(mutex);
        push_back(std::forward<FwdArgs>(fwdArgs) ...);
    }
    // Append one sample. Returns false (sample dropped) when weight == 0 or
    // the buffer is full; returns true otherwise. NOTE(review): the moment
    // accumulators (nSamples, meanPosition, ...) are updated *before* the
    // capacity check, so a full buffer still accumulates moments for the
    // dropped sample — confirm this is intended.
    virtual bool push_back(
        const Vectord& sample,
        // Color functionValue,
        Scalar samplingPdf,
        Scalar learnedPdf,
        Scalar heuristicPdf,
        Scalar heuristicWeight,
        Scalar weight,
        Color colorWeight,
        bool _isDiffuse = 0,
        const Normal& normal = Normal::Zero(),
        Scalar curvature = 0,
        Scalar reward = 0,
        Scalar discount = 0
        // Scalar bsdfWeight = 0,
        // const Vectord& nextSample = Vectord::Zero(),
        // Scalar nextHeuristicPdf = 0,
        // Scalar nextHeuristicWeight = 0
    ) {
        ++m_totalSamplesCount;
        if(weight == 0) {
            return false;
        }
        nSamples += 1;
        meanPosition += sample;
        meanSquarePosition.array() += sample.array().square();
        meanNormal += normal;
        meanSquareNormal.array() += normal.array().square();
        if(m_end == m_capacity) {
            // reserve(2 * m_end);
            // std::cerr << "WARNING: Resizing sample array.\n";
            return false;
            // std::cerr << "m_end=" << m_end << " ... " << "samples.cols()=" << samples.cols() << "\n";
        }
        samples.col(m_end) = sample;
        samplingPdfs(m_end) = samplingPdf;
        learnedPdfs(m_end) = learnedPdf;
        heuristicPdfs(m_end) = heuristicPdf;
        heuristicWeights(m_end) = heuristicWeight;
        weights(m_end) = weight;
        colorWeights.col(m_end) = colorWeight;
        isDiffuse(m_end) = _isDiffuse;
        normals.col(m_end) = normal;
        curvatures(m_end) = curvature;
        rewards(m_end) = reward;
        discounts(m_end) = discount;
        // functionValues(m_end) = functionValue;
        // bsdfWeights(m_end) = bsdfWeight;
        // nextSamples.col(m_end) = nextSample;
        // nextHeuristicPdfs(m_end) = nextHeuristicPdf;
        // nextHeuristicWeights(m_end) = nextHeuristicWeight;
        // averageWeight = (averageWeight * (Scalar) m_end + weight) / ((Scalar) m_end + 1.f);
        ++m_end;
        return true;
    }
    // Append sample `sample_i` of another buffer into this one.
    void push_back(const Samples<t_dims, Scalar>& other, int sample_i) {
        push_back(
            other.samples.col(sample_i),
            other.samplingPdfs(sample_i),
            other.learnedPdfs(sample_i),
            other.heuristicPdfs(sample_i),
            other.heuristicWeights(sample_i),
            other.weights(sample_i),
            other.colorWeights.col(sample_i),
            other.isDiffuse(sample_i),
            other.normals.col(sample_i),
            other.curvatures(sample_i),
            other.rewards(sample_i),
            other.discounts(sample_i)
            // other.functionValues(sample_i),
            // other.bsdfWeights(sample_i),
            // other.nextSamples.col(sample_i),
            // other.nextHeuristicPdfs(sample_i),
            // other.nextHeuristicWeights(sample_i),
        );
    }
    // Append every valid sample of another buffer into this one.
    void push_back(const Samples<t_dims, Scalar>& other) {
        // if(m_capacity < m_end + other.m_end) {
        //     reserve(2 * (m_end + other.m_end));
        // }
        for(int sample_i = 0; sample_i < other.m_end; ++sample_i) {
            push_back(other, sample_i);
        }
    }
    // Draw `randomUniform.rows()` samples from this buffer, with probability
    // proportional to error^priority, applying decayed importance-sampling
    // weight corrections.
    // NOTE(review): the "omp parallel for" below races — push_back mutates
    // drawn.m_end without synchronization, and even serially
    // drawn.weights(sample_i) can index the wrong slot whenever a push_back
    // is rejected (weight == 0) and m_end falls behind sample_i. Also,
    // "return std::move(drawn)" inhibits NRVO.
    Samples prioritizedSample(
        const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& error,
        const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& randomUniform,
        Scalar proportionDone
    ) {
        Scalar priority = 0.7;
        Scalar isWeightDecay = 0.3;
        // isWeightDecay += (1 - isWeightDecay) * proportionDone;
        int bufferSize = error.rows();
        int nSamples = randomUniform.rows();
        assert(bufferSize == m_end);
        std::cerr << "Sampling nSamples: " << nSamples << "\n";
        Samples<t_dims, Scalar> drawn;
        drawn.reserve(nSamples);
        Eigen::Matrix<Scalar, Eigen::Dynamic, 1> pdf = error.array().max(0);
        pdf.array() = pdf.array().pow(priority);
        Eigen::Matrix<Scalar, Eigen::Dynamic, 1> cdf(bufferSize, 1);
        createCdfEigen(pdf, cdf, true);
        Eigen::Matrix<Scalar, Eigen::Dynamic, 1> isWeights =
            ((Scalar) nSamples / ((Scalar) bufferSize * pdf.array())).pow(isWeightDecay);
        isWeights.array() /= isWeights.maxCoeff();
        std::cerr << "Nan isWeights finite: " << isWeights.array().isFinite().all() << '\n';
#pragma omp parallel for
        for(int sample_i = 0; sample_i < nSamples; ++sample_i) {
            int sample_j = sampleDiscreteCdf(cdf, randomUniform(sample_i));
            drawn.push_back(*this, sample_j);
            drawn.weights(sample_i) *= isWeights(sample_j);
        }
        return std::move(drawn);
    }
    // Stochastically cull samples in place (Russian roulette): each sample
    // survives with probability clamp(weight, 0.1, 1); survivors are
    // compacted to the front via push_back (safe because the write index
    // m_end never overtakes the read index sample_i).
    void russianRoulette(
        const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& randomUniform,
        int nRemainingSamples,
        bool correctWeights
    ) {
        // Scalar sum = weights.topRows(m_end).sum();
        Scalar mean = weights.topRows(m_end).mean();
        // Scalar std = std::sqrt(
        //     (weights.topRows(m_end).array() - mean).square().sum() / ((Scalar) m_end - 1)
        // );
        Eigen::Matrix<Scalar, Eigen::Dynamic, 1> priorities(size(), 1);
        // priorities.setConstant((Scalar) nRemainingSamples / (Scalar) size());
        priorities = weights.topRows(m_end);
        // priorities.array() *= nRemainingSamples / mean;
        // std::cerr << randomUniform.rows() << "vs." << m_end << std::endl;
        int oldEnd = m_end;
        m_end = 0;
        for(int sample_i = 0; sample_i < oldEnd; ++sample_i) {
            // if(weights(sample_i) / mean < 1.f) {
            //     continue;
            // }
            Scalar keepProbability = std::min(Scalar(1), std::max(Scalar(0.1), priorities(sample_i)));
            // std::cerr << keepProbability << "\n";
            // TERMINATE
            if(randomUniform(sample_i) > keepProbability) {
                continue;
            }
            if(correctWeights) {
                weights(sample_i) /= keepProbability;
            }
            push_back(*this, sample_i);
        }
        // std::cerr << "RR removed " << oldEnd - m_end << "/" << oldEnd << " samples (remaining=" << m_end << ").\n";
    }
    // Serialize this object to a binary boost archive at `filename`.
    void save(const std::string& filename) const {
        // make an archive
        std::ofstream ofs(filename.c_str());
        boost::archive::binary_oarchive oa(ofs);
        oa << BOOST_SERIALIZATION_NVP(*this);
    }
    // Restore this object from a binary boost archive at `filename`.
    void load(const std::string& filename) {
        // open the archive
        std::ifstream ifs(filename.c_str());
        boost::archive::binary_iarchive ia(ifs);
        ia >> BOOST_SERIALIZATION_NVP(*this);
    }
    // Number of valid samples currently stored.
    int size() const {
        return m_end;
    }
    // Allocated capacity of the per-sample arrays.
    int capacity() const {
        return m_capacity;
    }
    // Total push_back attempts, including rejected (zero-weight/full) ones.
    int totalSamplesCount() const {
        return m_totalSamplesCount;
    }
    // Force the valid-sample count (no bounds check against m_capacity).
    void setSize(int size) {
        m_end = size;
    }
protected:
    int m_capacity = 0;          // allocated entries per array
    int m_end = 0;               // one past the last valid sample
    int m_totalSamplesCount = 0; // all push_back attempts, incl. rejected
    boost::mutex mutex;          // guards push_back_synchronized
    friend class boost::serialization::access;
    // NOTE(review): on load, reserve(m_end) runs *before* m_end is read from
    // the archive, so it sizes arrays using the pre-load (default 0) value;
    // also rewards/discounts are written by push_back but not serialized —
    // confirm both against the archive format actually in use.
    template<class Archive>
    void serialize(Archive & ar, const unsigned int version)
    {
        reserve(m_end);
        ar & m_capacity;
        ar & m_end;
        ar & samples;
        ar & samplingPdfs;
        ar & learnedPdfs;
        ar & heuristicPdfs;
        ar & heuristicWeights;
        ar & weights;
        ar & colorWeights;
        ar & isDiffuse;
        ar & stateDensities;
        ar & curvatures;
        // ar & functionValues;
        // ar & bsdfWeights;
        // ar & nextSamples;
        // ar & rewards;
        // ar & discounts;
        // ar & nextHeuristicPdfs;
        // ar & nextHeuristicWeights;
    }
};
}
#endif /* __SAMPLES_H */
|
contactmortar.c | /* CalculiX - A 3-dimensional finite element program */
/* Copyright (C) 1998-2019 Guido Dhondt */
/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU General Public License as */
/* published by the Free Software Foundation(version 2); */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include "CalculiX.h"
#include "mortar.h"
#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))
/**
* \brief function to include contact conditions with the dual mortar method in the transformed system
* see phd-thesis Sitzmann, Algorithm 2, p.71
*
* Author: Saskia Sitzmann
*
 * @param [in] ncont number of triangles in triangulation of master side
* @param [in] ntie number of ties
* @param [in] tieset (1,i) name of tie constraint (2,i) dependent surface (3,i) independent surface
* @param [in] nset number of sets
* @param [in] set (i) name of set i
* @param [in] istartset (i) pointer to ialset containing the first set member
* @param [in] iendset (i) pointer to ialset containing the last set member
* @param [in] ialset set members
 * @param [in] itietri (1,i) first triangle in field koncont for contact constraint i (2,i) last one
* @param [in] lakon (i) label for element i
* @param [in] ipkon pointer into field kon...
* @param [in] kon .. for element i storing the connectivity list of elem. in succ. order
* @param [in] koncont connectivity for master surface triangulation
* @param [in] ne number of elements
* @param [in] cg center of gravity for slave faces
* @param [in] straight plane equations for master surface triangulation
* @param [in] co coordinates of nodes
* @param [in] vold displacement of nodes
* @param [in] ielmat (j,i) material number of layer j
* @param [in] elcon material parameters
* @param [in] istep step number
* @param [in] iinc increment number
* @param [in] iit iteration number of Newton-Raphson iteration
* @param [in] ncmat_ maximum number of elastic material constants
* @param [in] ntmat_ maximum number of temperature data points for any material
* @param [in] ne0 number of elements without contact elements
* @param [in] vini displacements at the start of the increment
* @param [in] nmethod analysis method
* @param [in] neq number of active degrees of freedom
* @param [in] nzs number of nonzero,nondiagonal entries of matrix K
* @param [in] nactdof (i,j) actual degree of freedom for direction i of node j
* @param [in] itiefac pointer into field islavsurf: (1,i) beginning slave_i (2,i) end of slave_i
* @param [in] islavsurf islavsurf(1,i) slaveface i islavsurf(2,i) # integration points generated before looking at face i
* @param [in] islavnode field storing the nodes of the slave surface
* @param [in] imastnode field storing the nodes of the master surfaces
* @param [in] nslavnode (i)pointer into field isalvnode for contact tie i
* @param [in] nmastnode (i)pointer into field imastnode for contact tie i
* @param [in,out] ad diagonal terms of system matrix K
* @param [in,out] aup system matrix K
* @param [in,out] b right hand side of linear system
* @param [in,out] irowp rows for system matrix K
 * @param [in,out] icol columns for system matrix K
* @param [in,out] jq pointer to irow
 * @param [in] imastop connectivity of master triangulation jumping from triangle to triangle via common edge
* @param [in] iponoels (i) pointer to inoels
* @param [in] inoels (3,i) element number, local node number and pointer to another entry
* @param [out] nzsc number of nonzero,nondiagonal entries of intermediate system matrix
* @param [out] aucp intermediate system matrix
* @param [out] adc intermediate system matrix, diagonal terms
* @param [out] irowcp rows for intermediate system matrix
* @param [out] jqc pointer to irowc
* @param [in,out] islavact (i) indicates, if slave node i is active (=-3 no-slave-node, =-2 no-LM-node, =-1 no-gap-node, =0 inactive node, =1 sticky node, =2 slipping/active node)
* @param [in,out] gap (i) gap for node i on slave surface
* @param [in,out] slavnor slave normal
* @param [in,out] slavtan slave tangent
* @param [out] bhat intermediate right hand side
* @param [out] irowbdp field containing row numbers of aubd
* @param [out] jqbd pointer into field irowbd
* @param [out] aubdp coupling matrix \f$ B_d[nactdof(i,p),nactdof(j,q)]\f$ for all active degrees od freedoms
* @param [out] irowbdtilp field containing row numbers of aubd
* @param [out] jqbdtil pointer into field irowbdtil
* @param [out] aubdtilp matrix \f$ \tilde{D}^{-1}\tilde{B}_d[nactdof(i,p),nactdof(j,q)]\f$ for all active degrees od freedoms
* @param [out] irowbdtil2p field containing row numbers of aubdtil2
* @param [out] jqbdtil2 pointer into field irowbdtil2
* @param [out] aubdtil2p coupling matrix \f$ \tilde{D}$ and $\tilde{B}^2_d[nactdof(i,p),nactdof(j,q)]\f$ for all active degrees od freedoms
* @param [out] irowddp field containing row numbers of audd
* @param [out] jqdd pointer into field irowdd
* @param [out] auddp coupling matrix \f$ D_d[nactdof(i,p),nactdof(j,q)]\f$ for all active degrees od freedoms
* @param [out] irowddtilp field containing row numbers of audd
* @param [out] jqddtil pointer into field irowdd
* @param [out] auddtilp coupling matrix \f$ \tilde{D}_d[nactdof(i,p),nactdof(j,q)]\f$ for all active degrees od freedoms
* @param [out] irowddtil2p field containing row numbers of audd
* @param [out] jqddtil2 pointer into field irowdd
* @param [out] auddtil2p matrix \f$ Id_d[nactdof(i,p),nactdof(j,q)]\f$ for all active degrees od freedoms
* @param [out] irowddinvp field containing row numbers of auddinv
* @param [out] jqddinv pointer into field irowddinv
* @param [out] auddinvp coupling matrix \f$ \tilde{D}^{-1}_d[nactdof(i,p),nactdof(j,q)]\f$ for all active degrees od freedoms
* @param [in] irowtloc field containing row numbers of autloc
* @param [in] jqtloc pointer into field irowtloc
* @param [in] autloc transformation matrix \f$ T[p,q]\f$ for slave nodes \f$ p,q \f$
* @param [in] irowtlocinv field containing row numbers of autlocinv
* @param [in] jqtlocinv pointer into field irowtlocinv
* @param [in] autlocinv transformation matrix \f$ T^{-1}[p,q]\f$ for slave nodes \f$ p,q \f$
* @param [in] mi (1) max # of integration points per element (2) max degree of freedom per element
* @param [in] ipe (i) pointer to ime for node i
* @param [in] ime ... cataloging the edges with node i
* @param [in] tietol (1,i) tie tolerance (2,i) contant interaction material definition
* @param [in] iflagact here: flag indicating if coupling matrices should be updated every iteration or only once per increment (==0)
* @param [in] cstress current Lagrange multiplier
* @param [in] cstressini Lagrange multiplier at start of the increment
* @param [in] bp_old old friction bounds
* @param [in] iflag_fric flag indicating if iwan friction model is used
* @param [in] nk number of nodes
* @param [in] nboun number of SPCs
* @param [in] ndirboun (i) direction of SPC i
* @param [in] nodeboun (i) node of SPC i
* @param [in] xboun (i) value of SPC i
* @param [in] nmpc number of mpcs
* @param [in] ipompc (i) pointer to nodempc and coeffmpc for MPC i
* @param [in] nodempc nodes and directions of MPCs
* @param [in] coefmpc coefficients of MPCs
* @param [in] ikboun sorted dofs idof=8*(node-1)+dir for SPCs
* @param [in] ilboun SPC numbers for sorted dofs
* @param [in] ikmpc sorted dofs idof=8*(node-1)+dir for MPCs
* @param [in] ilmpc SPC numbers for sorted dofs
* @param [in] nboun2 number of transformed SPCs
* @param [in] ndirboun2 (i) direction of transformed SPC i
* @param [in] nodeboun2 (i) node of transformed SPC i
* @param [in] xboun2 (i) value of transformed SPC i
* @param [in] nmpc2 number of transformed mpcs
* @param [in] ipompc2 (i) pointer to nodempc and coeffmpc for transformed MPC i
* @param [in] nodempc2 nodes and directions of transformed MPCs
* @param [in] coefmpc2 coefficients of transformed MPCs
* @param [in] ikboun2 sorted dofs idof=8*(node-1)+dir for transformed SPCs
* @param [in] ilboun2 transformed SPC numbers for sorted dofs
* @param [in] ikmpc2 sorted dofs idof=8*(node-1)+dir for transformed MPCs
* @param [in] ilmpc2 transformed SPC numbers for sorted dofs
* @param [in] nslavspc (2*i) pointer to islavspc...
* @param [in] islavspc ... which stores SPCs for slave node i
* @param [in] nsspc number of SPC for slave nodes
* @param [in] nslavmpc (2*i) pointer to islavmpc...
* @param [in] islavmpc ... which stores MPCs for slave node i
* @param [in] nsmpc number of MPC for slave nodes
* @param [in] nslavspc2 (2*i) pointer to islavspc2...
* @param [in] islavspc2 ... which stores transformed SPCs for slave node i
* @param [in] nsspc2 number of transformed SPC for slave nodes
* @param [in] nslavmpc2 (2*i) pointer to islavmpc2...
* @param [in] islavmpc2 ... which stores transformed MPCs for slave node i
* @param [in] nsmpc2 number of transformed MPC for slave nodes
* @param [in] nmastspc (2*i) pointer to imastspc...
* @param [in] imastspc ... which stores SPCs for master node i
* @param [in] nmspc number of SPC for master nodes
* @param [in] nmastmpc (2*i) pointer to imastmpc...
* @param [in] imastmpc ... which stores MPCs for master node i
* @param [in] nmmpc number of MPC for master nodes
* @param [in] pslavdual (:,i)coefficients \f$ \alpha_{ij}\f$, \f$ 1,j=1,..8\f$ for dual shape functions for face i
* @param [in] pslavdualpg (:,i)coefficients \f$ \alpha_{ij}\f$, \f$ 1,j=1,..8\f$ for Petrov-Galerkin shape functions for face i
* @param [in] islavactdof (i)=10*slavenodenumber+direction for active dof i
* @param [in] islavactdoftie (i)=tie number for active dof i
* @param [in] plicon isotropic hardening curve or points for pressure-overclosure=tabular
* @param [in] nplicon isotropic hardening curve.
* @param [in] npmat_ maximum number of data points for plicon
* @param [in] nelcon (1,i) number of elastic constants for material i (2,i) number of temperature points
* @param [in] dtime delta time
* @param [in] islavnodeinv (i) slave node index for node i
* @param [out] Bdp coupling matrix \f$ B_d[p,q]=\int \psi_p \phi_q dS \f$, \f$ p \in S, q \in M \f$
* @param [out] irowbp field containing row numbers of Bd
* @param [out] jqb pointer into field irowb
* @param [out] Bdhelpp coupling matrix \f$ Bhelp_d[p,q]=\tilde{D}^{-1}\tilde{B}\f$, \f$ p \in S, q \in M \f$
* @param [out] irowbhelpp field containing row numbers of Bdhelp
* @param [out] jqbhelp pointer into field irowbhelp
* @param [out] Ddp coupling matrix \f$ D_d[p,q]=\int \psi_p \phi_q dS \f$, \f$ p,q \in S \f$
* @param [out] irowdp field containing row numbers of Dd
* @param [out] jqd pointer into field irowd
* @param [out] Ddtilp coupling matrix \f$ \tilde{D}_d[p,q]=\int \psi_p \tilde{\phi}_q dS \f$, \f$ p,q \in S \f$
* @param [out] irowdtilp field containing row numbers of Ddtil
* @param [out] jqdtil pointer into field irowdtil
* @param [out] Bdtilp coupling matrix \f$ \tilde{B}_d[p,q]=\int \psi_p \tilde{\phi}_q dS \f$, \f$ p \in S, q \in M \f$
* @param [out] irowbtilp field containing row numbers of Bdtil
* @param [out] jqbtil pointer into field irowbtil
* @param [out] Bpgdp Petrov-Galerkin coupling matrix \f$ B_d^{PG}[p,q]=\int \tilde{\phi}_p \phi_q dS \f$, \f$ p \in S, q \in M \f$
* @param [out] irowbpgp field containing row numbers of Bpgd
* @param [out] jqbpg pointer into field irowbpg
* @param [out] Dpgdp Petrov-Galerkin coupling matrix \f$ D_d[p,q]=\int \tilde{\phi}_p \phi_q dS \f$, \f$ p,q \in S \f$
* @param [out] irowdpgp field containing row numbers of Dpgd
* @param [out] jqdpg pointer into field irowdpg
* @param [out] Dpgdtilp transformed Petrov-Galerkin coupling matrix \f$ D_d[p,q]=\int \tilde{\phi}_p \tilde{\phi}_q dS \f$, \f$ p,q \in S \f$
* @param [out] irowdpgtilp field containing row numbers of Dpgdtil
* @param [out] jqdpgtil pointer into field irowdpgtil
* @param [out] Bpgdtilp transformed Petrov-Galerkin coupling matrix \f$ B_d^{PG}[p,q]=\int \tilde{\phi}_p \tilde{\phi}_q dS \f$, \f$ p \in S, q \in M \f$
* @param [out] irowbpgtilp field containing row numbers of Bpgdtil
* @param [out] jqbpgtil pointer into field irowbpgtil
* @param [in] lambdaiwan Lagrange multiplier split over the Iwan elements
* @param [in] lambdaiwanini Lagrange multiplier split over the Iwan elements at the start of the increment
* @param [in] bet parameter used in alpha-method
* @param [in] iflagdualquad flag indicating what mortar contact is used (=1 quad-lin, =2 quad-quad, =3 PG quad-lin, =4 PG quad-quad)
* @param [in] labmpc2 labels of the transformed MPCs (presumably parallel to ipompc2 — confirm against caller)
* @param [in,out] cfsinitil \f$ \tilde{\Phi}_{c,j}\f$ contact forces from last increment, needed for dynamic calculations
* @param [in] reltime relative step time, needed for shrink
*/
void contactmortar(ITG *ncont, ITG *ntie, char *tieset, ITG *nset, char *set,
ITG *istartset, ITG *iendset, ITG *ialset, ITG *itietri,
char *lakon, ITG *ipkon, ITG *kon, ITG *koncont, ITG *ne,
double *cg, double *straight, double *co,
double *vold, ITG *ielmat,double *elcon,
ITG *istep,ITG *iinc,ITG *iit,ITG *ncmat_,ITG *ntmat_,
ITG *ne0, double *vini,
ITG *nmethod,ITG *neq, ITG *nzs, ITG *nactdof, ITG *itiefac,
ITG *islavsurf, ITG *islavnode, ITG *imastnode,
ITG *nslavnode, ITG *nmastnode, double *ad,
double **aup, double *b, ITG **irowp, ITG *icol, ITG *jq, ITG *imastop,
ITG *iponoels, ITG *inoels, ITG *nzsc, double **aucp,
double *adc, ITG **irowcp, ITG *jqc, ITG *islavact,
double *gap,
double *slavnor,double *slavtan,
double *bhat,
ITG **irowbdp, ITG *jqbd, double **aubdp,
ITG **irowbdtilp, ITG *jqbdtil,double **aubdtilp,
ITG **irowbdtil2p, ITG *jqbdtil2,double **aubdtil2p,
ITG **irowddp, ITG *jqdd,double **auddp,
ITG **irowddtilp, ITG *jqddtil,double **auddtilp,
ITG **irowddtil2p, ITG *jqddtil2,double **auddtil2p,
ITG **irowddinvp, ITG *jqddinv,double **auddinvp,
ITG *irowtloc, ITG *jqtloc,double *autloc,
ITG *irowtlocinv, ITG *jqtlocinv,double *autlocinv,
ITG *mi,ITG *ipe, ITG *ime,double *tietol,ITG *iflagact,double *cstress,
double *cstressini,double *bp_old,ITG *iflag_fric, ITG *nk,
ITG *nboun,ITG *ndirboun,ITG *nodeboun,double *xboun,
ITG *nmpc,ITG *ipompc,ITG *nodempc,double *coefmpc,
ITG *ikboun,ITG *ilboun,ITG *ikmpc,ITG *ilmpc,
ITG *nboun2,ITG *ndirboun2,ITG *nodeboun2,double *xboun2,
ITG *nmpc2,ITG *ipompc2,ITG *nodempc2,double *coefmpc2,
ITG *ikboun2,ITG *ilboun2,ITG *ikmpc2,ITG *ilmpc2,
ITG *nslavspc,ITG *islavspc,ITG *nsspc,ITG *nslavmpc,ITG *islavmpc,ITG *nsmpc,
ITG *nslavspc2,ITG *islavspc2,ITG *nsspc2,ITG *nslavmpc2,ITG *islavmpc2,ITG *nsmpc2,
ITG *nmastspc,ITG *imastspc,ITG *nmspc,ITG *nmastmpc,ITG *imastmpc,ITG *nmmpc,
ITG *nmastmpc2,ITG *imastmpc2,ITG *nmmpc2,
double *pslavdual,double *pslavdualpg,
ITG *islavactdof,
ITG *islavactdoftie,
double *plicon,ITG *nplicon, ITG *npmat_,ITG *nelcon, double *dtime,
ITG *islavnodeinv,
double **Bdp, ITG **irowbp,ITG *jqb,
double **Bdhelpp, ITG **irowbhelpp,ITG *jqbhelp,
double **Ddp, ITG **irowdp,ITG *jqd,
double **Ddtilp, ITG **irowdtilp,ITG *jqdtil,
double **Bdtilp, ITG **irowbtilp,ITG *jqbtil,
double **Bpgdp, ITG **irowbpgp,ITG *jqbpg,
double **Dpgdp, ITG **irowdpgp,ITG *jqdpg,
double **Dpgdtilp, ITG **irowdpgtilp,ITG *jqdpgtil,
double **Bpgdtilp, ITG **irowbpgtilp,ITG *jqbpgtil,
double *lambdaiwan,double *lambdaiwanini, double *bet, ITG *iflagdualquad,
char *labmpc2, double *cfsinitil,double *reltime, ITG *ithermal,
double *plkcon, ITG *nplkcon){
ITG i,j,k,numb,ntrimax,*nx=NULL,*ny=NULL,*nz=NULL,nintpoint=0,calcul_fn,calcul_f,
nzsbd,*irowbd=NULL,*irowdd=NULL,*irowddinv=NULL,*irowddtil=NULL,*irowbdtil=NULL,
nzs2,niwan,*irowddtil2=NULL,*irowbdtil2=NULL,
l,nstart,kflag,ntri,ii,number,regmode,derivmode,regmodet=1,
*irowc=NULL,*imastsurf=NULL,imastk2,num_cpus=1,
*irow=NULL,*irowqdt=NULL, *irowb=NULL,*irowbhelp=NULL, *irowd=NULL,*irowdtil=NULL, *irowbtil=NULL,
*irowbpg=NULL, *irowdpg=NULL,*irowdpgtil=NULL, *irowbpgtil=NULL,
debug,nacti, ninacti, nnogap,nstick,nnolm,nnoslav,nzsbdtil,nzsbdtil2;
double *xo=NULL,*yo=NULL,*zo=NULL,*x=NULL,*y=NULL,*z=NULL,*aubd=NULL,*cstresstil=NULL,scal,
*audd=NULL, *auddtil=NULL, *auddtil2=NULL, *auddinv=NULL,
*auc=NULL, *pmastsurf=NULL,*auqdt=NULL,*gapmints=NULL,
*au=NULL,*pslavsurf=NULL,*aubdtil=NULL,*aubdtil2=NULL,
*Bd=NULL,*Bdhelp=NULL, *Dd=NULL,*Ddtil=NULL, *Bdtil=NULL,
*Bpgd=NULL, *Dpgd=NULL,*Dpgdtil=NULL, *Bpgdtil=NULL,
*areaslav=NULL,*anull=NULL,mu,fkninv,fktauinv,p0,beta,*rs=NULL,*rsb=NULL, alpha,*fmpc=NULL;
double aninvloc,gnc,u_nold,u_told[2],nu_told,lnold,lt[2],nltold,constant=1.e10,atau2,sb,rb,resreg[2];
double *u_old=NULL,*u_oldt=NULL;
ITG mt=mi[1]+1,nodes,id,islavk2,dim,idof1,idof2,jj,kk;
clock_t debut;
clock_t fin;
irow = *irowp; au=*aup; auc=*aucp;irowc=*irowcp;
aubd=*aubdp; irowbd=*irowbdp;
aubdtil=*aubdtilp; irowbdtil=*irowbdtilp;
aubdtil2=*aubdtil2p; irowbdtil2=*irowbdtil2p;
irowdd = *irowddp; audd=*auddp;
irowddtil = *irowddtilp; auddtil=*auddtilp;
irowddtil2 = *irowddtil2p; auddtil2=*auddtil2p;
irowddinv = *irowddinvp; auddinv=*auddinvp;
irowb = *irowbp; Bd =*Bdp;irowbhelp = *irowbhelpp; Bdhelp =*Bdhelpp;
irowd = *irowdp; Dd =*Ddp;
irowdtil = *irowdtilp; Ddtil =*Ddtilp;
irowbtil = *irowbtilp; Bdtil =*Bdtilp;
irowbpg = *irowbpgp; Bpgd =*Bpgdp;
irowdpg = *irowdpgp; Dpgd =*Dpgdp;
irowdpgtil = *irowdpgtilp; Dpgdtil =*Dpgdtilp;
irowbpgtil = *irowbpgtilp; Bpgdtil =*Bpgdtilp;
debug=0;
NNEW(rs,double,(mt)**nk);
NNEW(rsb,double,neq[1]);
printf(" contactmortar: start\n");
/* coupling the slave degrees of freedom with the corresponding
slave nodes and doing the same for the master nodes */
FORTRAN(genislavactdof,(ntie,tieset,neq,nactdof,nslavnode,islavact,
nmastnode,imastnode,islavactdof,
islavnode,mi,ithermal));
/* right now iflagact is 1 in the first iteration of every increment and 0 for all subsequent iterations.
Thus the update of the normal and tangentials as well as the segmentation of the contact surface nedded
for the calculation of the coupling matrices is done only once per increment, since a combined fix-point
Newton approach in implemented, see phd-thesis Saskia Sitzmann, Chapter 3 introduction */
if(*iflagact==0){
/* update the location of the center of gravity of
the master triangles and the coefficients of their
bounding planes needed for the search algorithm in slavintmortar->neartriangle */
FORTRAN(updatecont,(koncont,ncont,co,vold,
cg,straight,mi));
/* determining the size of the auxiliary fields
(needed for the master triangle search for any
given location on the slave faces) */
ntrimax=0;
for(i=0;i<*ntie;i++){
if(itietri[2*i+1]-itietri[2*i]+1>ntrimax)
ntrimax=itietri[2*i+1]-itietri[2*i]+1;
}
//if ((*iinc==1)&&(*iit==1)){
/* For the first step, first increment, first iteration an initial guess for
the active set is generated analogous to node-to-surface penalty */
if ((*iinc==1)&&(*iit==1)&&(*istep==1)){
debut=clock();
NNEW(xo,double,ntrimax);
NNEW(yo,double,ntrimax);
NNEW(zo,double,ntrimax);
NNEW(x,double,ntrimax);
NNEW(y,double,ntrimax);
NNEW(z,double,ntrimax);
NNEW(nx,ITG,ntrimax);
NNEW(ny,ITG,ntrimax);
NNEW(nz,ITG,ntrimax);
NNEW(areaslav,double,itiefac[2*(*ntie-1)+1]);
ITG ifree=0;
FORTRAN(genfirstactif,(tieset,ntie,itietri,ne,ipkon,kon,lakon,
cg,straight,koncont,
co,vold,xo,yo,zo,x,y,z,nx,ny,nz,ielmat,elcon,istep,
iinc,iit,ncmat_,ntmat_,ne0,vini,nmethod,mi,
imastop,nslavnode,islavnode,islavsurf,itiefac,areaslav,iponoels,
inoels,set,nset,istartset,iendset,ialset,islavact,&ifree,
tietol));
printf("\tFrist Active Set : %" ITGFORMAT " nodes\n",ifree);
fin= clock();
printf("\tgenfirstactiv : %f s\n",((double)(fin-debut))/CLOCKS_PER_SEC);
SFREE(xo);SFREE(yo);SFREE(zo);SFREE(x);SFREE(y);SFREE(z);SFREE(nx);
SFREE(ny);SFREE(nz);
SFREE(areaslav);
}
fflush(stdout);
nacti=0; ninacti=0; nnogap=0;nstick=0;nnolm=0;nnoslav=0;
for (i=0;i<*ntie;i++){
if(tieset[i*(81*3)+80]=='C'){
for(j=nslavnode[i];j<nslavnode[i+1];j++){
if(islavact[j]<0){islavact[j]=-3;}
if(islavact[j]==2){nacti++;}
if(islavact[j]==0){ninacti++;}
if(islavact[j]==-3){nnoslav++;}
}
}
}
printf("\tcm: N_Activ: %" ITGFORMAT "\t N_stick: %" ITGFORMAT "\tN_Inactiv: %" ITGFORMAT "\t N_nogap: %" ITGFORMAT "\t N_nolm %" ITGFORMAT "\n",nacti,nstick,ninacti,nnogap,nnolm);
NNEW(xo,double,ntrimax);
NNEW(yo,double,ntrimax);
NNEW(zo,double,ntrimax);
NNEW(x,double,ntrimax);
NNEW(y,double,ntrimax);
NNEW(z,double,ntrimax);
NNEW(nx,ITG,ntrimax);
NNEW(ny,ITG,ntrimax);
NNEW(nz,ITG,ntrimax);
/* calculating the normals,tangents in the nodes of the slave
surface */
debut=clock();
FORTRAN(gencontrel,(tieset,ntie,itietri,ipkon,kon,
lakon,set,cg,straight,
koncont,co,vold,nset,
islavsurf,itiefac,
islavnode,nslavnode,slavnor,slavtan,mi));
fin= clock();
printf("\tgencontrel : %f s\n",((double)(fin-debut))/CLOCKS_PER_SEC);
/* Calculating the location of the matched slave/master
integration points, see phd-thesis Saskia Sitzmann, Appendix A */
debut=clock();
NNEW(imastsurf,ITG,66);
NNEW(gapmints,double,66);
NNEW(pmastsurf,double,132);
NNEW(pslavsurf,double,198);
islavsurf[1]=0;
//#pragma omp for
for(i=0;i<*ntie;i++){
ii=i+1;
if(tieset[i*(81*3)+80]=='C'){
nstart=itietri[2*i]-1;
ntri=(itietri[2*i+1]-nstart);
for(j=0;j<ntri;j++){
xo[j]=cg[(nstart+j)*3];
x[j]=xo[j];
nx[j]=j+1;
yo[j]=cg[(nstart+j)*3+1];
y[j]=yo[j];
ny[j]=j+1;
zo[j]=cg[(nstart+j)*3+2];
z[j]=zo[j];
nz[j]=j+1;
}
kflag=2;
FORTRAN(dsort,(x,nx,&ntri,&kflag));
FORTRAN(dsort,(y,ny,&ntri,&kflag));
FORTRAN(dsort,(z,nz,&ntri,&kflag));
for(l=itiefac[2*i];l<=itiefac[2*i+1];l++){
RENEW(imastsurf,ITG,nintpoint+ntri*8*7);
RENEW(gapmints,double,nintpoint+ntri*8*7);
RENEW(pmastsurf,double,2*(nintpoint+ntri*8*7));
RENEW(pslavsurf,double,3*(nintpoint+ntri*8*7));
FORTRAN(slavintmortar,(tieset,ntie,itietri,ipkon,kon,
lakon,set,cg,straight,&nintpoint,
koncont,co,vold,xo,yo,zo,x,y,z,nx,ny,nz,nset,
iinc,iit,
islavsurf,imastsurf,pmastsurf,itiefac,
islavnode,nslavnode,slavnor,slavtan,imastop,gapmints,
islavact,mi,ncont,ipe,ime,pslavsurf,&ii,&l,&ntri,tietol,
reltime,nmethod));
}
}
}
fin= clock();
printf("\tslavintmortar : %f s\n",((double)(fin-debut))/CLOCKS_PER_SEC);
printf("\tnumber of slave integration points = %" ITGFORMAT "\n",nintpoint);
if (nintpoint!=0){
RENEW(imastsurf,ITG,nintpoint);
}else{
RENEW(imastsurf,ITG,1);
}
if (nintpoint!=0){
RENEW(gapmints,double,nintpoint);
}else{
RENEW(gapmints,double,1);
}
if (nintpoint!=0){
RENEW(pmastsurf,double,2*nintpoint);
}else{
RENEW(pmastsurf,double,2);
}
if (nintpoint!=0){
RENEW(pslavsurf,double,3*nintpoint);
}else{
RENEW(pslavsurf,double,3);
}
SFREE(xo);SFREE(yo);SFREE(zo);SFREE(x);SFREE(y);SFREE(z);SFREE(nx);
SFREE(ny);SFREE(nz);
/* check SPC's and MPC's on slave nodes for compability and set all slave nodes involed in SPCs/MPCs to no-LM nodes */
debut=clock();
FORTRAN(checkspcmpc,(lakon,ipkon,kon,ntie,tieset,
islavnode,
imastnode,nslavnode,nmastnode,
slavnor,islavact,
nboun,ndirboun,nodeboun,xboun,
nmpc,ipompc,nodempc,coefmpc,
ikboun,ilboun,ikmpc,ilmpc,
nboun2,ndirboun2,nodeboun2,xboun2,
nmpc2,ipompc2,nodempc2,coefmpc2,
ikboun2,ilboun2,ikmpc2,ilmpc2,
nslavspc,islavspc,nsspc,nslavmpc,islavmpc,nsmpc,
nmastspc,imastspc,nmspc,nmastmpc,imastmpc,nmmpc));
fin= clock();
printf("\tcheckspcmpc : %f s\n",((double)(fin-debut))/CLOCKS_PER_SEC);
nacti=0; ninacti=0; nnogap=0;nstick=0;nnolm=0;nnoslav=0;
for (i=0;i<*ntie;i++){
if(tieset[i*(81*3)+80]=='C'){
for(j=nslavnode[i];j<nslavnode[i+1];j++){
if(islavact[j]==2){nacti++;}
if(islavact[j]==1){nstick++;}
if(islavact[j]==0){ninacti++;}
if(islavact[j]==-1){nnogap++;}
if(islavact[j]==-2){nnolm++;}
if(islavact[j]==-3){nnoslav++;}
}
}
}
printf("\tcm: N_Activ: %" ITGFORMAT "\t N_stick: %" ITGFORMAT "\tN_Inactiv: %" ITGFORMAT "\t N_nogap: %" ITGFORMAT "\t N_nolm: %" ITGFORMAT "\n",nacti,nstick,ninacti,nnogap,nnolm);
/* calculating the coeffs of dual basis functions (Sitzmann, Chapter 3.3.) and redistribute contributions of nogap
and noLM nodes other slave nodes (Sitzmann, Chapter 4.3.)*/
debut=clock();
FORTRAN(gendualcoeffs,(tieset,ntie,ipkon,kon,
lakon,
co,vold,
iinc,iit,islavact,
islavsurf,itiefac,
islavnode,nslavnode,
mi,pslavsurf,pslavdual,pslavdualpg,iflagdualquad));
fin= clock();
printf("\tgendualcoeffs : %f s\n",((double)(fin-debut))/CLOCKS_PER_SEC);
/* calculate all mortar coupling matrices as well as the dual gap via the segmentation of the slave surface */
nzsbd = 6*nslavnode[*ntie];
nzsbdtil=6*nslavnode[*ntie];
debut=clock();
bdfill(&irowbd, jqbd, &aubd, &nzsbd,
&irowbdtil, jqbdtil, &aubdtil,&nzsbdtil,
&irowbdtil2, jqbdtil2, &aubdtil2,&nzsbdtil2,
&irowdd, jqdd, &audd,
&irowddtil, jqddtil, &auddtil,
&irowddtil2, jqddtil2, &auddtil2,
&irowddinv, jqddinv, &auddinv,
irowtloc, jqtloc, autloc,
irowtlocinv, jqtlocinv, autlocinv,
ntie,
ipkon, kon, lakon, nslavnode, nmastnode, imastnode, islavnode,
islavsurf, imastsurf, pmastsurf, itiefac,tieset, neq, nactdof,co,vold,
iponoels, inoels,mi,gapmints,gap,pslavsurf,pslavdual,pslavdualpg,&nintpoint,slavnor,nk,
nboun,ndirboun,nodeboun,xboun,
nmpc,ipompc,nodempc,coefmpc,
ikboun,ilboun,ikmpc,ilmpc,
nboun2,ndirboun2,nodeboun2,xboun2,
nmpc2,ipompc2,nodempc2,coefmpc2,
ikboun2,ilboun2,ikmpc2,ilmpc2,
nslavspc,islavspc,nsspc,nslavmpc,islavmpc,nsmpc,
nslavspc2,islavspc2,nsspc2,nslavmpc2,islavmpc2,nsmpc2,
nmastspc,imastspc,nmspc,nmastmpc,imastmpc,nmmpc,
nmastmpc2,imastmpc2,nmmpc2,
iit, iinc,islavactdof,islavact,islavnodeinv,
&Bd,&irowb,jqb,
&Bdhelp,&irowbhelp,jqbhelp,
&Dd,&irowd,jqd,
&Ddtil,&irowdtil,jqdtil,
&Bdtil,&irowbtil,jqbtil,
&Bpgd,&irowbpg,jqbpg,
&Dpgd,&irowdpg,jqdpg,
&Dpgdtil,&irowdpgtil,jqdpgtil,
&Bpgdtil,&irowbpgtil,jqbpgtil,
iflagdualquad,ithermal);
fin= clock();
printf("\tbdfill : %f s\n",((double)(fin-debut))/CLOCKS_PER_SEC);
SFREE(imastsurf);SFREE(pmastsurf);SFREE(gapmints);SFREE(pslavsurf);
fflush(stdout);
}
/* get uhat_k-1 for first increment and first iteration**/
if(*iit==1){
NNEW(u_old,double,3*nslavnode[*ntie]);
NNEW(u_oldt,double,3*nslavnode[*ntie]);
NNEW(cstresstil,double,mt*nslavnode[*ntie]);
for (i=0;i<*ntie;i++){
if(tieset[i*(81*3)+80]=='C'){
for(j=nslavnode[i];j<nslavnode[i+1];j++){
nodes=islavnode[j];
for(jj=jqd[nodes-1]-1;jj<jqd[nodes-1+1]-1;jj++){
u_oldt[(islavnodeinv[irowd[jj]-1]-1)*3]+=Dd[jj]*(vold[mt*(nodes)-3]-vini[mt*(nodes)-3]);
u_oldt[(islavnodeinv[irowd[jj]-1]-1)*3+1]+=Dd[jj]*(vold[mt*(nodes)-2]-vini[mt*(nodes)-2]);
u_oldt[(islavnodeinv[irowd[jj]-1]-1)*3+2]+=Dd[jj]*(vold[mt*(nodes)-1]-vini[mt*(nodes)-1]);
u_old[(islavnodeinv[irowd[jj]-1]-1)*3]+=Dd[jj]*(vold[mt*(nodes)-3]);
u_old[(islavnodeinv[irowd[jj]-1]-1)*3+1]+=Dd[jj]*(vold[mt*(nodes)-2]);
u_old[(islavnodeinv[irowd[jj]-1]-1)*3+2]+=Dd[jj]*(vold[mt*(nodes)-1]);
}
}
}
}
for (i=0;i<*ntie;i++){
if(tieset[i*(81*3)+80]=='C'){
for(j=nmastnode[i];j<nmastnode[i+1];j++){
nodes=imastnode[j];
for(jj=jqb[nodes-1]-1;jj<jqb[nodes-1+1]-1;jj++){
u_oldt[(islavnodeinv[irowb[jj]-1]-1)*3]+=Bd[jj]*(vold[mt*(nodes)-3]-vini[mt*(nodes)-3]);
u_oldt[(islavnodeinv[irowb[jj]-1]-1)*3+1]+=Bd[jj]*(vold[mt*(nodes)-2]-vini[mt*(nodes)-2]);
u_oldt[(islavnodeinv[irowb[jj]-1]-1)*3+2]+=Bd[jj]*(vold[mt*(nodes)-1]-vini[mt*(nodes)-1]);
u_old[(islavnodeinv[irowb[jj]-1]-1)*3]+=Bd[jj]*(vold[mt*(nodes)-3]);
u_old[(islavnodeinv[irowb[jj]-1]-1)*3+1]+=Bd[jj]*(vold[mt*(nodes)-2]);
u_old[(islavnodeinv[irowb[jj]-1]-1)*3+2]+=Bd[jj]*(vold[mt*(nodes)-1]);
}
}
}
}
for (i=0;i<*ntie;i++){
if(tieset[i*(81*3)+80]=='C'){
for(j=nslavnode[i];j<nslavnode[i+1];j++){
nodes=islavnode[j];
for(jj=jqdtil[nodes-1]-1;jj<jqdtil[nodes-1+1]-1;jj++){
cstresstil[(islavnodeinv[irowdtil[jj]-1]-1)*(mt)]+=Ddtil[jj]*cstress[(islavnodeinv[nodes-1]-1)*(mt)+0];
cstresstil[(islavnodeinv[irowdtil[jj]-1]-1)*(mt)+1]+=Ddtil[jj]*cstress[(islavnodeinv[nodes-1]-1)*(mt)+1];
cstresstil[(islavnodeinv[irowdtil[jj]-1]-1)*(mt)+2]+=Ddtil[jj]*cstress[(islavnodeinv[nodes-1]-1)*(mt)+2];
}
}
}
}
}
nacti=0; ninacti=0; nnogap=0;nstick=0;nnolm=0;nnoslav=0;
for (i=0;i<*ntie;i++){
if(tieset[i*(81*3)+80]=='C'){
for(j=nslavnode[i];j<nslavnode[i+1];j++){
/* adjust active set for first iteration of first increment **/
if((*iit==1)){
u_told[0]=u_old[(j)*3+0]*slavtan[(j*6)+0]+u_old[(j)*3+1]*slavtan[(j*6)+1]+u_old[(j)*3+2]*slavtan[(j*6)+2];
u_told[1]=u_old[(j)*3+0]*slavtan[(j*6)+3]+u_old[(j)*3+1]*slavtan[(j*6)+4]+u_old[(j)*3+2]*slavtan[(j*6)+5];
nu_told=sqrt(u_told[0]*u_told[0]+u_told[1]*u_told[1]);
lnold=cstresstil[(j)*(mt)+0]*slavnor[(j*3)+0]+cstresstil[(j)*(mt)+1]*slavnor[(j*3)+1]+cstresstil[(j)*(mt)+2]*slavnor[(j*3)+2];
lt[0]=cstresstil[(j)*(mt)+0]*slavtan[(j*6)+0]+cstresstil[(j)*(mt)+1]*slavtan[(j*6)+1]+cstresstil[(j)*(mt)+2]*slavtan[(j*6)+2];
lt[1]=cstresstil[(j)*(mt)+0]*slavtan[(j*6)+3]+cstresstil[(j)*(mt)+1]*slavtan[(j*6)+4]+cstresstil[(j)*(mt)+2]*slavtan[(j*6)+5];
nltold=sqrt(lt[0]*lt[0]+lt[1]*lt[1]);
lnold=cstresstil[(j)*(mt)+0]*slavnor[(j*3)+0]+cstresstil[(j)*(mt)+1]*slavnor[(j*3)+1]+cstresstil[(j)*(mt)+2]*slavnor[(j*3)+2];
FORTRAN(getcontactparams,(&mu,®mode,®modet,&fkninv,&fktauinv,&p0,&beta,tietol,elcon,&i,ncmat_,ntmat_,&niwan));
derivmode=0;
if(islavact[j]>-1){scal=Ddtil[jqdtil[islavnode[j]-1]-1];}else{scal=0.0;}
FORTRAN(regularization_gn_c,(&lnold,&derivmode,®mode,&gnc,&aninvloc,&p0,&beta,elcon,nelcon,&i,ntmat_,
plicon,nplicon,npmat_,ncmat_,tietol,&scal));
if(mu>1.E-10){
bp_old[j]=mu*(lnold);
}else{
bp_old[j]=(lnold);
}
jj=j+1;
nltold=sqrt((lt[0])*(lt[0])+(lt[1])*(lt[1]));
// in case of NO friction node must set "slip"
if(mu>1.E-10 ){
if(*iinc==1){
if(islavact[j]==0 && gap[j]<1.e-9 ) {islavact[j]=1;}
if(islavact[j]>0 && bp_old[j]<1.e-14){bp_old[j]=1;}
if(regmodet==1){
if(islavact[j]>0 && nltold <1.e-5){islavact[j]=1;}// first step
}else{
if(islavact[j]==1){islavact[j]=2;}
}
}
}else{
if(*iinc==1){
if(gap[j]>1E-10 && islavact[j]>0 ){islavact[j]=0; bp_old[j]=0.0;}
if(gap[j]<1E-10 && islavact[j]==0 ){islavact[j]=2;}
if(islavact[j]==1){islavact[j]=2;}
}
}
}
if(islavact[j]==2){nacti++;}
if(islavact[j]==1){nstick++;}
if(islavact[j]==0){ninacti++;}
if(islavact[j]==-1){nnogap++;}
if(islavact[j]==-2){nnolm++;}
if(islavact[j]==-3){nnoslav++;}
}
}
}
if(*iinc==1 && *iit==1 && *istep==1 ){
// set initial value for bp_old in first iteration of first increment of first step
for (i=0;i<*ntie;i++){
if(tieset[i*(81*3)+80]=='C'){
for(j=nslavnode[i];j<nslavnode[i+1];j++){
bp_old[j]=1.0;
}
}
}
}
if(*iit==1){SFREE(u_old);SFREE(u_oldt);SFREE(cstresstil);}
printf("\tcm: N_Activ: %" ITGFORMAT "\t N_stick: %" ITGFORMAT "\tN_Inactiv: %" ITGFORMAT "\t N_nogap: %" ITGFORMAT "\t N_nolm: %" ITGFORMAT " N_noslav: %" ITGFORMAT "\n",nacti,nstick,ninacti,nnogap,nnolm,nnoslav);
/* modifying the stiffnes matrix K with the coupling matrices; the
expanded (symmetric) matrix is described in asymmetric form by
the fields auc, adc, irowc, jqc and nzsc, bhat */
nzsbd=jqbd[neq[1]]-1;
*nzsc = nzs[1];
debut=clock();
nzs2=nzs[1];
alpha = 1-2*sqrt(*bet);
/* alter mechanical part of residuum for dynamic calculations, see Sitzmann Chapter 5.1., Equation (5.18) */
if(*nmethod==4){
//if(*iinc==1 && *iit==1){
for(i=0;i<mt**nk;i++){cfsinitil[i]=0.0;}
for(i=0;i<*nk;i++){
for(j=jqdtil[i]-1;j<jqdtil[i+1]-1;j++){
for(l=0;l<3;l++){
cfsinitil[mt*(i+1)-3+l]+=Ddtil[j]*cstressini[mt*(islavnodeinv[irowdtil[j]-1]-1)+l];
}
}
}
for(i=0;i<*nk;i++){
for(j=jqbtil[i]-1;j<jqbtil[i+1]-1;j++){
for(l=0;l<3;l++){
cfsinitil[mt*(i+1)-3+l]+=Bdtil[j]*cstressini[mt*(islavnodeinv[irowbtil[j]-1]-1)+l];
}
}
}
//}
for(i=0;i<*nk*mt;i++){
rs[i]=cfsinitil[i]*(alpha);
}
NNEW(fmpc,double,*nmpc);
calcul_fn=1;
calcul_f=1;
resultsforc(nk,rsb,rs,nactdof,ipompc2,nodempc2,
coefmpc2,labmpc2,nmpc2,mi,fmpc,&calcul_fn,
&calcul_f,&num_cpus);
SFREE(fmpc);
for(i=0;i<neq[1];i++){b[i]+=rsb[i];}
}
/* modifying the stiffnes matrix K with the coupling matrices; embedding of the contact conditions
the expanded (symmetric) matrix is described in asymmetric form by
the fields auc, adc, irowc, jqc and nzsc, bhat */
/* k needed in semi-smooth Newton in tangential direction:
k=1 stick is assumed in all nodes, k=2 stick or slip is assummed
according to active set entry*/
if(*iit==1 && *iinc==1){k=1;}else{k=2;}
if(*iflagdualquad>2){
/* Petrov-Galerkin formulation (Sitzmann, Chapter 4.2.) */
multimortar2(&au, ad, &irow, jq, &nzs2,
&auc, adc, &irowc, jqc, nzsc,
aubd,irowbd,jqbd,
aubdtil,irowbdtil,jqbdtil,
aubdtil2,irowbdtil2,jqbdtil2,
irowdd, jqdd, audd,
irowddtil2, jqddtil2, auddtil2,
irowddinv, jqddinv, auddinv,
Bpgd,irowbpg,jqbpg,
Dpgd,irowdpg,jqdpg,
Ddtil,irowdtil,jqdtil,
neq,b,bhat,islavnode,imastnode,nslavnode,nmastnode,
islavact,islavactdof,
gap,
slavnor,slavtan,
vold,vini,cstress,cstressini,
bp_old,nactdof,ntie,mi,nk,
nboun,ndirboun,nodeboun,xboun,
nmpc,ipompc,nodempc,coefmpc,
ikboun,ilboun,ikmpc,ilmpc,
nslavspc,islavspc,nsspc,nslavmpc,islavmpc,nsmpc,
nmastspc,imastspc,nmspc,nmastmpc,imastmpc,nmmpc,
tieset,
islavactdoftie,nelcon,elcon,tietol,ncmat_,ntmat_,
plicon,nplicon,npmat_,dtime,
irowtloc, jqtloc, autloc,
irowtlocinv, jqtlocinv, autlocinv,
islavnodeinv,lambdaiwan,lambdaiwanini,&k,nmethod,bet,
ithermal,plkcon,nplkcon);
}else{
/* normal formulation (Sitzmann, Chapter 4.1.)*/
multimortar2(&au, ad, &irow, jq, &nzs2,
&auc, adc, &irowc, jqc, nzsc,
aubd,irowbd,jqbd,
aubdtil,irowbdtil,jqbdtil,
aubdtil2,irowbdtil2,jqbdtil2,
irowdd, jqdd, audd,
irowddtil2, jqddtil2, auddtil2,
irowddinv, jqddinv, auddinv,
Bd,irowb,jqb,
Dd,irowd,jqd,
Ddtil,irowdtil,jqdtil,
neq,b,bhat,islavnode,imastnode,nslavnode,nmastnode,
islavact,islavactdof,
gap,
slavnor,slavtan,
vold,vini,cstress,cstressini,
bp_old,nactdof,ntie,mi,nk,
nboun,ndirboun,nodeboun,xboun,
nmpc,ipompc,nodempc,coefmpc,
ikboun,ilboun,ikmpc,ilmpc,
nslavspc,islavspc,nsspc,nslavmpc,islavmpc,nsmpc,
nmastspc,imastspc,nmspc,nmastmpc,imastmpc,nmmpc,
tieset,
islavactdoftie,nelcon,elcon,tietol,ncmat_,ntmat_,
plicon,nplicon,npmat_,dtime,
irowtloc, jqtloc, autloc,
irowtlocinv, jqtlocinv, autlocinv,
islavnodeinv,lambdaiwan,lambdaiwanini,&k,nmethod,bet,
ithermal,plkcon,nplkcon);
}
fin= clock();
printf("\tmultimortar : %f s\n",((double)(fin-debut))/CLOCKS_PER_SEC);
number=10;
nzs[0]=jq[neq[1]]-1;
nzs[1]=jq[neq[1]]-1;
fflush(stdout);
debug=1;
/* calculating icol and icolc (needed for SPOOLES) */
for(i=0; i<neq[1]; i++){
icol[i] = jq[i+1]-jq[i];
}
/* nzlc is the number of the rightmost column with
nonzero off-diagonal terms */
number=10;
*irowp = irow; *aup=au;
*aucp=auc; *irowcp=irowc;
*aubdp=aubd; *irowbdp=irowbd;
*aubdtilp=aubdtil; *irowbdtilp=irowbdtil;
*aubdtil2p=aubdtil2; *irowbdtil2p=irowbdtil2;
*auddp=audd; *irowddp=irowdd;
*auddinvp=auddinv; *irowddinvp=irowddinv;
*auddtilp=auddtil; *irowddtilp=irowddtil;
*auddtil2p=auddtil2; *irowddtil2p=irowddtil2;
*Bdp = Bd; *irowbp= irowb;
*Bdhelpp = Bdhelp; *irowbhelpp= irowbhelp;
*Ddp = Dd; *irowdp= irowd;
*Ddtilp = Ddtil; *irowdtilp= irowdtil;
*Bdtilp = Bdtil; *irowbtilp= irowbtil;
*Bpgdp = Bpgd; *irowbpgp= irowbpg;
*Dpgdp = Dpgd; *irowdpgp= irowdpg;
*Dpgdtilp = Dpgdtil; *irowdpgtilp= irowdpgtil;
*Bpgdtilp = Bpgdtil; *irowbpgtilp= irowbpgtil;
SFREE(rs);SFREE(rsb);
fflush(stdout);
printf(" contactmortar: end\n\n");
//FORTRAN(stop,());
return;
}
|
ibm128-unsupported.c | // RUN: %clang_cc1 -triple powerpc64le -emit-llvm-bc -fopenmp %s \
// RUN: -fopenmp-targets=powerpc64le,x86_64 -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -triple x86_64 -aux-triple powerpc64le -fopenmp \
// RUN: -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc %s \
// RUN: -fsyntax-only
void foo(__ibm128 x); // expected-note {{'foo' defined here}}
// Calls foo(__ibm128) from inside an OpenMP target region; when this file is
// compiled for the x86_64 device (see the RUN lines) the front end must
// diagnose the unsupported 128-bit IBM long-double type at the call site.
// NOTE: this is a clang -verify test; the directive comment below is
// load-bearing and the code must not be reformatted around it.
void loop(int n, __ibm128 *arr) {
#pragma omp target parallel
for (int i = 0; i < n; ++i) {
// expected-error@+1 {{'foo' requires 128 bit size '__ibm128' type support, but device 'x86_64' does not support it}}
foo(arr[i]);
}
}
|
odf_fmt_plug.c | /* ODF cracker patch for JtR. Hacked together during Summer of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_odf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_odf);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "sha.h"
#include "sha2.h"
#include <openssl/blowfish.h>
#include "aes.h"
#include "pbkdf2_hmac_sha1.h"
#include "memdbg.h"
#define FORMAT_LABEL "ODF"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "SHA1/SHA256 " SHA1_ALGORITHM_NAME " BF/AES"
#else
#define ALGORITHM_NAME "SHA1/SHA256 BF/AES 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests odf_tests[] = {
{"$odf$*0*0*1024*16*df6c10f64d191a841812af53874b636d014ce3fe*8*07e28aff39d2660e*16*b124be9f3346fb77e0ebcc3bb80028f8*0*2276a1077f6a2a027bd565ce89824d6a20086e378876be05c4b8e3796a460e828c9803a692caf7a53492c220d1d7ecbf4e2d336c7abf5a7672acc804ca267318252cbc13676616d1fde38820f9fbeef1360067d9de096ba8c1032ae947bde1d0fedaf37b6020663d49faf36b7c095c5b9aae11c8fc2be74148f008edbdbb180b44028ad8259f1215b483542bf3027f56dee5f962448333b30f88e6ae4790b60d24abb286edff9adee831a4b3351fc47259043f0d683d7a25be7e47aff3aedca140005d866e218c8efcca32093c19bbece50bd96656d0f94a712d3c60d1e5342db86482fc73f05faf513ca0b137378126597b95986c372b412c953e97011259aab0839fe453c756559497a28ba88dce009e1e7980436131029d38e56a34f608e6471970d9959068808c898608024db9eb394c4feae7a364ea9272ec4ea2315a9f0407a4b27d5e49a8ab1e3ddce5c84927d5aecd7e68e4437a820ea8743c6b5b4e2abbb47b0001e2f77ceac4603e8774e4ccbc1adde794428c11ae4a7492727b620334302e63f72b0c06c1cf83800366916ee8295176819272d557863a831ee0a576841191482959aad69095831fa1d64e3e0e6f6c6a751bcdadf0fbaa27a17458709f708c04587cb208984c9525da6786e0e5aabefe30ad1dbbef66e85ce9d6dbe456fd85e4135de5cf16d9455976d7ca8de7b1b530661c74c0fae90c0fff1a2b5fcdfab19fcff75fadcec445ed8af6ab5babf1463e08458918be8045083de6db988c37e4be582cfac5cdf741d1f0322fb2902665c7ff347813348109e5d442e91fcb010c28f042da481e807084fcb4759b40ccf2cae77bad00cdfbfba4acf36aa1f74c30a315e3d7f1ca522b6306e8903352aafa51dc523d582d418934398d5eb88120e3656bfb640a239db507b285302a86855ea850ddc9af72fc62dc79336c9bc29ee8314c65adb0574e9c701d73d7fa977edd1d52a1ff2da5b8b94e1a0fdd01ffcc6583758f0a1f51750e45f12b58c6d38b140e5676cf3474224520ef7c52ca5e634f85456651f3d6f43d016ed7cc5da54ea640a3bc50c2b9d3dea8f93c0340d66ccd06efc5ae002108c33cf3a470c4a50f6a6ca2f11b8ad15511688c282b94ba6f1c332e239d10946dc46f763f08d12cb9edc1e79c0e07f7151f548e6d7d20ec13b52d911bf980cac60694e192651403c9a69abea045190e847be093fc9ba43fec55b32f77f5796ddca25b441f259d5c51e06df6c6588c6414899481ba9e06bcebec58f82ff3021b09c6beae13a5d22bc94870f72ab813d0c0be01d91f3d075192e7a5de765599d7224475
7d09539529a8347e077a36678166e5ed9f73a5aad2e147d8154095c397e3e5e4ba1987ca64c1301a0c6c3e438097ede9b701a105ec38fcb54abb31b367c7740cd9ac459e561094a34f01acee555e60267157e6", "test"},
{"$odf$*1*1*1024*32*61802eba18eab842de1d053809ba40927fd40b26c69ddeca6a8a652ed9c16a28*16*c5c0815b931f313627100d592a9c972f*16*e9a48b7daff738deaabe442007fb2ec4*0*be3b65ea09642c2b4fdc23e553e1f5304bc5df222b624c6373d53e674f5df01fdb8873cdab7a5a685fa45ad5441a9d8869401b7fa076c488ad53fd9971e97244ecc9416484450d4fb2ee4ec08af4044d7def937e6545dea2ce36bd5c57b1f46b11b9cf90c8fb3accff149ce2d54820b181b9124db9aac131f6436d77cf716423f04d42438eed6f9ca14bd24b9b17d3478176addd5fa0254bf986fccd879e326485790e28b94ad5306868734b5ac1b1ddb3f876382dee6e9428e8230e84bf11b7e85ccbae8b4b424cd73160c380f874b37fbe3c7e88c13ef4bde74b56507d17095c2c32bb8bcded0637e4403107bb33252f72f5886a91b7720fe32a8659a09c217717e4c74a7c2e09fc40b46aa288309a36e86b9f1856e1bce176bc9690555431e05c7b67ff95df64f8f40053079bfc9dda021ab2714fecf74398b867ebef675958f29eaa15eb631845e358a0c5caff0b824a2a69a6eabee069d3d6236d77709fd60438c9e3ad9e42b26810375e1e587eff105ac295327ef8bf66f6462388b7727ec32d6abde2f8d6126b185124bb437753663f6ab1f321ddfdb36d9f1f528729492e0b1bb8d3b9eda3c86c1997c92b902f5160f77587c37e45b5c133b5d9709fea910a2e9b54c0960b0ebc870cdbb858aabe07ed27cba86d29a7e64c6e3863131859314a14e64c1168d4a2d5ca0697853fb1fe969ba968e31359881d51edce287eff415de8e60cec2068bb82157fbcf0cf9a95e92cb23f32e6156daced4bee6ba8c8b41174d01fcd7662911bcc10d5b4478f8209ce3b91075d10529780be4f17e841a1f1833d432c3dc854908643e58b03c8860dfbc710a29f79f75ea262cfcef9cd67fb67d73f55b300d42f4577445af2b9f224620204cfb88de2cbf57931ac0e0f8d98259a41d744cad6a58abc7761c266f4e93aca19356b07073c09ae9d1976f4f2e1a76c350cc7764c27ae257eb69ba4213dd0a7794fa83d220439a398efd988b6dbf0de4c08bc3e4830c9e482b9e0fd1679f14e6f132cf06bae1d763dde7ce6f525ff9a0ebad28aeca16496194f2a6263a20e7afeb43d83c8c936130d6508f2bf68b5ca50375948424193a7fb1106fdf63ff72896e1b2633907f01a693218e3303436542bcf2af24cc4a41621c36768ce9a84d32cc9f3c2b108bfc78c25b1c2ea94e6e0d65406f78bdb8bc33c94a9550e5cc3e995cfbd31da03afb929418acdc89b099415f9bdb7dab7a75d44a696e14b031d601ad8d907e14a28044706c0c2955df2cb34ffea82af367e487b6cc928dc87a33fc7555
173e7faa5cfd1af6d3d6f496f23a9579db22dd4a2c16e950fdc90696d95a81183765a4fbddb42c488d40ac1de28483cf1cdddf821d3f859c57b13cb7f21a916bd0d89438a17634c68637f23e2544589e8ae5ee5bced91680c087cb3105cd74a09e88d3aae17d75e", "test"},
{"$odf$*0*0*1024*16*43d3dbd907785c4fa5282a2e73a5914db3372505*8*b3d676d4519e6b5a*16*34e3f7fdfa67fb0078360b0df4011270*0*7eff7a7abf1e6b0c4a9fafe6bdcfcfeaa5b1886592a52bd255f1b51096973d6fa50d792c695f3ef82c6232ae7f89c771e27db658258ad029e82415962b270d2c859b0a3efb231a0519ec1c807082638a9fad7537dec22e20d59f2bfadfa84dd941d59dd07678f9e60ffcc1eb27d8a2ae47b616618e5e80e27309cd027724355bf78b03d5432499c1d2a91d9c67155b7f49e61bd8405e75420d0cfb9e64b238623a9d8ceb47a3fdb5e7495439bb96e79882b850a0c8d3c0fbef5e6d425ae359172b9a82ec0566c3578a9f07b86a70d75b5ad339569c1c8f588143948d63bdf88d6ed2e751ac07f25ecc5778dc06247e5a9edca869ee3335e5dae351666a618d00ec05a35bc73d330bef12a46fb53b2ff96e1b2919af4e692730b9c9664aca761df10d6cf55396c4d4c268e6e96c96515c527c8fe2716ac7a9f016941aa46e6b03e8a5069c29ec8e8614b7da3e2e154a77510393051a0b693ae40da6afb5712a4ce4ac0ebacda1f45bdccc8a7b21e153d1471665cae3205fbfa00129bf00c06777bfecba2c43a1481a00111b4f0bd30c2378bd1e2e219700406411c6f897a3dfa51b31613cb241d56b68f3c241428783b353be26fa8b2df68ca215d1cf892c10fdef94faf2381a13f8cb2bce1a7dbb7522ef0b2a83e5a96ca66417fd2928784054e80d74515c1582ad356dd865837b5ea90674a30286a72a715f621c9226f19a321b413543fbbdb7cd9d1f99668b19951304e7267554d87992fbf9a96116601d0cee9e23cb22ba474c3f721434400cacf15bae05bbe9fa17f69967d03689c48a26fa57ff9676c96767762f2661b6c8f8afa4f96f989086aa02b6f8d039c6f4d158cc33a56cbf77640fb5087b2d5a5251692bb9255d0ae8148c7157c40031fdb0ea90d5fab546a7e1e1c15bd6a27f3716776c8a3fdbdd4f34c19fef22c36117c124876606b1395bf96266d647aaf5208eefd729a42a4efe42367475315a979fb74dcb9cd30917a811ed8283f2b111bb5a5d2b0f5589b3652f17d23e352e1494f231027bb93209e3c6a0388f8b2214577dca8aa9d705758aa334d6947491488770ed8066f692f8922ff0d852c2d0f965ab3d8a13c6de0ef3cff5a15ee7b64f9b1003817f0cb919ad021d5f3b0b5c1ad58db22e8fbd63abfb40e61065bad008cdffbbe3c563780a548f4515df5c935d9aa2a3033bc8a4011c9c173a0366c9b7b07f2a27de0e55373fb4b0c7726997be6f410a2ee5980393ea005516e89538be796131e450403420d72cdbd75475fd11c50efce5eb340d55d2dd0a67ca45ddb53aa582a2ec56b46452e26a505bf73099
8513837c96a121e4ad13af5030392ff7fb660955e03f65894733862f2367d529f0e8cdb73272b9ce01491747cb3e1a22f5c85ab6d40ddd35d15b9d46d73600e0971da90f93cb0e9be357c4f1227fbf5b123e5b", "jumper9"},
{"$odf$*0*0*1024*16*4ec0370ab589f943131240e407a35b58a341e052*8*19cadc01889f78c0*16*dcfcb8baccda277764e4e99833ab9640*0*a7bd859d68298fbdc36b6b51eb06f7055befe08f76ca9833c6e298db8ed971bfd1315065a19e1b31b8a93624757a2583816f35d6f251ff7943be626b3dc72f0b320c9ce5d80b7cc676aa02e6a4996abd752da573ecc339d2c80a2c8bfc28a9f4ceea51c2969adf20c8762b2ee0b1835bbd31bd90d5a638cfe523a596ea95feca64ae20010ad9957a724143e25a875f3cec3cedb4df1c16ac82b46b35db269da98270c813acd5e55a2c138306decdf96b1c1079d9cfd3704d519fbc5a4a547ba5286a7e80dc434f1bf34260433cbb79c4bcbb2a5bfc5a6c2430944ef2e34e7b9c76b21a97003c1fa85f6e9c4ed984108a7d301afe4a8f6625502a4bf17b24e009717c711571da2d6acd25868892bb9e29a77da8018222cd57c91d9aad96c954355e50a4760f08aa1f1b4257f7eb1a235c9234e8fc4ed97e8ad3e5d7d128807b726a4eb0038246d8580397c0ff5873d34b5a688a4a931be7c5737e5ada3e830b02d3efb075e338d71be55751a765a21d560933812856986a4d0d0a6d4954c50631fa3dff8565057149c4c4951858be4d5dca8e492093cfd88b56a19a161e7595e2e98764e91eb51c5289dc4efa65c7b207c517e269e3c699373fe1bf177c5d641cf2cfa4bd2afe8bff53a98b2d64bedc5a2e2f2973416c66791cf012696a0e95f7a4dadb86f925fc1943cb2b75fb3eda30f7779edff7cce95ae6f0f7b45ac207a4de4ec012a3654103136e11eb496276647d5e8f6e1659951fc7ef78d60e9430027e826f2aaab7c93ef58a5af47b92cec2f17903a26e2cc5d8d09b1db55e568bfb23a6b6b46125daf71a2f3a708676101d1b657cd38e81deb74d5d877b3321349cd667c29359b45b82218ad96f6c805ac3439fc63f0c91d66da36bae3f176c23b45b8ca1945fb4a4cea5c4a7b0f6ffd547614e7016f94d3e7889ccac868578ea779cd7e6b015aafd296dd5e2da2aa7e2f2af2ce6605f53613f069194dff35ffb9a2ebb30e011c26f669ededa2c91ffb06fedc44cf23f35d7d2716abcd50a8f561721d613d8f2c689ac245a5ac084fa86c72bbe80da7d508e63d891db528fa9e8f0d608034cd97dfde70f739857672e2d70070e850c3a6521067c1774244b86cca835ca8ff1748516e694ea2b5b42555f0df9cb9ec78825c351df51a76b6fe23b58ab3e87ba94ffbb98c9fa9d50c0c282ed0e506bcad24c02d8b625b4bdac822a9e5c911d095c5e4d3bf03448add978e0e7fab7f8a7008568f01a4f06f155223086bdcfe6879e76f199afb9caeadebaa9ec4ec8120f4ccfc4f5f7d7e3cc4dd0cba4d11546d8540030769c4b6d54ab
dd51fa1f30da642e5ff5c35d3e711c8931ff79e9f256ac6416e99943b0000bf32a5efdd5cf1cd668a62381febe959ca472be9c1a9bade59dbba07eb035ddb1e64ae2923bd276deed788db7600d776f49339215", "RickRoll"},
{"$odf$*0*0*1024*16*399a33262bbef99543bae29a6bb069c36e3a8f1b*8*6b721193b04fa933*16*99a6342ca7221c81890035dc5033c16f*0*ef8692296b67a8a77344e87b6193dc0a370b115d9e8c85e901c1a19d03ee2a34b7bf989bf9c2edab61022ea49f2a3ce5a6c807af374afd21b52ccbd0aa13784c73d2c8feda1fe0c8ebbb94e46e32904d95d1f135759e2733c2bd30b8cb0050c1cb8a2336c1151c498b9609547e96243aed9473e0901b55137ed78e2c6057e5826cfbfb94b0d77cb12b1fb6ac2752ea71c9c05cdb6a2f3d9611cb24f6e23065b408601518e3182ba1b8cef4cfcdf6ceecb2f33267cf733d3da715562e6977015b2b6423fb416781a1b6a67252eec46cda2741163f86273a68cd241a06263fdd8fc25f1c30fd4655724cc3e5c3d8f3e84abf446dd545155e440991c5fa613b7c18bd0dabd1ad45beb508cfb2b08d4337179cba63df5095b3d640eadbd72ca07f5c908241caf384ca268355c0d13471c241ea5569a5d04a9e3505883eb1c359099c1578e4bc33a73ba74ceb4a0520e0712e3c88582549a668a9c11b8680368cfbc3c5ec02663ddd97963d9dacefed89912ffa9cd945a8634a653296163bb873f3afd1d02449494fab168e7f652230c16d35853df1164219c04c4bd17954b85eb1939d87412eeeb2a039a8bb087178c03a9a40165a28a985e8bc443071b3764d846d342ca2073223f9809fe2ee3a1dfa65b9d897877ebb33a48a760c8fb32062b51a96421256a94896e93b41f559fdec7743680a8deacff9132d6129574d1a62be94308b195d06a275947a1455600030468dde53639fd239a8ab074ec1c7f661f2c9e8d60d6e0e743d351017d5c3d3be21b67d05310d0c5f3fd670acd95ca24f91b0d84d761d15259848f736ff08610e300c31b242f6d24ac2418cdd1fe0248f8a2a2f5775c08e5571c8d25d65ff573cc403ea9cad3bafd56c166fbcec9e64909df3c6ec8095088a8992493b7180c4dbb4053dcb55d9c5f46d728a97ae4ec7ac4b5941bcc3b64a4af31f7dc673e6715a52c9cdbe23dc21e51784f8314c019fc90e8612fcffe01d026fd9e15d1474e73dedf1d3830da81320097be6953173e4293372b5e5a8ecc49ac8b1a658cff16ffa04a8c1728d02ab67694170f10bc9030939ff6df3f901faa019d9b9fd2ba23e89eb0bbaf7a69a2272ee1df0403e6435aee147da217e8bf4c1ee5c53eb83aac1b3f8772d5cd2a2686f312ac4f4f2b0733593e28305a550dbbd18d3405a464ff20e0d9364cfe49b82a97ef7303aec92004a3476cf9ad012eaaf10fd07d3823e1b6871e82113ecfe4392854de9ab21ab1e33ce93d1abb07018007f50d641c8eb85b28fd335fd2281745772c98f8f0bba3f4d40ba602545ef8a0db3062f02d7ee5f
49b42cbe19c0c2124952f98c49aff6927110314e54fe8d47a10f13d2d4055c1f3f2d679d4043c9b2f68b2220b6c6c738f6402c01d000c9394c8ed27e70c7ee6108d3e7e809777bab9be30b33a3fb83271cbf3b", "WhoCanItBeNow"},
/* CMIYC 2013 "pro" hard hash */
{"$odf$*1*1*1024*32*7db40092b3857fa319bc0d717b60cefc40b1d51ef92ebc893c518ffebffdf200*16*5f7c8ab6e5d1c41dbd23c384fee957ed*16*9ff092f2dd29dab6ce5fb43ad7bbdd5a*0*bac8343436715b40aaf4690a7dc57b0f82b8f25f8ad0f9833e32468410d4dd02e387a067872b5847adc9a276c86a03113e11b903854202eec361c5b7ba74bcb254a4f76d97ca45dbe30fe49f78ce9cf7df0246ae4524b8f13ad28357838559c116d9ed59267f4df91da3ea9758c132e2ebc40fd4ee8e9978921a0847d7ca5c30ef911e0b88f9fc84039633eacf5e023c82dd1a573abd7663b8f36a039d42ed91b4a0665902f174be8cefefd367ba9b5da95768550e567242f1b2e2c3866eb8aa3c12d0b34277929616319ea29dd9a3b9addb963d45c7d4c2b54a99b0c1cf24cac3e981ed4e178e621938b83be30f54d37d6425a0b7ac9dff5504830fe1d1f136913c32d8f732eb55e6179ad2699fd851af3a44f8ca914117344e6fadf501bf6f6e0ae7970a2b58eb3af0d89c78411c6adde8aa1f0e8b69c261fd04835cdc3ddf0a6d67ddff33995b5cc7439db83f90c8a2e07e2513771fffcf8b55ce1a382b14ffbf22be9bdd6f83a9b7602995c9793dfffb32c9eb16930c0bb55e5a8364fa06a59fca5af27df4a02565db2b4718ed44405f67a052738692c189039a7fd63713207616eeeebace3c0a3963dd882c485523f49fa0bc2663fc6ef090a220dd5c6554bc0702da8c3122383ea8a009837d549d58ad688c9cc4b8461fe70f4600539cd1d82edd4e110b1c1472dae40adc3126e2a09dd2753dcd83799841745160e235652f601d1257268321f22d19bd9dc811afaf143765c7cb53717ea329e9e4064a3cf54b33d006e93b83102e2ad3327f6d995cb598bd96466b1287e6da9967f4f034c63fd06c6e5c7ec25008c122385f271d18918cff3823f9fbdb37791e7371ce1d6a4ab08c12eca5fceb7c9aa7ce25a8bd640a68c622ddd858973426cb28e65c4c3421b98ebf4916b8c2bfe71b2afec4ab2f99291a4c4d3312521850d46436aecd9e2e93a8619dbc3c1caf4507bb488ce921cd8d13a1640e6c49403e0416924b3b1a01c9939c7bcdec50f057d6f4dccf0afc8c2ad37c4f8429c77cf19ad49db5e5219e965a3ed5d56d799689bd93642602d7959df0493ea62cccff83e66d85bf45d6b5b03e8cfca84daf37ecfccb60f85f3c5102900a02a5df015b1bf1ef55dfb2ab20321bcf3325d1adce22d4456837dcc589ef36d4f06ccdcc96ef10ff806d76f0044e92e192b946ae0f09860a38c2a6052fe84c3e9bb9380e2b344812376c6bbd5c9858745dbd072798a3d7eff31ae5d509c11b5269ec6f2108cb6e72a5ab495ea7aed5bf3dabedbb517dc4ceff818a8e890a6ea9a91ba
b37e8a463a9d04993c5ba7e40e743e033842540806d4a65258d0f4d5988e1e0011f0e85fcae3b2819c1f17f5c7980ecd87aee425cdab4f34bfb7a31ee7936c60f2f4f52aea67aef4736a419dc9c559279b569f61995eb2d6b7c204c3e9f56ca5c8a889812a30c33", "juNK^r00M!"},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];
static struct custom_salt {
int cipher_type;
int checksum_type;
int iterations;
int key_size;
int iv_length;
int salt_length;
int content_length;
unsigned char iv[16];
unsigned char salt[32];
unsigned char content[1024];
} *cur_salt;
/*
 * One-time format setup: size the candidate-key and result buffers for
 * the maximum number of keys processed per crypt_all() call.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	/* min_keys scales by thread count only; max_keys additionally by
	 * OMP_SCALE so each thread gets several batches of work per call. */
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
/* Release the per-run buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/*
 * Sanity-check one "$odf$*..." hash line, field by field.
 * Layout (binary fields hex-encoded):
 *   $odf$*cipher*checksum*iterations*keysize*checksum-hex*ivlen*iv*
 *         saltlen*salt*unused*content
 * Returns 1 when well-formed, 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int res;

	if (strncmp(ciphertext, "$odf$*", 6))
		return 0;
	ctcopy = strdup(ciphertext);	/* strtokm() writes into its argument */
	keeptr = ctcopy;	/* remember start of copy so we can free it */
	ctcopy += 6;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* cipher type */
		goto err;
	if (strlen(p) != 1)
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)	/* only types 0 and 1 are handled */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum type */
		goto err;
	if (strlen(p) != 1)
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* checksum field (skipped) */
		goto err;
	res = hexlenl(p);
	if (res != BINARY_SIZE * 2 && res != 64) // 2 hash types.
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv length */
		goto err;
	res = atoi(p);
	if (res > 16 || res < 0)	/* iv buffer in custom_salt is 16 bytes */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
		goto err;
	if (hexlenl(p) != res * 2)	/* must match declared iv length */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	if (strlen(p) >= 10)	/* guard atoi() against absurd-length input */
		goto err;
	res = atoi(p);
	if (res > 32 || res < 0)	/* salt buffer in custom_salt is 32 bytes */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p) != res * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* something */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* content */
		goto err;
	res = strlen(p);
	if (res > 2048 || res & 1)	/* <= 1024 bytes, even hex length */
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse a (previously valid()-checked) hash line into the static
 * custom_salt structure. Returns a pointer to that static buffer; the
 * core copies SALT_SIZE bytes out of it.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += 6;	/* skip over "$odf$*" */
	p = strtokm(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "*");
	cs.key_size = atoi(p);
	strtokm(NULL, "*");
	/* skip checksum field */
	p = strtokm(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtokm(NULL, "*");
	/* decode hex iv; valid() capped iv_length at 16 */
	for (i = 0; i < cs.iv_length; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtokm(NULL, "*");
	/* decode hex salt; valid() capped salt_length at 32 */
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	strtokm(NULL, "*");
	p = strtokm(NULL, "*");
	/* decode encrypted content; loop is capped at the 1024-byte buffer */
	memset(cs.content, 0, sizeof(cs.content));
	for (i = 0; p[i * 2] && i < 1024; i++)
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	cs.content_length = i;
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/*
 * Extract the stored checksum (5th field) as raw bytes. Only the first
 * BINARY_SIZE (20) bytes are decoded even when the field is a 64-hex-char
 * SHA-256 value, matching the BINARY_SIZE compare in cmp_one().
 * Returns a pointer to a static, ARCH_WORD-aligned buffer.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;	/* forces alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;

	ctcopy += 6;	/* skip over "$odf$*" */
	strtokm(ctcopy, "*");	/* skip cipher type */
	strtokm(NULL, "*");	/* skip checksum type */
	strtokm(NULL, "*");	/* skip iterations */
	strtokm(NULL, "*");	/* skip key size */
	p = strtokm(NULL, "*");	/* checksum field */
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	MEM_FREE(keeptr);
	return out;
}
/* Partial-hash accessors over the first 32 bits of each computed checksum;
 * used by the cracker core's hash tables at increasing mask widths. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Install the salt the core selected for the next crypt_all() batch. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute a checksum for every queued candidate key (in OpenMP batches of
 * MAX_KEYS_PER_CRYPT). Two pipelines, selected by the salt:
 *  - checksum_type==0 && cipher_type==0: SHA-1(password) feeds
 *    PBKDF2-HMAC-SHA1, Blowfish-CFB64 decrypts the content, and SHA-1 of
 *    the plaintext is stored in crypt_out[].
 *  - otherwise: SHA-256(password), PBKDF2-HMAC-SHA1, AES-CBC decrypt,
 *    SHA-256 of the plaintext.
 * With SIMD_COEF_32 the PBKDF2 step runs over several keys at once.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char key[MAX_KEYS_PER_CRYPT][32];
		unsigned char hash[MAX_KEYS_PER_CRYPT][32];
		BF_KEY bf_key;
		int bf_ivec_pos, i;
		unsigned char ivec[8];
		unsigned char output[1024];
		SHA_CTX ctx;
#ifdef SIMD_COEF_32
		int lens[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
#endif
		if(cur_salt->checksum_type == 0 && cur_salt->cipher_type == 0) {
			/* Legacy path: SHA-1 checksum + Blowfish cipher. */
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				SHA1_Init(&ctx);
				SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));
				SHA1_Final((unsigned char *)(hash[i]), &ctx);
			}
#ifdef SIMD_COEF_32
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = 20;	/* SHA-1 digest length */
				pin[i] = hash[i];
				pout[i] = key[i];
			}
			pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,
				cur_salt->salt_length,
				cur_salt->iterations, pout,
				cur_salt->key_size, 0);
#else
			pbkdf2_sha1(hash[0], 20, cur_salt->salt,
				cur_salt->salt_length,
				cur_salt->iterations, key[0],
				cur_salt->key_size, 0);
#endif
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				bf_ivec_pos = 0;
				memcpy(ivec, cur_salt->iv, 8);	/* BF-CFB64 uses an 8-byte IV */
				BF_set_key(&bf_key, cur_salt->key_size, key[i]);
				BF_cfb64_encrypt(cur_salt->content, output, cur_salt->content_length, &bf_key, ivec, &bf_ivec_pos, 0);
				SHA1_Init(&ctx);
				SHA1_Update(&ctx, output, cur_salt->content_length);
				SHA1_Final((unsigned char*)crypt_out[index+i], &ctx);
			}
		}
		else {
			/* Modern path: SHA-256 checksum + AES-CBC cipher. */
			SHA256_CTX ctx;
			AES_KEY akey;
			unsigned char iv[16];

			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				SHA256_Init(&ctx);
				SHA256_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));
				SHA256_Final((unsigned char *)hash[i], &ctx);
			}
#ifdef SIMD_COEF_32
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = 32;	/* SHA-256 digest length */
				pin[i] = hash[i];
				pout[i] = key[i];
			}
			pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,
				cur_salt->salt_length,
				cur_salt->iterations, pout,
				cur_salt->key_size, 0);
#else
			pbkdf2_sha1(hash[0], 32, cur_salt->salt,
				cur_salt->salt_length,
				cur_salt->iterations, key[0],
				cur_salt->key_size, 0);
#endif
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				memcpy(iv, cur_salt->iv, 16);
				memset(&akey, 0, sizeof(AES_KEY));
				/* NOTE(review): key length is hardcoded at 256 bits even
				 * though cur_salt->key_size may be 16 bytes -- confirm
				 * this is intended for all checksum_type==1 hashes. */
				if(AES_set_decrypt_key(key[i], 256, &akey) < 0) {
					fprintf(stderr, "AES_set_decrypt_key failed!\n");
				}
				AES_cbc_encrypt(cur_salt->content, output, cur_salt->content_length, &akey, iv, AES_DECRYPT);
				SHA256_Init(&ctx);
				SHA256_Update(&ctx, output, cur_salt->content_length);
				SHA256_Final((unsigned char*)crypt_out[index+i], &ctx);
			}
		}
	}
	return count;
}
/*
 * Quick scan: does any computed checksum match the candidate binary?
 * Only the first ARCH_SIZE bytes are compared as a cheap filter;
 * cmp_one() re-checks the full BINARY_SIZE on a hit.
 */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full comparison of one candidate against the stored BINARY_SIZE bytes. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* Nothing beyond cmp_one() to verify; always confirm the match. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void odf_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored (possibly truncated) candidate for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/*
* The format tests all have iteration count 1024.
* Just in case the iteration count is tunable, let's report it.
*/
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
/* Format descriptor wiring the ODF methods into the John the Ripper core. */
struct fmt_main fmt_odf = {
	{	/* format parameters */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,	/* NOTE(review): presumably minimum plaintext length -- confirm against formats.h */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",	/* tunable cost name, see iteration_count() */
		},
		odf_tests
	}, {	/* format methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,	/* no salt_compare */
		set_salt,
		odf_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
elemwise_binary_scalar_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file elemwise_binary_scalar_op.h
* \brief Function definition of elementwise binary scalar operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
#include <mxnet/operator_util.h>
#include <vector>
#include <utility>
#include "../mshadow_op.h"
#include "../elemwise_op_common.h"
#include "elemwise_unary_op.h"
namespace mxnet {
namespace op {
/*!
 * \brief Kernels for elementwise "tensor (op) scalar" operators. The scalar
 * operand is carried in attrs.parsed as a double. Dense, row-sparse and CSR
 * inputs are supported; a sparse input with a dense output materializes
 * OP(0, scalar) for all missing entries.
 */
class BinaryScalarOp : public UnaryOp {
  /*! \brief Row-sparse input -> dense result (CPU). */
  template<typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRsp(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    const double alpha = nnvm::get<double>(attrs.parsed);
    CHECK_EQ(output.shape(), input.shape());
    const int64_t row_count = output.shape()[0];
    const int64_t items_per_row = output.shape().Size() / row_count;
    // Value produced for every row absent from the sparse input.
    const DType result_for_zero = OP::Map(DType(0), DType(alpha));
    mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
    mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
    const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
    if (sparse_row_count != row_count) {
      mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
        rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
      int64_t input_iter = 0;
      int64_t output_row = 0;
      IType next_input_row = 0;
      while (output_row < row_count) {
        next_input_row = input_iter < sparse_row_count ? int64_t(row_indexes[input_iter])
                                                       : row_count;
        // Split up into blocks of contiguous data and do those together
        // Do contiguous dense blocks (rows missing from the sparse input)
        const int64_t dense_block_count = next_input_row - output_row;
        if (dense_block_count > 0) {
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<OpBase::set_to_scalar<Req>, cpu>::Launch(
              stream,
              items_per_row * dense_block_count,
              output_data.dptr_ + items_per_row * output_row,
              result_for_zero);
          });
          output_row += dense_block_count;
          continue;
        }
        // Do contiguous sparse blocks (runs of adjacent stored rows)
        int64_t next_non_contiguous_sparse = input_iter;
        while (next_non_contiguous_sparse < sparse_row_count - 1) {
          if (row_indexes[next_non_contiguous_sparse + 1]
              != row_indexes[next_non_contiguous_sparse] + 1) {
            break;
          }
          ++next_non_contiguous_sparse;
        }
        const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
        if (sparse_block_count > 0) {
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
              stream,
              items_per_row * sparse_block_count,
              &output_data.dptr_[items_per_row * output_row],
              &input_data.dptr_[items_per_row * input_iter],
              DType(alpha));
          });
          output_row += sparse_block_count;
          input_iter += sparse_block_count;
          continue;
        }
      }
    } else {
      // All rows exist (eventually we don't have to do complex
      // things to call GPU kernels because we don't need to access row indices)
      MXNET_ASSIGN_REQ_SWITCH(req, Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
          stream,
          items_per_row * row_count,
          output_data.dptr_,
          input_data.dptr_,
          DType(alpha));
      });
    }
  }

  /*! \brief Row-sparse input -> dense result (GPU): not implemented. */
  template<typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRsp(mshadow::Stream<gpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  /*! \brief CSR input -> dense result (CPU). */
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    CHECK_EQ(output.shape(), input.shape());
    const double alpha = nnvm::get<double>(attrs.parsed);
    // Value produced for every position absent from the CSR input.
    const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
    const TBlob column_indexes = input.aux_data(csr::kIdx);
    const size_t item_count = column_indexes.Size();
    // Pre-fill dense with 0-input/output value
    FillDense<DType>(stream, output.shape().Size(), dense_fill_val,
                     req, output.data().dptr<DType>());
    mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
    if (item_count) {
      const DType *in = input.data().dptr<DType>();
      const IType *column_indexes_ptr = column_indexes.dptr<IType>();
      const auto row_count = static_cast<size_t>(input.shape()[0]);
      const TBlob row_starts = input.aux_data(csr::kIndPtr);
      const CType *row_starts_ptr = row_starts.dptr<CType>();
      #pragma omp parallel for
      for (int i = 0; i < static_cast<int>(row_count); ++i) {
        const bool last_row = i == static_cast<int>(row_count) - 1;
        // Split up into blocks of contiguous data and do those together
        const size_t row_item_start_iter = row_starts_ptr[i];
        const size_t input_items_this_row = !last_row
                                            ? static_cast<size_t>(row_starts_ptr[i + 1])
                                              - row_item_start_iter
                                            : item_count - row_item_start_iter;
        if (input_items_this_row) {
          const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
          const DType *row_data_start = in + row_item_start_iter;
          DType *output_this_row = out[i].dptr_;
          // More overhead to use OMP for small loops, so don't
          // NOTE(review): this `parallel for` is nested inside the outer
          // one; unless nested parallelism is enabled it runs serially --
          // confirm whether that is intended.
          if (input_items_this_row > 1000) {
            #pragma omp parallel for
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          } else {
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          }
        }
      }
    }
  }

  /*! \brief CSR input -> dense result (GPU): not implemented. */
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCsr(mshadow::Stream<gpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    LOG(FATAL) << "NOT IMPLEMENTED";
  }

  /*!
   * \brief Dispatch a sparse-input/dense-output computation by storage type.
   * Fix: `output` was previously taken by value (`const NDArray output`),
   * copying an NDArray on every call; it is now a const reference like
   * every other overload in this class.
   */
  template<typename xpu, typename OP, typename DType, typename IType>
  static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const NDArray &input,
                                   const OpReqType req,
                                   const NDArray &output) {
    mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
    CHECK_EQ(output.storage_type(), kDefaultStorage);
    switch (input.storage_type()) {
      case kRowSparseStorage: {
        ComputeExDenseResultRsp<OP, DType, IType>(stream, attrs, ctx, input, req, output);
        break;
      }
      case kCSRStorage: {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
          ComputeExDenseResultCsr<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
        });
        break;
      }
      default:
        CHECK(false) << "Unsupported sparse storage type";
        break;
    }
  }

 public:
  /*! \brief Dense forward: outputs[0] = OP(inputs[0], scalar), honoring req[0]. */
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s,
                                                                      inputs[0].Size(),
                                                                      outputs[0].dptr<DType>(),
                                                                      inputs[0].dptr<DType>(),
                                                                      DType(alpha));
      });
    });
  }

  /*!
   * \brief Storage-type-aware forward. Sparse-in/sparse-out of the same kind
   * maps onto the dense kernel over the stored values; sparse-in/dense-out
   * goes through ComputeExDenseResult; anything else is a fatal error.
   */
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    const auto in_stype = inputs[0].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if (req[0] == kNullOp) {
      return;
    }
    if ((in_stype == kRowSparseStorage && out_stype == kRowSparseStorage) ||
        (in_stype == kCSRStorage && out_stype == kCSRStorage)) {
      // csr -> csr, or rsp -> rsp
      UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
    } else if (out_stype == kDefaultStorage &&
               (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
      MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
        MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
          ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
        });
      });
    } else {
      LOG(FATAL) << "Not implemented: " << operator_string(attrs, ctx, inputs, req, outputs);
    }
  }

  /*!
   * \brief Backward: igrad = ograd * OP(lhs, scalar), where OP is the
   * derivative functor registered for the forward operator.
   */
  template<typename xpu, typename OP>
  static void Backward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> igrad = outputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> ograd = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> lhs = inputs[1].FlatTo1D<xpu, DType>(s);
      ASSIGN_DISPATCH(igrad, req[0], ograd * F<OP>(lhs, scalar<DType>(DType(alpha))));
    });
  }
};
/* Registers a binary-scalar operator: one tensor input, one output, plus a
 * "scalar" string attribute parsed into attrs->parsed as a double.  Shape
 * and dtype are inferred elementwise (1-in/1-out), and in-place execution
 * on the {input 0, output 0} pair is permitted.  Block comments only here:
 * a // comment before a backslash would splice the next macro line into it. */
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name)                 \
  NNVM_REGISTER_OP(name)                                            \
  .set_num_inputs(1)                                                \
  .set_num_outputs(1)                                               \
  .set_attr_parser([](NodeAttrs* attrs) {                           \
      attrs->parsed = std::stod(attrs->dict["scalar"]);             \
    })                                                              \
  .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<1, 1>)  \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)     \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                 \
    [](const NodeAttrs& attrs){                                     \
      return std::vector<std::pair<int, int> >{{0, 0}};             \
    })                                                              \
  .add_argument("data", "NDArray-or-Symbol", "source input")        \
  .add_argument("scalar", "float", "scalar input")
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 *
 * Normalizes *y in place (exactly as the classic GNU libc example does) so
 * that the microsecond subtraction cannot go out of range; tv_usec of the
 * result is therefore always non-negative.
 *
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }

    /* Carry excess microseconds of the difference into whole seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* The remaining per-field subtraction is now safe. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 32;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_unop__identity_fc32_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_fc64)
// op(A') function: GB (_unop_tran__identity_fc32_fc64)
// C type: GxB_FC32_t
// A type: GxB_FC64_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij))
// unaryop: cij = aij
// Type of the input matrix A (double complex)
#define GB_ATYPE \
    GxB_FC64_t

// Type of the output matrix C (float complex)
#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC64_t aij = Ax [pA]

// access the output entry at position p
#define GB_CX(p) Cx [p]

// unary operator (identity: the cast does all the work)
#define GB_OP(z, x) \
    z = x ;

// casting: narrow each complex component from double to float
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GxB_FC64_t aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */     \
    GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ; \
    Cx [pC] = z ;        \
}

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time switches from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with a typecast: Cx [p] = (GxB_FC32_t) Ax [p],
// narrowing the real and imaginary parts of each double-complex entry to
// single precision.  Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB (_unop_apply__identity_fc32_fc64)
(
    GxB_FC32_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,    // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) creal (aij), (float) cimag (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): transpose A while narrowing FC64 -> FC32.
// The actual transpose loop lives in the shared template
// GB_unop_transpose.c, which expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fc32_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,    // per-thread workspaces
    const int64_t *restrict A_slice,  // how A is partitioned across threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// author:BUG1989 (https://github.com/BUG1989/) Long-term support.
// author:FuGuangping (https://github.com/fu1899) Implemented the first version of INT8 quantization on ARMv7.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 int8 kernels for Winograd F(2,3) and repack them for the
// dot-product stage.  Phase 1 computes, per (outch, inch) pair, the 4x4
// int16 transformed kernel via two multiplies with the G matrix (ktm).
// Phase 2 builds one packed matrix per r = 0..3 (one per row of the 4x4
// result): output channels are grouped in blocks of 8, then 4, then 1,
// with the 4 values of each channel interleaved per input channel, so the
// GEMM-like stage can stream weights sequentially.
static void conv3x3s1_winograd23_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch, const Option& opt)
{
    // 4x4 int16 transformed kernel per (p, q) pair
    Mat kernel_tm(4 * 4, inch, outch, (size_t)2u);

    // G
    const short ktm[4][3] = {
        {2, 0, 0},
        {1, 1, 1},
        {1, -1, 1},
        {0, 0, 2}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel: k0/k1/k2 are the three rows of the 3x3 kernel
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h: first multiply with G (4x3 intermediate)
            short tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U: second multiply with G, giving the 4x4 transformed kernel
            for (int j = 0; j < 4; j++)
            {
                short* tmpp = &tmp[j][0];
                for (int i = 0; i < 4; i++)
                {
                    kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Repack: one Mat per r, channel-blocked 8/4/1 (channel index
    // p/8 + (p%8)/4 + p%4 mirrors the lookup in the compute stage)
    for (int r = 0; r < 4; r++)
    {
        Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)2u);

        int p = 0;
        // blocks of 8 output channels: 32 shorts per input channel
        for (; p + 7 < outch; p += 8)
        {
            const short* kernel0 = (const short*)kernel_tm + (p + 0) * inch * 16;
            const short* kernel1 = (const short*)kernel_tm + (p + 1) * inch * 16;
            const short* kernel2 = (const short*)kernel_tm + (p + 2) * inch * 16;
            const short* kernel3 = (const short*)kernel_tm + (p + 3) * inch * 16;
            const short* kernel4 = (const short*)kernel_tm + (p + 4) * inch * 16;
            const short* kernel5 = (const short*)kernel_tm + (p + 5) * inch * 16;
            const short* kernel6 = (const short*)kernel_tm + (p + 6) * inch * 16;
            const short* kernel7 = (const short*)kernel_tm + (p + 7) * inch * 16;

            short* ktmp = kernel_tm_test.channel(p / 8);

            for (int q = 0; q < inch; q++)
            {
                // row r (4 values) of each of the 8 channels, interleaved
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];

                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];

                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];

                ktmp[16] = kernel4[r * 4 + 0];
                ktmp[17] = kernel4[r * 4 + 1];
                ktmp[18] = kernel4[r * 4 + 2];
                ktmp[19] = kernel4[r * 4 + 3];

                ktmp[20] = kernel5[r * 4 + 0];
                ktmp[21] = kernel5[r * 4 + 1];
                ktmp[22] = kernel5[r * 4 + 2];
                ktmp[23] = kernel5[r * 4 + 3];

                ktmp[24] = kernel6[r * 4 + 0];
                ktmp[25] = kernel6[r * 4 + 1];
                ktmp[26] = kernel6[r * 4 + 2];
                ktmp[27] = kernel6[r * 4 + 3];

                ktmp[28] = kernel7[r * 4 + 0];
                ktmp[29] = kernel7[r * 4 + 1];
                ktmp[30] = kernel7[r * 4 + 2];
                ktmp[31] = kernel7[r * 4 + 3];

                ktmp += 32;
                kernel0 += 16;
                kernel1 += 16;
                kernel2 += 16;
                kernel3 += 16;
                kernel4 += 16;
                kernel5 += 16;
                kernel6 += 16;
                kernel7 += 16;
            }
        }

        // blocks of 4 output channels: 16 shorts per input channel
        for (; p + 3 < outch; p += 4)
        {
            const short* kernel0 = (const short*)kernel_tm + (p + 0) * inch * 16;
            const short* kernel1 = (const short*)kernel_tm + (p + 1) * inch * 16;
            const short* kernel2 = (const short*)kernel_tm + (p + 2) * inch * 16;
            const short* kernel3 = (const short*)kernel_tm + (p + 3) * inch * 16;

            short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);

            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];

                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];

                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];

                ktmp += 16;
                kernel0 += 16;
                kernel1 += 16;
                kernel2 += 16;
                kernel3 += 16;
            }
        }

        // remaining single output channels: 4 shorts per input channel
        for (; p < outch; p++)
        {
            const short* kernel0 = (const short*)kernel_tm + p * inch * 16;

            short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);

            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp += 4;
                kernel0 += 16;
            }
        }

        kernel_tm2.push_back(kernel_tm_test);
    }
}
static void conv3x3s1_winograd23_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in FeatherCNN
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 4, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 2;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q);
short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q);
short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q);
short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// load
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.8b}, [%0] \n"
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v1.8b}, [%1] \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v2.8b}, [%2] \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v3.8b}, [%3] \n"
// w = B_t * d, trans int8 to int16
"ssubl v4.8h, v0.8b, v2.8b \n" // d4
"saddl v5.8h, v1.8b, v2.8b \n" // d6
"ssubl v6.8h, v2.8b, v1.8b \n" // d8
"ssubl v7.8h, v3.8b, v1.8b \n" // d10
// transpose w to w_t
"trn1 v8.4h, v4.4h, v5.4h \n"
"trn2 v9.4h, v4.4h, v5.4h \n"
"trn1 v10.4h, v6.4h, v7.4h \n"
"trn2 v11.4h, v6.4h, v7.4h \n"
"trn1 v0.2s, v8.2s, v10.2s \n"
"trn2 v2.2s, v8.2s, v10.2s \n"
"trn1 v1.2s, v9.2s, v11.2s \n"
"trn2 v3.2s, v9.2s, v11.2s \n"
// U = B_t * d_t
"sub v4.4h, v0.4h, v2.4h \n"
"add v5.4h, v1.4h, v2.4h \n"
"sub v6.4h, v2.4h, v1.4h \n"
"sub v7.4h, v3.4h, v1.4h \n"
// save
"st1 {v4.4h}, [%4] \n"
"st1 {v5.4h}, [%5] \n"
"st1 {v6.4h}, [%6] \n"
"st1 {v7.4h}, [%7] \n"
: "=r"(r0), // %0
"=r"(r1), // %1
"=r"(r2), // %2
"=r"(r3), // %3
"=r"(out_tm0), // %4
"=r"(out_tm1), // %5
"=r"(out_tm2), // %6
"=r"(out_tm3) // %7
: "0"(r0),
"1"(r1),
"2"(r2),
"3"(r3),
"4"(out_tm0),
"5"(out_tm1),
"6"(out_tm2),
"7"(out_tm3)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else
asm volatile(
// load
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"pld [%1, #64] \n"
"vld1.s8 {d1}, [%1] \n"
"pld [%2, #64] \n"
"vld1.s8 {d2}, [%2] \n"
"pld [%3, #64] \n"
"vld1.s8 {d3}, [%3] \n"
// w = B_t * d, trans int8 to int16
"vsubl.s8 q2, d0, d2 \n" // d4
"vaddl.s8 q3, d1, d2 \n" // d6
"vsubl.s8 q4, d2, d1 \n" // d8
"vsubl.s8 q5, d3, d1 \n" // d10
// transpose w to w_t
"vtrn.s16 d4, d6 \n"
"vtrn.s16 d8, d10 \n"
"vtrn.s32 d4, d8 \n"
"vtrn.s32 d6, d10 \n"
// U = B_t * d_t
"vsub.s16 d11, d4, d8 \n"
"vadd.s16 d12, d6, d8 \n"
"vsub.s16 d13, d8, d6 \n"
"vsub.s16 d14, d10, d6 \n"
// save
"vst1.s32 {d11}, [%4] \n"
"vst1.s32 {d12}, [%5] \n"
"vst1.s32 {d13}, [%6] \n"
"vst1.s32 {d14}, [%7] \n"
: "=r"(r0), // %0
"=r"(r1), // %1
"=r"(r2), // %2
"=r"(r3), // %3
"=r"(out_tm0), // %4
"=r"(out_tm1), // %5
"=r"(out_tm2), // %6
"=r"(out_tm3) // %7
: "0"(r0),
"1"(r1),
"2"(r2),
"3"(r3),
"4"(out_tm0),
"5"(out_tm1),
"6"(out_tm2),
"7"(out_tm3)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
#endif // __aarch64__
#else
short d0[4], d1[4], d2[4], d3[4];
short w0[4], w1[4], w2[4], w3[4];
short t0[4], t1[4], t2[4], t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
}
// U = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n] = d0[n];
out_tm1[n] = d1[n];
out_tm2[n] = d2[n];
out_tm3[n] = d3[n];
}
#endif
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in FeatherCNN
int nRowBlocks = w_tm / 4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 4; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
int* output4_tm = top_blob_tm.channel(p + 4);
int* output5_tm = top_blob_tm.channel(p + 5);
int* output6_tm = top_blob_tm.channel(p + 6);
int* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"mov w4, %w20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%8] \n"
"ld1 {v9.4h, v10.4h}, [%9] \n" // _k0 = vld1q_s16(kptr);
"add %9, %9, #16 \n"
"ld1 {v11.4h, v12.4h}, [%9] \n" // _k0n = vld1q_s16(kptr+8);
"add %9, %9, #16 \n"
"ld1 {v13.4h, v14.4h}, [%9] \n" // _k1 = vld1q_s16(kptr+16);
"add %9, %9, #16 \n"
"ld1 {v15.4h, v16.4h}, [%9] \n" // _k1n = vld1q_s16(kptr+24);
"add %8, %8, #8 \n"
"add %9, %9, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43)
"smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53)
"smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63)
"smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
"st1 {v4.4s}, [%4] \n" //
"st1 {v5.4s}, [%5] \n" //
"st1 {v6.4s}, [%6] \n" //
"st1 {v7.4s}, [%7] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"vmov.s32 q4, #0 \n"
"vmov.s32 q5, #0 \n"
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"mov r4, %20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%9] \n" // _k0 = vld1q_s16(kptr);
"add %9, #16 \n"
"vld1.s16 {d20-d21}, [%9] \n" // _k0n = vld1q_s16(kptr+8);
"add %9, #16 \n"
"vld1.s16 {d22-d23}, [%9] \n" // _k1 = vld1q_s16(kptr+16);
"add %9, #16 \n"
"vld1.s16 {d24-d25}, [%9] \n" // _k1n = vld1q_s16(kptr+24);
"add %9, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43)
"vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53)
"vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63)
"vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
"vst1.s32 {d8-d9}, [%4] \n"
"vst1.s32 {d10-d11}, [%5] \n"
"vst1.s32 {d12-d13}, [%6] \n"
"vst1.s32 {d14-d15}, [%7] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int sum4[4] = {0};
int sum5[4] = {0};
int sum6[4] = {0};
int sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
sum4[n] += (int)r0[n] * kptr[n + 16];
sum5[n] += (int)r0[n] * kptr[n + 20];
sum6[n] += (int)r0[n] * kptr[n + 24];
sum7[n] += (int)r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __ARM_NEON
output0_tm += 16;
output1_tm += 16;
output2_tm += 16;
output3_tm += 16;
output4_tm += 16;
output5_tm += 16;
output6_tm += 16;
output7_tm += 16;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"mov w4, %w12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%4] \n"
"ld1 {v9.4h, v10.4h}, [%5] \n" // _k0 = vld1q_s16(kptr);
"add %5, %5, #16 \n"
"ld1 {v11.4h, v12.4h}, [%5] \n" // _k0n = vld1q_s16(kptr+8);
"add %4, %4, #8 \n"
"add %5, %5, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"mov r4, %12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%5] \n" // _k0 = vld1q_s16(kptr);
"add %5, #16 \n"
"vld1.s16 {d20-d21}, [%5] \n" // _k0n = vld1q_s16(kptr+8);
"add %5, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __ARM_NEON
output0_tm += 16;
output1_tm += 16;
output2_tm += 16;
output3_tm += 16;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"mov w4, %w6 \n"
"0: \n" // for (int q=0; q<inch; q++)
//"prfm pldl1keep, [%2, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%1] \n"
"ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"mov r4, %6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"add %1, #8 \n"
"vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %2, #8 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q8", "q9");
#endif // __aarch64__
#else
int sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif
output0_tm += 16;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm / 4; // may be the block num in FeatherCNN
int nRowBlocks = w_tm / 4;
#if __ARM_NEON
int32x2_t _shift = vdup_n_s32(-2);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* out_tile = top_blob_tm.channel(p);
int* outRow0 = top_blob_bordered.channel(p);
int* outRow1 = outRow0 + outw;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"add v0.4s, v0.4s, v1.4s \n" // s0 = s0 + s1 + s2;
"sub v1.4s, v1.4s, v2.4s \n"
"add v0.4s, v0.4s, v2.4s \n" // s1 = s1 - s2 + s3;
"add v1.4s, v1.4s, v3.4s \n"
"trn1 v4.4s, v0.4s, v1.4s \n"
"trn2 v5.4s, v0.4s, v1.4s \n"
"dup v6.2d, v4.d[1] \n"
"dup v7.2d, v5.d[1] \n"
"add v0.2s, v4.2s, v5.2s \n" // o0 = d0 + d1 + d2;
"sub v1.2s, v5.2s, v6.2s \n"
"add v0.2s, v0.2s, v6.2s \n" // o1 = d1 - d2 + d3;
"add v1.2s, v1.2s, v7.2s \n"
"sshl v0.2s, v0.2s, %6.2s \n" // o0 = o0 >> 2
"sshl v1.2s, v1.2s, %6.2s \n" // o1 = o1 >> 2
"st1 {v0.2s}, [%1], #8 \n"
"st1 {v1.2s}, [%2], #8 \n"
: "=r"(out_tile), // %0
"=r"(outRow0), // %1
"=r"(outRow1) // %2
: "0"(out_tile),
"1"(outRow0),
"2"(outRow1),
"w"(_shift) // %6
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0!, {d0-d7} \n"
"vaddq.s32 q0, q0, q1 \n" // s0 = s0 + s1 + s2;
"vsubq.s32 q1, q1, q2 \n"
"vaddq.s32 q0, q0, q2 \n" // s1 = s1 - s2 + s3;
"vaddq.s32 q1, q1, q3 \n"
"vtrn.s32 q0, q1 \n"
"vadd.s32 d8, d0, d2 \n" // o0 = d0 + d1 + d2;
"vsub.s32 d9, d2, d1 \n"
"vadd.s32 d8, d8, d1 \n" // o1 = d1 - d2 + d3;
"vadd.s32 d9, d9, d3 \n"
"vshl.s32 d8, d8, %P6 \n" // o0 = o0 >> 2
"vshl.s32 d9, d9, %P6 \n" // o1 = o1 >> 2
"vst1.s32 {d8}, [%1]! \n"
"vst1.s32 {d9}, [%2]! \n"
: "=r"(out_tile), // %0
"=r"(outRow0), // %1
"=r"(outRow1) // %2
: "0"(out_tile),
"1"(outRow0),
"2"(outRow1),
"w"(_shift) // %6
: "cc", "memory", "q0", "q1", "q2", "q3", "q4");
#endif // __aarch64__
#else
int s0[4], s1[4], s2[4], s3[4];
int w0[4], w1[4];
int d0[2], d1[2], d2[2], d3[2];
int o0[2], o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 4];
s2[n] = out_tile[n + 8];
s3[n] = out_tile[n + 12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d1[0] = w0[1];
d1[1] = w1[1];
d2[0] = w0[2];
d2[1] = w1[2];
d3[0] = w0[3];
d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n];
o1[n] = d1[n] - d2[n] + d3[n];
}
                // save to the output rows; shift right by 2 because the kernel
                // transform G was scaled by 2 (G' = 2*G), so each product carries
                // an extra factor of 4 that must be divided back out
outRow0[0] = o0[0] >> 2;
outRow0[1] = o0[1] >> 2;
outRow1[0] = o1[0] >> 2;
outRow1[1] = o1[1] >> 2;
out_tile += 16;
outRow0 += 2;
outRow1 += 2;
#endif // __ARM_NEON
}
outRow0 += outw;
outRow1 += outw;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_int8_neon(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch, const Option& opt)
{
Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);
// G
// const float ktm[6][3] = {
// { 1.0f/4, 0.0f, 0.0f},
// { -1.0f/6, -1.0f/6, -1.0f/6},
// { -1.0f/6, 1.0f/6, -1.0f/6},
// { 1.0f/24, 1.0f/12, 1.0f/6},
// { 1.0f/24, -1.0f/12, 1.0f/6},
// { 0.0f, 0.0f, 1.0f}
// };
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 6}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
for (int r = 0; r < 9; r++)
{
Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)2u);
int p = 0;
for (; p + 7 < outch; p += 8)
{
const short* kernel0 = (const short*)kernel_tm.channel(p);
const short* kernel1 = (const short*)kernel_tm.channel(p + 1);
const short* kernel2 = (const short*)kernel_tm.channel(p + 2);
const short* kernel3 = (const short*)kernel_tm.channel(p + 3);
const short* kernel4 = (const short*)kernel_tm.channel(p + 4);
const short* kernel5 = (const short*)kernel_tm.channel(p + 5);
const short* kernel6 = (const short*)kernel_tm.channel(p + 6);
const short* kernel7 = (const short*)kernel_tm.channel(p + 7);
short* ktmp = kernel_tm_test.channel(p / 8);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp[16] = kernel4[r * 4 + 0];
ktmp[17] = kernel4[r * 4 + 1];
ktmp[18] = kernel4[r * 4 + 2];
ktmp[19] = kernel4[r * 4 + 3];
ktmp[20] = kernel5[r * 4 + 0];
ktmp[21] = kernel5[r * 4 + 1];
ktmp[22] = kernel5[r * 4 + 2];
ktmp[23] = kernel5[r * 4 + 3];
ktmp[24] = kernel6[r * 4 + 0];
ktmp[25] = kernel6[r * 4 + 1];
ktmp[26] = kernel6[r * 4 + 2];
ktmp[27] = kernel6[r * 4 + 3];
ktmp[28] = kernel7[r * 4 + 0];
ktmp[29] = kernel7[r * 4 + 1];
ktmp[30] = kernel7[r * 4 + 2];
ktmp[31] = kernel7[r * 4 + 3];
ktmp += 32;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
kernel4 += 36;
kernel5 += 36;
kernel6 += 36;
kernel7 += 36;
}
}
for (; p + 3 < outch; p += 4)
{
const short* kernel0 = (const short*)kernel_tm.channel(p);
const short* kernel1 = (const short*)kernel_tm.channel(p + 1);
const short* kernel2 = (const short*)kernel_tm.channel(p + 2);
const short* kernel3 = (const short*)kernel_tm.channel(p + 3);
short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp[4] = kernel1[r * 4 + 0];
ktmp[5] = kernel1[r * 4 + 1];
ktmp[6] = kernel1[r * 4 + 2];
ktmp[7] = kernel1[r * 4 + 3];
ktmp[8] = kernel2[r * 4 + 0];
ktmp[9] = kernel2[r * 4 + 1];
ktmp[10] = kernel2[r * 4 + 2];
ktmp[11] = kernel2[r * 4 + 3];
ktmp[12] = kernel3[r * 4 + 0];
ktmp[13] = kernel3[r * 4 + 1];
ktmp[14] = kernel3[r * 4 + 2];
ktmp[15] = kernel3[r * 4 + 3];
ktmp += 16;
kernel0 += 36;
kernel1 += 36;
kernel2 += 36;
kernel3 += 36;
}
}
for (; p < outch; p++)
{
const short* kernel0 = (const short*)kernel_tm.channel(p);
short* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);
for (int q = 0; q < inch; q++)
{
ktmp[0] = kernel0[r * 4 + 0];
ktmp[1] = kernel0[r * 4 + 1];
ktmp[2] = kernel0[r * 4 + 2];
ktmp[3] = kernel0[r * 4 + 3];
ktmp += 4;
kernel0 += 36;
}
}
kernel_tm2.push_back(kernel_tm_test);
}
}
static void conv3x3s1_winograd43_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 9, 2u, opt.workspace_allocator);
// BT
        // const float itm[6][6] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const signed char* r0 = img + w * j * 4;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
const signed char* r4 = r3 + w;
const signed char* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q);
short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q);
short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q);
short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q);
short* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row<short>(q);
short* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row<short>(q);
short* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row<short>(q);
short* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row<short>(q);
short* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row<short>(q);
#if __ARM_NEON
int8x8_t _d0, _d1, _d2, _d3, _d4, _d5;
int16x8_t _w0, _w1, _w2, _w3, _w4, _w5;
int16x8_t _t0, _t1, _t2, _t3, _t4, _t5;
int16x8_t _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = vld1_s8(r0);
_d1 = vld1_s8(r1);
_d2 = vld1_s8(r2);
_d3 = vld1_s8(r3);
_d4 = vld1_s8(r4);
_d5 = vld1_s8(r5);
int8x8_t _1_n = vdup_n_s8(-1);
int8x8_t _2_p = vdup_n_s8(2);
int8x8_t _2_n = vdup_n_s8(-2);
int8x8_t _4_p = vdup_n_s8(4);
int8x8_t _4_n = vdup_n_s8(-4);
int8x8_t _5_n = vdup_n_s8(-5);
int16x8_t _1_n_s16 = vdupq_n_s16(-1);
int16x8_t _2_p_s16 = vdupq_n_s16(2);
int16x8_t _2_n_s16 = vdupq_n_s16(-2);
int16x8_t _4_p_s16 = vdupq_n_s16(4);
int16x8_t _4_n_s16 = vdupq_n_s16(-4);
int16x8_t _5_n_s16 = vdupq_n_s16(-5);
// w = B_t * d
_w0 = vmull_s8(_d0, _4_p);
_w0 = vmlal_s8(_w0, _d2, _5_n);
_w0 = vaddw_s8(_w0, _d4);
_w1 = vmull_s8(_d1, _4_n);
_w1 = vmlal_s8(_w1, _d2, _4_n);
_w1 = vaddw_s8(_w1, _d3);
_w1 = vaddw_s8(_w1, _d4);
_w2 = vmull_s8(_d1, _4_p);
_w2 = vmlal_s8(_w2, _d2, _4_n);
_w2 = vmlal_s8(_w2, _d3, _1_n);
_w2 = vaddw_s8(_w2, _d4);
_w3 = vmull_s8(_d1, _2_n);
_w3 = vmlal_s8(_w3, _d2, _1_n);
_w3 = vmlal_s8(_w3, _d3, _2_p);
_w3 = vaddw_s8(_w3, _d4);
_w4 = vmull_s8(_d1, _2_p);
_w4 = vmlal_s8(_w4, _d2, _1_n);
_w4 = vmlal_s8(_w4, _d3, _2_n);
_w4 = vaddw_s8(_w4, _d4);
_w5 = vmull_s8(_d1, _4_p);
_w5 = vmlal_s8(_w5, _d3, _5_n);
_w5 = vaddw_s8(_w5, _d5);
// transpose d to d_t
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
// d = B_t * d_t
_n0 = vmulq_s16(_t0, _4_p_s16);
_n0 = vmlaq_s16(_n0, _t2, _5_n_s16);
_n0 = vaddq_s16(_n0, _t4);
_n1 = vmulq_s16(_t1, _4_n_s16);
_n1 = vmlaq_s16(_n1, _t2, _4_n_s16);
_n1 = vaddq_s16(_n1, _t3);
_n1 = vaddq_s16(_n1, _t4);
_n2 = vmulq_s16(_t1, _4_p_s16);
_n2 = vmlaq_s16(_n2, _t2, _4_n_s16);
_n2 = vmlaq_s16(_n2, _t3, _1_n_s16);
_n2 = vaddq_s16(_n2, _t4);
_n3 = vmulq_s16(_t1, _2_n_s16);
_n3 = vmlaq_s16(_n3, _t2, _1_n_s16);
_n3 = vmlaq_s16(_n3, _t3, _2_p_s16);
_n3 = vaddq_s16(_n3, _t4);
_n4 = vmulq_s16(_t1, _2_p_s16);
_n4 = vmlaq_s16(_n4, _t2, _1_n_s16);
_n4 = vmlaq_s16(_n4, _t3, _2_n_s16);
_n4 = vaddq_s16(_n4, _t4);
_n5 = vmulq_s16(_t1, _4_p_s16);
_n5 = vmlaq_s16(_n5, _t3, _5_n_s16);
_n5 = vaddq_s16(_n5, _t5);
// save to out_tm
out_tm0[0] = _n0[0];
out_tm0[1] = _n0[1];
out_tm0[2] = _n0[2];
out_tm0[3] = _n0[3];
out_tm1[0] = _n0[4];
out_tm1[1] = _n0[5];
out_tm1[2] = _n1[0];
out_tm1[3] = _n1[1];
out_tm2[0] = _n1[2];
out_tm2[1] = _n1[3];
out_tm2[2] = _n1[4];
out_tm2[3] = _n1[5];
out_tm3[0] = _n2[0];
out_tm3[1] = _n2[1];
out_tm3[2] = _n2[2];
out_tm3[3] = _n2[3];
out_tm4[0] = _n2[4];
out_tm4[1] = _n2[5];
out_tm4[2] = _n3[0];
out_tm4[3] = _n3[1];
out_tm5[0] = _n3[2];
out_tm5[1] = _n3[3];
out_tm5[2] = _n3[4];
out_tm5[3] = _n3[5];
out_tm6[0] = _n4[0];
out_tm6[1] = _n4[1];
out_tm6[2] = _n4[2];
out_tm6[3] = _n4[3];
out_tm7[0] = _n4[4];
out_tm7[1] = _n4[5];
out_tm7[2] = _n5[0];
out_tm7[3] = _n5[1];
out_tm8[0] = _n5[2];
out_tm8[1] = _n5[3];
out_tm8[2] = _n5[4];
out_tm8[3] = _n5[5];
#else
short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __ARM_NEON
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
int* output4_tm = top_blob_tm.channel(p + 4);
int* output5_tm = top_blob_tm.channel(p + 5);
int* output6_tm = top_blob_tm.channel(p + 6);
int* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"mov w4, %w20 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0);
"ld1 {v8.4h}, [%8] \n"
"ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr);
"add %9, %9, #16 \n"
"ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
"add %9, %9, #16 \n"
"ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
"add %9, %9, #16 \n"
"ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
"add %8, %8, #8 \n"
"add %9, %9, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43)
"smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53)
"smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63)
"smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
"st1 {v4.4s}, [%4] \n" //
"st1 {v5.4s}, [%5] \n" //
"st1 {v6.4s}, [%6] \n" //
"st1 {v7.4s}, [%7] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
#else
int nn = inch;
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"vmov.s32 q4, #0 \n"
"vmov.s32 q5, #0 \n"
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr);
"add %9, #16 \n"
"vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
"add %9, #16 \n"
"vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
"add %9, #16 \n"
"vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
"add %9, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43)
"vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53)
"vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63)
"vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73)
"subs %10, %10, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
"vst1.s32 {d8-d9}, [%4] \n"
"vst1.s32 {d10-d11}, [%5] \n"
"vst1.s32 {d12-d13}, [%6] \n"
"vst1.s32 {d14-d15}, [%7] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(r0), // %8
"=r"(kptr), // %9
"=r"(nn) // %10
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(r0),
"9"(kptr),
"10"(nn)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
int sum4[4] = {0};
int sum5[4] = {0};
int sum6[4] = {0};
int sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
sum4[n] += (int)r0[n] * kptr[n + 16];
sum5[n] += (int)r0[n] * kptr[n + 20];
sum6[n] += (int)r0[n] * kptr[n + 24];
sum7[n] += (int)r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __ARM_NEON
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"mov w4, %w12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v8.4h}, [%4] \n"
"ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr);
"add %5, %5, #16 \n"
"ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
"add %4, %4, #8 \n"
"add %5, %5, #16 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
"smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
"smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
"st1 {v1.4s}, [%1] \n" //
"st1 {v2.4s}, [%2] \n" //
"st1 {v3.4s}, [%3] \n" //
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"vmov.s32 q1, #0 \n"
"vmov.s32 q2, #0 \n"
"vmov.s32 q3, #0 \n"
"mov r4, %12 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0
"vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr);
"add %5, #16 \n"
"vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
"add %5, #16 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
"vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
"vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
"vst1.s32 {d2-d3}, [%1] \n"
"vst1.s32 {d4-d5}, [%2] \n"
"vst1.s32 {d6-d7}, [%3] \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(kptr) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10");
#endif // __aarch64__
#else
int sum0[4] = {0};
int sum1[4] = {0};
int sum2[4] = {0};
int sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
sum1[n] += (int)r0[n] * kptr[n + 4];
sum2[n] += (int)r0[n] * kptr[n + 8];
sum3[n] += (int)r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __ARM_NEON
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
asm volatile(
// inch loop
"eor v0.16b, v0.16b, v0.16b \n"
"mov w4, %w6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %1, %1, #8 \n"
"add %2, %2, #8 \n"
"subs w4, w4, #1 \n"
"smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)
"bne 0b \n" // end for
"st1 {v0.4s}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else
asm volatile(
// inch loop
"vmov.s32 q0, #0 \n"
"mov r4, %6 \n"
"0: \n" // for (int q=0; q<inch; q++)
"vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
"add %1, #8 \n"
"vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr);
"add %2, #8 \n"
"vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
"subs r4, r4, #1 \n"
"bne 0b \n" // end for
"vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(kptr) // %2
: "0"(output0_tm),
"1"(r0),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q8", "q9");
#endif // __aarch64__
#else // __ARM_NEON
int sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __ARM_NEON
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// int* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
// const short* k0 = kernel0_tm.row<short>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* out_tile = top_blob_tm.channel(p);
int* outRow0 = top_blob_bordered.channel(p);
int* outRow1 = outRow0 + outw;
int* outRow2 = outRow0 + outw * 2;
int* outRow3 = outRow0 + outw * 3;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
#if __ARM_NEON
int32x4_t _s0, _s1, _s2, _s3, _s4, _s5;
int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n;
int32x4_t _w0, _w3;
int32x2_t _w0n, _w3n;
int32x4_t _d0, _d1, _d2, _d3, _d4, _d5;
int32x4_t _o0, _o1, _o2, _o3;
// load
_s0 = vld1q_s32(out_tile);
_s0n = vld1_s32(out_tile + 4);
_s1 = vld1q_s32(out_tile + 6);
_s1n = vld1_s32(out_tile + 10);
_s2 = vld1q_s32(out_tile + 12);
_s2n = vld1_s32(out_tile + 16);
_s3 = vld1q_s32(out_tile + 18);
_s3n = vld1_s32(out_tile + 22);
_s4 = vld1q_s32(out_tile + 24);
_s4n = vld1_s32(out_tile + 28);
_s5 = vld1q_s32(out_tile + 30);
_s5n = vld1_s32(out_tile + 34);
// w = A_T * W
int32x2_t _tp0 = {1, 4};
int32x2_t _tp1 = {2, 8};
// 4*s5[n]
int32x4_t _s5x4 = vshlq_n_s32(_s5, 2);
int32x2_t _s5x4n = vshl_n_s32(_s5n, 2);
int32x4_t _t1p2 = vaddq_s32(_s1, _s2);
int32x2_t _t1p2n = vadd_s32(_s1n, _s2n);
int32x4_t _t3p4 = vaddq_s32(_s3, _s4);
int32x2_t _t3p4n = vadd_s32(_s3n, _s4n);
int32x4_t _t1s2 = vsubq_s32(_s1, _s2);
int32x2_t _t1s2n = vsub_s32(_s1n, _s2n);
int32x4_t _t3s4 = vsubq_s32(_s3, _s4);
int32x2_t _t3s4n = vsub_s32(_s3n, _s4n);
_w0 = vaddq_s32(_s0, _t1p2);
_w0n = vadd_s32(_s0n, _t1p2n);
_w0 = vaddq_s32(_w0, _t3p4);
_w0n = vadd_s32(_w0n, _t3p4n);
_w0n = vmul_s32(_w0n, _tp0);
// _w2,_w2n
_t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
_t1p2n = vmla_lane_s32(_t1p2n, _t3p4n, _tp0, 1);
_t1p2n = vmul_s32(_t1p2n, _tp0);
_w3 = vaddq_s32(_s5x4, _t1s2);
_w3n = vadd_s32(_s5x4n, _t1s2n);
_w3 = vmlaq_lane_s32(_w3, _t3s4, _tp1, 1);
_w3n = vmla_lane_s32(_w3n, _t3s4n, _tp1, 1);
_w3n = vmul_s32(_w3n, _tp0);
// _w1, _w1n
_t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
_t1s2n = vmla_lane_s32(_t1s2n, _t3s4n, _tp1, 0);
_t1s2n = vmul_s32(_t1s2n, _tp0);
int32x4_t _w02n = vcombine_s32(_w0n, _t1p2n);
int32x4_t _w13n = vcombine_s32(_t1s2n, _w3n);
// transpose w to w_t
#if __aarch64__
int32x4_t _wt0 = vtrn1q_s32(_w0, _t1s2);
int32x4_t _wt1 = vtrn2q_s32(_w0, _t1s2);
int32x4_t _wt2 = vtrn1q_s32(_t1p2, _w3);
int32x4_t _wt3 = vtrn2q_s32(_t1p2, _w3);
int64x2_t _dt0 = vtrn1q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
int64x2_t _dt2 = vtrn2q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
int64x2_t _dt1 = vtrn1q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
int64x2_t _dt3 = vtrn2q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
_d0 = vreinterpretq_s32_s64(_dt0);
_d1 = vreinterpretq_s32_s64(_dt1);
_d2 = vreinterpretq_s32_s64(_dt2);
_d3 = vreinterpretq_s32_s64(_dt3);
_d4 = vtrn1q_s32(_w02n, _w13n);
_d5 = vtrn2q_s32(_w02n, _w13n);
#else
asm volatile(
"vtrn.32 %q[_w0], %q[_w1] \n"
"vtrn.32 %q[_w2], %q[_w3] \n"
"vswp %f[_w0], %e[_w2] \n"
"vswp %f[_w1], %e[_w3] \n"
"vtrn.32 %q[_w02n], %q[_w13n] \n"
: [_w0] "+w"(_w0),
[_w1] "+w"(_t1s2),
[_w2] "+w"(_t1p2),
[_w3] "+w"(_w3),
[_w02n] "+w"(_w02n),
[_w13n] "+w"(_w13n)
:
: "cc", "memory");
_d0 = _w0;
_d1 = _t1s2;
_d2 = _t1p2;
_d3 = _w3;
_d4 = _w02n;
_d5 = _w13n;
#endif
// Y = A_T * w_t
_t1p2 = vaddq_s32(_d1, _d2);
_t3p4 = vaddq_s32(_d3, _d4);
_t1s2 = vsubq_s32(_d1, _d2);
_t3s4 = vsubq_s32(_d3, _d4);
_o0 = vaddq_s32(_d0, _t1p2);
_o0 = vaddq_s32(_o0, _t3p4);
// _o2
_t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
_o3 = vaddq_s32(_d5, _t1s2);
_o3 = vmlaq_lane_s32(_o3, _t3s4, _tp1, 1);
// _o1
_t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
// save to top blob tm
float32x4_t _ot0 = vcvtq_f32_s32(_o0);
float32x4_t _ot1 = vcvtq_f32_s32(_t1s2);
float32x4_t _ot2 = vcvtq_f32_s32(_t1p2);
float32x4_t _ot3 = vcvtq_f32_s32(_o3);
_ot0 = vmulq_n_f32(_ot0, 0.0017361112);
_ot1 = vmulq_n_f32(_ot1, 0.0017361112);
_ot2 = vmulq_n_f32(_ot2, 0.0017361112);
_ot3 = vmulq_n_f32(_ot3, 0.0017361112);
_o0 = vcvtq_s32_f32(_ot0);
_o1 = vcvtq_s32_f32(_ot1);
_o2 = vcvtq_s32_f32(_ot2);
_o3 = vcvtq_s32_f32(_ot3);
vst1q_s32(outRow0, _o0);
vst1q_s32(outRow1, _o1);
vst1q_s32(outRow2, _o2);
vst1q_s32(outRow3, _o3);
#else
int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
int w0[6], w1[6], w2[6], w3[6];
int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
int o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 5; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n];
}
for (int n = 5; n < 6; n++)
{
w0[n] = 4 * (s0[n] + s1[n] + s2[n] + s3[n] + s4[n]);
w1[n] = 4 * (s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]);
w2[n] = 4 * (s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]);
w3[n] = 4 * (s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n]);
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] / 576;
outRow1[n] = o1[n] / 576;
outRow2[n] = o2[n] / 576;
outRow3[n] = o3[n] / 576;
}
#endif // __ARM_NEON
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Winograd F(4,3) 3x3 stride-1 convolution on int8 input that produces a
// dequantized float output. Pipeline:
//   1) pad input so output dims are multiples of 4 (input tiles are 6x6),
//   2) transform each 6x6 input tile: d' = B_T * d * B (int8 -> int16),
//   3) per-tile element-wise GEMM over input channels against the
//      pre-transformed kernels in kernel_tm_test (int16 x int16 -> int32),
//   4) inverse transform each 6x6 result tile to a 4x4 output block:
//      Y = A_T * w * A (int32),
//   5) dequantize: out = Y * (scales_dequant[p] / 576) + bias[p],
//      where 576 = 24^2 compensates the integer scaling folded into the
//      transforms,
//   6) crop the padded output back to the requested size.
// NOTE(review): assumes kernel_tm_test was produced by the matching
// winograd43 int8 kernel-transform routine — verify against the caller.
static void conv3x3s1_winograd43_dequant_int8_neon(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, std::vector<float> scales_dequant, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    // pad to 4n+2, winograd F(4,3)
    Mat bottom_blob_bordered = bottom_blob;

    // round output dims up to a multiple of 4; each 4x4 output block needs a
    // 6x6 input tile, hence the +2 on the padded input dims below
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;

        // layout: each 6x6 (36-element) transformed tile is split into 9
        // groups of 4 int16 values; group r of tile t lives in
        // channel (tiles * r + t), row q (input channel)
        bottom_blob_tm.create(4, inch, tiles * 9, 2u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 =  4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 =  4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 =  2 * r01 - r02 - 2 * r03 + r04
        // 5 =  4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const signed char* img = bottom_blob_bordered.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // six consecutive input rows of the current tile row
                const signed char* r0 = img + w * j * 4;
                const signed char* r1 = r0 + w;
                const signed char* r2 = r1 + w;
                const signed char* r3 = r2 + w;
                const signed char* r4 = r3 + w;
                const signed char* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    // nine destination groups (4 shorts each) for this tile
                    short* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row<short>(q);
                    short* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row<short>(q);
                    short* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row<short>(q);
                    short* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row<short>(q);
                    short* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row<short>(q);
                    short* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row<short>(q);
                    short* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row<short>(q);
                    short* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row<short>(q);
                    short* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row<short>(q);
#if __ARM_NEON
                    int8x8_t _d0, _d1, _d2, _d3, _d4, _d5;
                    int16x8_t _w0, _w1, _w2, _w3, _w4, _w5;
                    int16x8_t _t0, _t1, _t2, _t3, _t4, _t5;
                    int16x8_t _n0, _n1, _n2, _n3, _n4, _n5;

                    // load (only lanes 0..5 of each 8-lane vector are used)
                    _d0 = vld1_s8(r0);
                    _d1 = vld1_s8(r1);
                    _d2 = vld1_s8(r2);
                    _d3 = vld1_s8(r3);
                    _d4 = vld1_s8(r4);
                    _d5 = vld1_s8(r5);

                    int8x8_t _1_n = vdup_n_s8(-1);
                    int8x8_t _2_p = vdup_n_s8(2);
                    int8x8_t _2_n = vdup_n_s8(-2);
                    int8x8_t _4_p = vdup_n_s8(4);
                    int8x8_t _4_n = vdup_n_s8(-4);
                    int8x8_t _5_n = vdup_n_s8(-5);

                    int16x8_t _1_n_s16 = vdupq_n_s16(-1);
                    int16x8_t _2_p_s16 = vdupq_n_s16(2);
                    int16x8_t _2_n_s16 = vdupq_n_s16(-2);
                    int16x8_t _4_p_s16 = vdupq_n_s16(4);
                    int16x8_t _4_n_s16 = vdupq_n_s16(-4);
                    int16x8_t _5_n_s16 = vdupq_n_s16(-5);

                    // w = B_t * d  (column transform; int8 widening MAC -> int16)
                    _w0 = vmull_s8(_d0, _4_p);
                    _w0 = vmlal_s8(_w0, _d2, _5_n);
                    _w0 = vaddw_s8(_w0, _d4);

                    _w1 = vmull_s8(_d1, _4_n);
                    _w1 = vmlal_s8(_w1, _d2, _4_n);
                    _w1 = vaddw_s8(_w1, _d3);
                    _w1 = vaddw_s8(_w1, _d4);

                    _w2 = vmull_s8(_d1, _4_p);
                    _w2 = vmlal_s8(_w2, _d2, _4_n);
                    _w2 = vmlal_s8(_w2, _d3, _1_n);
                    _w2 = vaddw_s8(_w2, _d4);

                    _w3 = vmull_s8(_d1, _2_n);
                    _w3 = vmlal_s8(_w3, _d2, _1_n);
                    _w3 = vmlal_s8(_w3, _d3, _2_p);
                    _w3 = vaddw_s8(_w3, _d4);

                    _w4 = vmull_s8(_d1, _2_p);
                    _w4 = vmlal_s8(_w4, _d2, _1_n);
                    _w4 = vmlal_s8(_w4, _d3, _2_n);
                    _w4 = vaddw_s8(_w4, _d4);

                    _w5 = vmull_s8(_d1, _4_p);
                    _w5 = vmlal_s8(_w5, _d3, _5_n);
                    _w5 = vaddw_s8(_w5, _d5);

                    // transpose d to d_t (per-lane 6x6 transpose via GCC/Clang
                    // vector-subscript extension)
                    {
                        _t0[0] = _w0[0];
                        _t1[0] = _w0[1];
                        _t2[0] = _w0[2];
                        _t3[0] = _w0[3];
                        _t4[0] = _w0[4];
                        _t5[0] = _w0[5];
                        _t0[1] = _w1[0];
                        _t1[1] = _w1[1];
                        _t2[1] = _w1[2];
                        _t3[1] = _w1[3];
                        _t4[1] = _w1[4];
                        _t5[1] = _w1[5];
                        _t0[2] = _w2[0];
                        _t1[2] = _w2[1];
                        _t2[2] = _w2[2];
                        _t3[2] = _w2[3];
                        _t4[2] = _w2[4];
                        _t5[2] = _w2[5];
                        _t0[3] = _w3[0];
                        _t1[3] = _w3[1];
                        _t2[3] = _w3[2];
                        _t3[3] = _w3[3];
                        _t4[3] = _w3[4];
                        _t5[3] = _w3[5];
                        _t0[4] = _w4[0];
                        _t1[4] = _w4[1];
                        _t2[4] = _w4[2];
                        _t3[4] = _w4[3];
                        _t4[4] = _w4[4];
                        _t5[4] = _w4[5];
                        _t0[5] = _w5[0];
                        _t1[5] = _w5[1];
                        _t2[5] = _w5[2];
                        _t3[5] = _w5[3];
                        _t4[5] = _w5[4];
                        _t5[5] = _w5[5];
                    }

                    // d = B_t * d_t  (row transform on the transposed data)
                    _n0 = vmulq_s16(_t0, _4_p_s16);
                    _n0 = vmlaq_s16(_n0, _t2, _5_n_s16);
                    _n0 = vaddq_s16(_n0, _t4);

                    _n1 = vmulq_s16(_t1, _4_n_s16);
                    _n1 = vmlaq_s16(_n1, _t2, _4_n_s16);
                    _n1 = vaddq_s16(_n1, _t3);
                    _n1 = vaddq_s16(_n1, _t4);

                    _n2 = vmulq_s16(_t1, _4_p_s16);
                    _n2 = vmlaq_s16(_n2, _t2, _4_n_s16);
                    _n2 = vmlaq_s16(_n2, _t3, _1_n_s16);
                    _n2 = vaddq_s16(_n2, _t4);

                    _n3 = vmulq_s16(_t1, _2_n_s16);
                    _n3 = vmlaq_s16(_n3, _t2, _1_n_s16);
                    _n3 = vmlaq_s16(_n3, _t3, _2_p_s16);
                    _n3 = vaddq_s16(_n3, _t4);

                    _n4 = vmulq_s16(_t1, _2_p_s16);
                    _n4 = vmlaq_s16(_n4, _t2, _1_n_s16);
                    _n4 = vmlaq_s16(_n4, _t3, _2_n_s16);
                    _n4 = vaddq_s16(_n4, _t4);

                    _n5 = vmulq_s16(_t1, _4_p_s16);
                    _n5 = vmlaq_s16(_n5, _t3, _5_n_s16);
                    _n5 = vaddq_s16(_n5, _t5);

                    // save to out_tm: 36 results scattered as 9 groups of 4
                    out_tm0[0] = _n0[0];
                    out_tm0[1] = _n0[1];
                    out_tm0[2] = _n0[2];
                    out_tm0[3] = _n0[3];
                    out_tm1[0] = _n0[4];
                    out_tm1[1] = _n0[5];
                    out_tm1[2] = _n1[0];
                    out_tm1[3] = _n1[1];
                    out_tm2[0] = _n1[2];
                    out_tm2[1] = _n1[3];
                    out_tm2[2] = _n1[4];
                    out_tm2[3] = _n1[5];
                    out_tm3[0] = _n2[0];
                    out_tm3[1] = _n2[1];
                    out_tm3[2] = _n2[2];
                    out_tm3[3] = _n2[3];
                    out_tm4[0] = _n2[4];
                    out_tm4[1] = _n2[5];
                    out_tm4[2] = _n3[0];
                    out_tm4[3] = _n3[1];
                    out_tm5[0] = _n3[2];
                    out_tm5[1] = _n3[3];
                    out_tm5[2] = _n3[4];
                    out_tm5[3] = _n3[5];
                    out_tm6[0] = _n4[0];
                    out_tm6[1] = _n4[1];
                    out_tm6[2] = _n4[2];
                    out_tm6[3] = _n4[3];
                    out_tm7[0] = _n4[4];
                    out_tm7[1] = _n4[5];
                    out_tm7[2] = _n5[0];
                    out_tm7[3] = _n5[1];
                    out_tm8[0] = _n5[2];
                    out_tm8[1] = _n5[3];
                    out_tm8[2] = _n5[4];
                    out_tm8[3] = _n5[5];
#else
                    // scalar fallback: identical math to the NEON path above
                    short d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
                    short w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
                    short t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
                        w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
                        w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
                        w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
                        w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
                        w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t4[0] = w0[4];
                        t5[0] = w0[5];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t4[1] = w1[4];
                        t5[1] = w1[5];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t4[2] = w2[4];
                        t5[2] = w2[5];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                        t4[3] = w3[4];
                        t5[3] = w3[5];
                        t0[4] = w4[0];
                        t1[4] = w4[1];
                        t2[4] = w4[2];
                        t3[4] = w4[3];
                        t4[4] = w4[4];
                        t5[4] = w4[5];
                        t0[5] = w5[0];
                        t1[5] = w5[1];
                        t2[5] = w5[2];
                        t3[5] = w5[3];
                        t4[5] = w5[4];
                        t5[5] = w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
                        d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
                        d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
                        d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
                        d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
                        d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
                    }
                    // save to out_tm
                    {
                        out_tm0[0] = d0[0];
                        out_tm0[1] = d0[1];
                        out_tm0[2] = d0[2];
                        out_tm0[3] = d0[3];
                        out_tm1[0] = d0[4];
                        out_tm1[1] = d0[5];
                        out_tm1[2] = d1[0];
                        out_tm1[3] = d1[1];
                        out_tm2[0] = d1[2];
                        out_tm2[1] = d1[3];
                        out_tm2[2] = d1[4];
                        out_tm2[3] = d1[5];
                        out_tm3[0] = d2[0];
                        out_tm3[1] = d2[1];
                        out_tm3[2] = d2[2];
                        out_tm3[3] = d2[3];
                        out_tm4[0] = d2[4];
                        out_tm4[1] = d2[5];
                        out_tm4[2] = d3[0];
                        out_tm4[3] = d3[1];
                        out_tm5[0] = d3[2];
                        out_tm5[1] = d3[3];
                        out_tm5[2] = d3[4];
                        out_tm5[3] = d3[5];
                        out_tm6[0] = d4[0];
                        out_tm6[1] = d4[1];
                        out_tm6[2] = d4[2];
                        out_tm6[3] = d4[3];
                        out_tm7[0] = d4[4];
                        out_tm7[1] = d4[5];
                        out_tm7[2] = d5[0];
                        out_tm7[3] = d5[1];
                        out_tm8[0] = d5[2];
                        out_tm8[1] = d5[3];
                        out_tm8[2] = d5[4];
                        out_tm8[3] = d5[5];
                    }
#endif // __ARM_NEON
                    // advance to the next tile (stride 4, tiles overlap by 2)
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);

        // parallel over the 9 four-element groups of the 36-element tile;
        // output channels are processed 8, then 4, then 1 at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 9; r++)
        {
            int nn_outch = 0;
            int remain_outch_start = 0;

            nn_outch = outch >> 3;
            remain_outch_start = nn_outch << 3;

            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = pp * 8;

                int* output0_tm = top_blob_tm.channel(p);
                int* output1_tm = top_blob_tm.channel(p + 1);
                int* output2_tm = top_blob_tm.channel(p + 2);
                int* output3_tm = top_blob_tm.channel(p + 3);
                int* output4_tm = top_blob_tm.channel(p + 4);
                int* output5_tm = top_blob_tm.channel(p + 5);
                int* output6_tm = top_blob_tm.channel(p + 6);
                int* output7_tm = top_blob_tm.channel(p + 7);

                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;
                output4_tm = output4_tm + r * 4;
                output5_tm = output5_tm + r * 4;
                output6_tm = output6_tm + r * 4;
                output7_tm = output7_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    // kernels are pre-interleaved: 8 output channels x 4 shorts
                    // per input channel
                    const short* kptr = kernel_tm_test[r].channel(p / 8);
                    const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
                    // accumulate sum[0..7] over inch in v0-v7; one 4-lane
                    // int16 widening MAC per output channel per iteration
                    asm volatile(
                        // inch loop
                        "eor v0.16b, v0.16b, v0.16b \n"
                        "eor v1.16b, v1.16b, v1.16b \n"
                        "eor v2.16b, v2.16b, v2.16b \n"
                        "eor v3.16b, v3.16b, v3.16b \n"
                        "eor v4.16b, v4.16b, v4.16b \n"
                        "eor v5.16b, v5.16b, v5.16b \n"
                        "eor v6.16b, v6.16b, v6.16b \n"
                        "eor v7.16b, v7.16b, v7.16b \n"
                        "mov w4, %w20 \n"

                        "0: \n" // for (int q=0; q<inch; q++)

                        "prfm pldl1keep, [%9, #128] \n" // _r0 = vld1_s16(r0);
                        "ld1 {v8.4h}, [%8] \n"

                        "ld1 {v9.4h, v10.4h}, [%9] \n" // _k01 = vld1q_s16(kptr);
                        "add %9, %9, #16 \n"
                        "ld1 {v11.4h, v12.4h}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
                        "add %9, %9, #16 \n"
                        "ld1 {v13.4h, v14.4h}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
                        "add %9, %9, #16 \n"
                        "ld1 {v15.4h, v16.4h}, [%9] \n" // _k67 = vld1q_s16(kptr+24);

                        "add %8, %8, #8 \n"
                        "add %9, %9, #16 \n"

                        "subs w4, w4, #1 \n"

                        "smlal v0.4s, v8.4h, v9.4h \n"  // sum0 += (a00-a03) * (k00-k03)
                        "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
                        "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
                        "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)
                        "smlal v4.4s, v8.4h, v13.4h \n" // sum4 += (a00-a03) * (k40-k43)
                        "smlal v5.4s, v8.4h, v14.4h \n" // sum5 += (a00-a03) * (k50-k53)
                        "smlal v6.4s, v8.4h, v15.4h \n" // sum6 += (a00-a03) * (k60-k63)
                        "smlal v7.4s, v8.4h, v16.4h \n" // sum7 += (a00-a03) * (k70-k73)

                        "bne 0b \n" // end for

                        "st1 {v0.4s}, [%0] \n" // store the result to memory
                        "st1 {v1.4s}, [%1] \n" //
                        "st1 {v2.4s}, [%2] \n" //
                        "st1 {v3.4s}, [%3] \n" //
                        "st1 {v4.4s}, [%4] \n" //
                        "st1 {v5.4s}, [%5] \n" //
                        "st1 {v6.4s}, [%6] \n" //
                        "st1 {v7.4s}, [%7] \n" //

                        : "=r"(output0_tm), // %0
                        "=r"(output1_tm),   // %1
                        "=r"(output2_tm),   // %2
                        "=r"(output3_tm),   // %3
                        "=r"(output4_tm),   // %4
                        "=r"(output5_tm),   // %5
                        "=r"(output6_tm),   // %6
                        "=r"(output7_tm),   // %7
                        "=r"(r0),           // %8
                        "=r"(kptr)          // %9
                        : "0"(output0_tm),
                        "1"(output1_tm),
                        "2"(output2_tm),
                        "3"(output3_tm),
                        "4"(output4_tm),
                        "5"(output5_tm),
                        "6"(output6_tm),
                        "7"(output7_tm),
                        "8"(r0),
                        "9"(kptr),
                        "r"(inch) // %20
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
#else
                    // ARMv7 variant of the same 8-channel accumulation
                    asm volatile(
                        // inch loop
                        "vmov.s32 q0, #0 \n"
                        "vmov.s32 q1, #0 \n"
                        "vmov.s32 q2, #0 \n"
                        "vmov.s32 q3, #0 \n"
                        "vmov.s32 q4, #0 \n"
                        "vmov.s32 q5, #0 \n"
                        "vmov.s32 q6, #0 \n"
                        "vmov.s32 q7, #0 \n"
                        "mov r4, %20 \n"

                        "0: \n" // for (int q=0; q<inch; q++)

                        "vld1.s16 {d16}, [%8]! \n" // _r0 = vld1_s16(r0); // input inch0

                        "vld1.s16 {d18-d19}, [%9] \n" // _k01 = vld1q_s16(kptr);
                        "add %9, #16 \n"
                        "vld1.s16 {d20-d21}, [%9] \n" // _k23 = vld1q_s16(kptr+8);
                        "add %9, #16 \n"
                        "vld1.s16 {d22-d23}, [%9] \n" // _k45 = vld1q_s16(kptr+16);
                        "add %9, #16 \n"
                        "vld1.s16 {d24-d25}, [%9] \n" // _k67 = vld1q_s16(kptr+24);
                        "add %9, #16 \n"

                        "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
                        "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
                        "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
                        "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)
                        "vmlal.s16 q4, d16, d22 \n" // sum4 += (a00-a03) * (k40-k43)
                        "vmlal.s16 q5, d16, d23 \n" // sum5 += (a00-a03) * (k50-k53)
                        "vmlal.s16 q6, d16, d24 \n" // sum6 += (a00-a03) * (k60-k63)
                        "vmlal.s16 q7, d16, d25 \n" // sum7 += (a00-a03) * (k70-k73)

                        "subs r4, r4, #1 \n"
                        "bne 0b \n" // end for

                        "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
                        "vst1.s32 {d2-d3}, [%1] \n"
                        "vst1.s32 {d4-d5}, [%2] \n"
                        "vst1.s32 {d6-d7}, [%3] \n"
                        "vst1.s32 {d8-d9}, [%4] \n"
                        "vst1.s32 {d10-d11}, [%5] \n"
                        "vst1.s32 {d12-d13}, [%6] \n"
                        "vst1.s32 {d14-d15}, [%7] \n"

                        : "=r"(output0_tm), // %0
                        "=r"(output1_tm),   // %1
                        "=r"(output2_tm),   // %2
                        "=r"(output3_tm),   // %3
                        "=r"(output4_tm),   // %4
                        "=r"(output5_tm),   // %5
                        "=r"(output6_tm),   // %6
                        "=r"(output7_tm),   // %7
                        "=r"(r0),           // %8
                        "=r"(kptr)          // %9
                        : "0"(output0_tm),
                        "1"(output1_tm),
                        "2"(output2_tm),
                        "3"(output3_tm),
                        "4"(output4_tm),
                        "5"(output5_tm),
                        "6"(output6_tm),
                        "7"(output7_tm),
                        "8"(r0),
                        "9"(kptr),
                        "r"(inch) // %20
                        : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12");
#endif // __aarch64__
#else
                    // portable fallback: 8 running int32 sums over inch
                    int sum0[4] = {0};
                    int sum1[4] = {0};
                    int sum2[4] = {0};
                    int sum3[4] = {0};
                    int sum4[4] = {0};
                    int sum5[4] = {0};
                    int sum6[4] = {0};
                    int sum7[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                            sum1[n] += (int)r0[n] * kptr[n + 4];
                            sum2[n] += (int)r0[n] * kptr[n + 8];
                            sum3[n] += (int)r0[n] * kptr[n + 12];
                            sum4[n] += (int)r0[n] * kptr[n + 16];
                            sum5[n] += (int)r0[n] * kptr[n + 20];
                            sum6[n] += (int)r0[n] * kptr[n + 24];
                            sum7[n] += (int)r0[n] * kptr[n + 28];
                        }
                        kptr += 32;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                        output4_tm[n] = sum4[n];
                        output5_tm[n] = sum5[n];
                        output6_tm[n] = sum6[n];
                        output7_tm[n] = sum7[n];
                    }
#endif // __ARM_NEON
                    // next tile: rows of top_blob_tm are 36 ints apart
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                    output4_tm += 36;
                    output5_tm += 36;
                    output6_tm += 36;
                    output7_tm += 36;
                }
            }

            nn_outch = (outch - remain_outch_start) >> 2;

            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = remain_outch_start + pp * 4;

                int* output0_tm = top_blob_tm.channel(p);
                int* output1_tm = top_blob_tm.channel(p + 1);
                int* output2_tm = top_blob_tm.channel(p + 2);
                int* output3_tm = top_blob_tm.channel(p + 3);

                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
                    const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
                    asm volatile(
                        // inch loop
                        "eor v0.16b, v0.16b, v0.16b \n"
                        "eor v1.16b, v1.16b, v1.16b \n"
                        "eor v2.16b, v2.16b, v2.16b \n"
                        "eor v3.16b, v3.16b, v3.16b \n"
                        "mov w4, %w12 \n"

                        "0: \n" // for (int q=0; q<inch; q++)

                        "prfm pldl1keep, [%5, #128] \n" // _r0 = vld1_s16(r0); // input inch0
                        "ld1 {v8.4h}, [%4] \n"

                        "ld1 {v9.4h, v10.4h}, [%5] \n" // _k01 = vld1q_s16(kptr);
                        "add %5, %5, #16 \n"
                        "ld1 {v11.4h, v12.4h}, [%5] \n" // _k23 = vld1q_s16(kptr+8);

                        "add %4, %4, #8 \n"
                        "add %5, %5, #16 \n"

                        "subs w4, w4, #1 \n"

                        "smlal v0.4s, v8.4h, v9.4h \n"  // sum0 += (a00-a03) * (k00-k03)
                        "smlal v1.4s, v8.4h, v10.4h \n" // sum1 += (a00-a03) * (k10-k13)
                        "smlal v2.4s, v8.4h, v11.4h \n" // sum2 += (a00-a03) * (k20-k23)
                        "smlal v3.4s, v8.4h, v12.4h \n" // sum3 += (a00-a03) * (k30-k33)

                        "bne 0b \n" // end for

                        "st1 {v0.4s}, [%0] \n" // store the result to memory
                        "st1 {v1.4s}, [%1] \n" //
                        "st1 {v2.4s}, [%2] \n" //
                        "st1 {v3.4s}, [%3] \n" //

                        : "=r"(output0_tm), // %0
                        "=r"(output1_tm),   // %1
                        "=r"(output2_tm),   // %2
                        "=r"(output3_tm),   // %3
                        "=r"(r0),           // %4
                        "=r"(kptr)          // %5
                        : "0"(output0_tm),
                        "1"(output1_tm),
                        "2"(output2_tm),
                        "3"(output3_tm),
                        "4"(r0),
                        "5"(kptr),
                        "r"(inch) // %12
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12");
#else
                    asm volatile(
                        // inch loop
                        "vmov.s32 q0, #0 \n"
                        "vmov.s32 q1, #0 \n"
                        "vmov.s32 q2, #0 \n"
                        "vmov.s32 q3, #0 \n"
                        "mov r4, %12 \n"

                        "0: \n" // for (int q=0; q<inch; q++)

                        "vld1.s16 {d16}, [%4]! \n" // _r0 = vld1_s16(r0); // input inch0

                        "vld1.s16 {d18-d19}, [%5] \n" // _k01 = vld1q_s16(kptr);
                        "add %5, #16 \n"
                        "vld1.s16 {d20-d21}, [%5] \n" // _k23 = vld1q_s16(kptr+8);
                        "add %5, #16 \n"

                        "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)
                        "vmlal.s16 q1, d16, d19 \n" // sum1 += (a00-a03) * (k10-k13)
                        "vmlal.s16 q2, d16, d20 \n" // sum2 += (a00-a03) * (k20-k23)
                        "vmlal.s16 q3, d16, d21 \n" // sum3 += (a00-a03) * (k30-k33)

                        "subs r4, r4, #1 \n"
                        "bne 0b \n" // end for

                        "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory
                        "vst1.s32 {d2-d3}, [%1] \n"
                        "vst1.s32 {d4-d5}, [%2] \n"
                        "vst1.s32 {d6-d7}, [%3] \n"

                        : "=r"(output0_tm), // %0
                        "=r"(output1_tm),   // %1
                        "=r"(output2_tm),   // %2
                        "=r"(output3_tm),   // %3
                        "=r"(r0),           // %4
                        "=r"(kptr)          // %5
                        : "0"(output0_tm),
                        "1"(output1_tm),
                        "2"(output2_tm),
                        "3"(output3_tm),
                        "4"(r0),
                        "5"(kptr),
                        "r"(inch) // %12
                        : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10");
#endif // __aarch64__
#else
                    int sum0[4] = {0};
                    int sum1[4] = {0};
                    int sum2[4] = {0};
                    int sum3[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                            sum1[n] += (int)r0[n] * kptr[n + 4];
                            sum2[n] += (int)r0[n] * kptr[n + 8];
                            sum3[n] += (int)r0[n] * kptr[n + 12];
                        }
                        kptr += 16;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                    }
#endif // __ARM_NEON
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                }
            }

            remain_outch_start += nn_outch << 2;

            // leftover output channels, one at a time
            for (int p = remain_outch_start; p < outch; p++)
            {
                int* output0_tm = top_blob_tm.channel(p);

                output0_tm = output0_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    // NOTE(review): channel index p/8 + (p%8)/4 + p%4 must
                    // mirror the layout written by the matching winograd43
                    // kernel-transform routine — verify against it
                    const short* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
                    const short* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __ARM_NEON
#if __aarch64__
                    asm volatile(
                        // inch loop
                        "eor v0.16b, v0.16b, v0.16b \n"
                        "mov w4, %w6 \n"

                        "0: \n" // for (int q=0; q<inch; q++)

                        "ld1 {v8.4h}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
                        "ld1 {v9.4h}, [%2] \n" // _k0 = vld1q_s16(kptr);

                        "add %1, %1, #8 \n"
                        "add %2, %2, #8 \n"

                        "subs w4, w4, #1 \n"

                        "smlal v0.4s, v8.4h, v9.4h \n" // sum0 += (a00-a03) * (k00-k03)

                        "bne 0b \n" // end for

                        "st1 {v0.4s}, [%0] \n" // store the result to memory

                        : "=r"(output0_tm), // %0
                        "=r"(r0),           // %1
                        "=r"(kptr)          // %2
                        : "0"(output0_tm),
                        "1"(r0),
                        "2"(kptr),
                        "r"(inch) // %6
                        : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9");
#else
                    asm volatile(
                        // inch loop
                        "vmov.s32 q0, #0 \n"
                        "mov r4, %6 \n"

                        "0: \n" // for (int q=0; q<inch; q++)

                        "vld1.s16 {d16}, [%1] \n" // _r0 = vld1_s16(r0); // input inch0
                        "add %1, #8 \n"
                        "vld1.s16 {d18}, [%2] \n" // _k0 = vld1q_s16(kptr);
                        "add %2, #8 \n"

                        "vmlal.s16 q0, d16, d18 \n" // sum0 += (a00-a03) * (k00-k03)

                        "subs r4, r4, #1 \n"
                        "bne 0b \n" // end for

                        "vst1.s32 {d0-d1}, [%0] \n" // store the result to memory

                        : "=r"(output0_tm), // %0
                        "=r"(r0),           // %1
                        "=r"(kptr)          // %2
                        : "0"(output0_tm),
                        "1"(r0),
                        "2"(kptr),
                        "r"(inch) // %6
                        : "cc", "memory", "r4", "q0", "q8", "q9");
#endif // __aarch64__
#else  // __ARM_NEON
                    int sum0[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += (int)r0[n] * kptr[n];
                        }
                        kptr += 4;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                    }
#endif // __ARM_NEON
                    output0_tm += 36;
                }
            }

            // for (int p=0; p<outch; p++)
            // {
            //     Mat out0_tm = top_blob_tm.channel(p);
            //     const Mat kernel0_tm = kernel_tm.channel(p);
            //     for (int i=0; i<tiles; i++)
            //     {
            //         int* output0_tm = out0_tm.row<int>(i);
            //         int sum0[36] = {0};
            //         for (int q=0; q<inch; q++)
            //         {
            //             const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
            //             const short* k0 = kernel0_tm.row<short>(q);
            //             for (int n=0; n<36; n++)
            //             {
            //                 sum0[n] += (int)r0[n] * k0[n];
            //             }
            //         }
            //         for (int n=0; n<36; n++)
            //         {
            //             output0_tm[n] = sum0[n];
            //         }
            //     }
            // }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            int* out_tile = top_blob_tm.channel(p);
            float* outRow0 = top_blob_bordered.channel(p);
            float* outRow1 = outRow0 + outw;
            float* outRow2 = outRow0 + outw * 2;
            float* outRow3 = outRow0 + outw * 3;

            const float bias0 = bias ? bias[p] : 0.f;
            const float scale_dequant0 = scales_dequant[p];
            // fold the 1/576 winograd normalization into the dequant scale
            const float scale0 = scale_dequant0 / 576.0;

            for (int j = 0; j < nColBlocks; j++)
            {
                for (int i = 0; i < nRowBlocks; i++)
                {
#if __ARM_NEON
                    int32x4_t _s0, _s1, _s2, _s3, _s4, _s5;
                    int32x2_t _s0n, _s1n, _s2n, _s3n, _s4n, _s5n;
                    int32x4_t _w0, _w3;
                    int32x2_t _w0n, _w3n;
                    int32x4_t _d0, _d1, _d2, _d3, _d4, _d5;
                    int32x4_t _o0, _o3;

                    // load the 6x6 int32 tile as 4+2 lane pairs per row
                    _s0 = vld1q_s32(out_tile);
                    _s0n = vld1_s32(out_tile + 4);
                    _s1 = vld1q_s32(out_tile + 6);
                    _s1n = vld1_s32(out_tile + 10);
                    _s2 = vld1q_s32(out_tile + 12);
                    _s2n = vld1_s32(out_tile + 16);
                    _s3 = vld1q_s32(out_tile + 18);
                    _s3n = vld1_s32(out_tile + 22);
                    _s4 = vld1q_s32(out_tile + 24);
                    _s4n = vld1_s32(out_tile + 28);
                    _s5 = vld1q_s32(out_tile + 30);
                    _s5n = vld1_s32(out_tile + 34);

                    // w = A_T * W
                    int32x2_t _tp0 = {1, 4};
                    int32x2_t _tp1 = {2, 8};
                    // 4*s5[n]
                    int32x4_t _s5x4 = vshlq_n_s32(_s5, 2);
                    int32x2_t _s5x4n = vshl_n_s32(_s5n, 2);

                    int32x4_t _t1p2 = vaddq_s32(_s1, _s2);
                    int32x2_t _t1p2n = vadd_s32(_s1n, _s2n);
                    int32x4_t _t3p4 = vaddq_s32(_s3, _s4);
                    int32x2_t _t3p4n = vadd_s32(_s3n, _s4n);
                    int32x4_t _t1s2 = vsubq_s32(_s1, _s2);
                    int32x2_t _t1s2n = vsub_s32(_s1n, _s2n);
                    int32x4_t _t3s4 = vsubq_s32(_s3, _s4);
                    int32x2_t _t3s4n = vsub_s32(_s3n, _s4n);

                    _w0 = vaddq_s32(_s0, _t1p2);
                    _w0n = vadd_s32(_s0n, _t1p2n);
                    _w0 = vaddq_s32(_w0, _t3p4);
                    _w0n = vadd_s32(_w0n, _t3p4n);
                    _w0n = vmul_s32(_w0n, _tp0);
                    // _w2,_w2n
                    _t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
                    _t1p2n = vmla_lane_s32(_t1p2n, _t3p4n, _tp0, 1);
                    _t1p2n = vmul_s32(_t1p2n, _tp0);

                    _w3 = vaddq_s32(_s5x4, _t1s2);
                    _w3n = vadd_s32(_s5x4n, _t1s2n);
                    _w3 = vmlaq_lane_s32(_w3, _t3s4, _tp1, 1);
                    _w3n = vmla_lane_s32(_w3n, _t3s4n, _tp1, 1);
                    _w3n = vmul_s32(_w3n, _tp0);
                    // _w1, _w1n
                    _t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);
                    _t1s2n = vmla_lane_s32(_t1s2n, _t3s4n, _tp1, 0);
                    _t1s2n = vmul_s32(_t1s2n, _tp0);

                    int32x4_t _w02n = vcombine_s32(_w0n, _t1p2n);
                    int32x4_t _w13n = vcombine_s32(_t1s2n, _w3n);

                    // transpose w to w_t
#if __aarch64__
                    int32x4_t _wt0 = vtrn1q_s32(_w0, _t1s2);
                    int32x4_t _wt1 = vtrn2q_s32(_w0, _t1s2);
                    int32x4_t _wt2 = vtrn1q_s32(_t1p2, _w3);
                    int32x4_t _wt3 = vtrn2q_s32(_t1p2, _w3);
                    int64x2_t _dt0 = vtrn1q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
                    int64x2_t _dt2 = vtrn2q_s64(vreinterpretq_s64_s32(_wt0), vreinterpretq_s64_s32(_wt2));
                    int64x2_t _dt1 = vtrn1q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
                    int64x2_t _dt3 = vtrn2q_s64(vreinterpretq_s64_s32(_wt1), vreinterpretq_s64_s32(_wt3));
                    _d0 = vreinterpretq_s32_s64(_dt0);
                    _d1 = vreinterpretq_s32_s64(_dt1);
                    _d2 = vreinterpretq_s32_s64(_dt2);
                    _d3 = vreinterpretq_s32_s64(_dt3);
                    _d4 = vtrn1q_s32(_w02n, _w13n);
                    _d5 = vtrn2q_s32(_w02n, _w13n);
#else
                    asm volatile(
                        "vtrn.32 %q[_w0], %q[_w1] \n"
                        "vtrn.32 %q[_w2], %q[_w3] \n"
                        "vswp %f[_w0], %e[_w2] \n"
                        "vswp %f[_w1], %e[_w3] \n"
                        "vtrn.32 %q[_w02n], %q[_w13n] \n"
                        : [_w0] "+w"(_w0),
                        [_w1] "+w"(_t1s2),
                        [_w2] "+w"(_t1p2),
                        [_w3] "+w"(_w3),
                        [_w02n] "+w"(_w02n),
                        [_w13n] "+w"(_w13n)
                        :
                        : "cc", "memory");
                    _d0 = _w0;
                    _d1 = _t1s2;
                    _d2 = _t1p2;
                    _d3 = _w3;
                    _d4 = _w02n;
                    _d5 = _w13n;
#endif
                    // Y = A_T * w_t
                    _t1p2 = vaddq_s32(_d1, _d2);
                    _t3p4 = vaddq_s32(_d3, _d4);
                    _t1s2 = vsubq_s32(_d1, _d2);
                    _t3s4 = vsubq_s32(_d3, _d4);

                    _o0 = vaddq_s32(_d0, _t1p2);
                    _o0 = vaddq_s32(_o0, _t3p4);
                    // _o2
                    _t1p2 = vmlaq_lane_s32(_t1p2, _t3p4, _tp0, 1);
                    _o3 = vaddq_s32(_d5, _t1s2);
                    _o3 = vmlaq_lane_s32(_o3, _t3s4, _tp1, 1);
                    // _o1
                    _t1s2 = vmlaq_lane_s32(_t1s2, _t3s4, _tp1, 0);

                    // save to top blob tm: dequantize (scale + bias) and store
                    // four float rows of the 4x4 output block
                    float32x4_t _scale0 = vdupq_n_f32(scale0);
                    float32x4_t _out0_f32 = vdupq_n_f32(bias0);
                    float32x4_t _out1_f32 = vdupq_n_f32(bias0);
                    float32x4_t _out2_f32 = vdupq_n_f32(bias0);
                    float32x4_t _out3_f32 = vdupq_n_f32(bias0);

                    _out0_f32 = vmlaq_f32(_out0_f32, vcvtq_f32_s32(_o0), _scale0);
                    _out1_f32 = vmlaq_f32(_out1_f32, vcvtq_f32_s32(_t1s2), _scale0);
                    _out2_f32 = vmlaq_f32(_out2_f32, vcvtq_f32_s32(_t1p2), _scale0);
                    _out3_f32 = vmlaq_f32(_out3_f32, vcvtq_f32_s32(_o3), _scale0);

                    vst1q_f32(outRow0, _out0_f32);
                    vst1q_f32(outRow1, _out1_f32);
                    vst1q_f32(outRow2, _out2_f32);
                    vst1q_f32(outRow3, _out3_f32);
#else
                    int s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
                    int w0[6], w1[6], w2[6], w3[6];
                    int d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
                    int o0[4], o1[4], o2[4], o3[4];

                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 6];
                        s2[n] = out_tile[n + 12];
                        s3[n] = out_tile[n + 18];
                        s4[n] = out_tile[n + 24];
                        s5[n] = out_tile[n + 30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 5; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
                        w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
                        w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n];
                    }
                    // column 5 carries an extra x4 integer scale; the second
                    // pass below therefore uses d5 unscaled
                    for (int n = 5; n < 6; n++)
                    {
                        w0[n] = 4 * (s0[n] + s1[n] + s2[n] + s3[n] + s4[n]);
                        w1[n] = 4 * (s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]);
                        w2[n] = 4 * (s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]);
                        w3[n] = 4 * (s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + 4 * s5[n]);
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d0[2] = w2[0];
                        d0[3] = w3[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d1[2] = w2[1];
                        d1[3] = w3[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d2[2] = w2[2];
                        d2[3] = w3[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                        d3[2] = w2[3];
                        d3[3] = w3[3];
                        d4[0] = w0[4];
                        d4[1] = w1[4];
                        d4[2] = w2[4];
                        d4[3] = w3[4];
                        d5[0] = w0[5];
                        d5[1] = w1[5];
                        d5[2] = w2[5];
                        d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
                        o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
                        o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
                    }
                    // save to top blob tm
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = (float)o0[n] * scale0 + bias0;
                        outRow1[n] = (float)o1[n] * scale0 + bias0;
                        outRow2[n] = (float)o2[n] * scale0 + bias0;
                        outRow3[n] = (float)o3[n] * scale0 + bias0;
                    }
#endif // __ARM_NEON
                    out_tile += 36;

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }

                // skip the three rows already written by this block row
                outRow0 += outw * 3;
                outRow1 += outw * 3;
                outRow2 += outw * 3;
                outRow3 += outw * 3;
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Repack int8 3x3 kernels for the stride-2 packed convolution path.
// Output channels are handled in groups of eight: within a group the eight
// weights for a given tap k are stored contiguously (k0[k],k1[k],...,k7[k]),
// so the compute kernel can load all eight channels with one vector load.
// Leftover channels (outch % 8) are copied through unchanged, one 9-tap
// kernel per input channel, each in its own channel slot.
static void conv3x3s2_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(8 * 9, inch, outch / 8 + outch % 8, (size_t)1u);

    const signed char* weights = _kernel;

    int oc = 0;

    // full groups of eight output channels: interleave per tap
    for (; oc + 7 < outch; oc += 8)
    {
        const signed char* src[8];
        for (int j = 0; j < 8; j++)
        {
            src[j] = weights + (oc + j) * inch * 9;
        }

        signed char* dst = kernel_tm.channel(oc / 8);

        for (int q = 0; q < inch; q++)
        {
            for (int k = 0; k < 9; k++)
            {
                // one byte from each of the eight channels for tap k
                for (int j = 0; j < 8; j++)
                {
                    dst[j] = src[j][k];
                }
                dst += 8;
            }
            // advance every source kernel to the next input channel
            for (int j = 0; j < 8; j++)
            {
                src[j] += 9;
            }
        }
    }

    // remaining output channels: plain copy, 9 taps per input channel
    for (; oc < outch; oc++)
    {
        const signed char* src = weights + oc * inch * 9;
        signed char* dst = kernel_tm.channel(oc / 8 + oc % 8);

        for (int q = 0; q < inch; q++)
        {
            for (int k = 0; k < 9; k++)
            {
                dst[k] = src[k];
            }
            dst += 9;
            src += 9;
        }
    }
}
// 3x3 stride-2 int8 convolution using the kernel repacked by
// conv3x3s2_transform_kernel_int8_neon.  Output channels are processed 8 at
// a time (aarch64/ARMv7 inline-asm fast paths plus intrinsic/scalar tails),
// then the leftover channels one by one.  Accumulation is int32 and is
// ADDED into top_blob, which is zero-filled here; the caller is expected to
// have sized top_blob (outw/outh/outch are read from it).
// NOTE(review): assumes bottom_blob rows are contiguous with stride w —
// consistent with how r0/r1/r2 are derived below.
static void conv3x3s2_packed_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// stride 2: a row of output consumes 2*outw input columns; tailstep then
// advances the input pointers to the start of the row two rows down
const int tailstep = w - 2 * outw + w;
// number of full groups of 8 output channels, and where the tail starts
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
Mat out0 = top_blob.channel(p + 0);
Mat out1 = top_blob.channel(p + 1);
Mat out2 = top_blob.channel(p + 2);
Mat out3 = top_blob.channel(p + 3);
Mat out4 = top_blob.channel(p + 4);
Mat out5 = top_blob.channel(p + 5);
Mat out6 = top_blob.channel(p + 6);
Mat out7 = top_blob.channel(p + 7);
// zero the 8 int32 accumulator maps before summing over input channels
out0.fill(0);
out1.fill(0);
out2.fill(0);
out3.fill(0);
out4.fill(0);
out5.fill(0);
out6.fill(0);
out7.fill(0);
// interleaved 8x9-byte-per-input-channel kernel block for this group
const signed char* ktmp = _kernel.channel(p / 8);
for (int q = 0; q < inch; q++)
{
int* outptr0 = out0;
int* outptr1 = out1;
int* outptr2 = out2;
int* outptr3 = out3;
int* outptr4 = out4;
int* outptr5 = out5;
int* outptr6 = out6;
int* outptr7 = out7;
const signed char* img0 = bottom_blob.channel(q);
// three consecutive input rows feeding one output row
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
int i = 0;
for (; i < outh; i++)
{
// vector width per asm iteration: 8 outputs on aarch64, 4 on ARMv7
#if __ARM_NEON
#if __aarch64__
int nn = outw >> 3;
int remain = outw & 7;
#else
int nn = outw >> 2;
int remain = outw & 3;
#endif // __aarch64__
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp
"ld2 {v3.8b, v4.8b}, [%9], #16 \n" //r0-r2
"ld2 {v5.8b, v6.8b}, [%9] \n"
"ld1 {v8.4s, v9.4s}, [%1] \n" //out0
"ld1 {v10.4s, v11.4s}, [%2] \n" //out1
"ld1 {v12.4s, v13.4s}, [%3] \n" //out2
"ld1 {v14.4s, v15.4s}, [%4] \n" //out3
"ld1 {v16.4s, v17.4s}, [%5] \n" //out4
"ld1 {v18.4s, v19.4s}, [%6] \n" //out5
"ld1 {v20.4s, v21.4s}, [%7] \n" //out6
"ld1 {v22.4s, v23.4s}, [%8] \n" //out7
"ext v7.8b, v3.8b, v5.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k00-k70)
"sshll v1.8h, v1.8b, #0 \n" //(k01-k71)
"sshll v2.8h, v2.8b, #0 \n" //(k02-k72)
"sshll v3.8h, v3.8b, #0 \n" // r0
"sshll v4.8h, v4.8b, #0 \n" // r1
"sshll v7.8h, v7.8b, #0 \n" // r2
// r0
"smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r00-r07)*k00
"smlal2 v9.4s, v3.8h, v0.h[0] \n"
"smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r00-r07)*k10
"smlal2 v11.4s, v3.8h, v0.h[1] \n"
"smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r00-r07)*k20
"smlal2 v13.4s, v3.8h, v0.h[2] \n"
"smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r00-r07)*k30
"smlal2 v15.4s, v3.8h, v0.h[3] \n"
"smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r00-r07)*k40
"smlal2 v17.4s, v3.8h, v0.h[4] \n"
"smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r00-r07)*k50
"smlal2 v19.4s, v3.8h, v0.h[5] \n"
"smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r00-r07)*k60
"smlal2 v21.4s, v3.8h, v0.h[6] \n"
"smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r00-r07)*k70
"smlal2 v23.4s, v3.8h, v0.h[7] \n"
// r1
"smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r10-r17)*k01
"smlal2 v9.4s, v4.8h, v1.h[0] \n"
"smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r10-r17)*k11
"smlal2 v11.4s, v4.8h, v1.h[1] \n"
"smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r10-r17)*k21
"smlal2 v13.4s, v4.8h, v1.h[2] \n"
"smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r10-r17)*k31
"smlal2 v15.4s, v4.8h, v1.h[3] \n"
"smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r10-r17)*k41
"smlal2 v17.4s, v4.8h, v1.h[4] \n"
"smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r10-r17)*k51
"smlal2 v19.4s, v4.8h, v1.h[5] \n"
"smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r10-r17)*k61
"smlal2 v21.4s, v4.8h, v1.h[6] \n"
"smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r10-r17)*k71
"smlal2 v23.4s, v4.8h, v1.h[7] \n"
// r2
"smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r20-r27)*k02
"smlal2 v9.4s, v7.8h, v2.h[0] \n"
"smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r20-r27)*k12
"smlal2 v11.4s, v7.8h, v2.h[1] \n"
"smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r20-r27)*k22
"smlal2 v13.4s, v7.8h, v2.h[2] \n"
"smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r20-r27)*k32
"smlal2 v15.4s, v7.8h, v2.h[3] \n"
"smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r20-r27)*k42
"smlal2 v17.4s, v7.8h, v2.h[4] \n"
"smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r20-r27)*k52
"smlal2 v19.4s, v7.8h, v2.h[5] \n"
"smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r20-r27)*k62
"smlal2 v21.4s, v7.8h, v2.h[6] \n"
"smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r20-r27)*k72
"smlal2 v23.4s, v7.8h, v2.h[7] \n"
"ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp
"ld2 {v3.8b, v4.8b}, [%10], #16 \n" //r3-r5
"ld2 {v5.8b, v6.8b}, [%10] \n"
"ext v7.8b, v3.8b, v5.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k03-k73)
"sshll v1.8h, v1.8b, #0 \n" //(k04-k74)
"sshll v2.8h, v2.8b, #0 \n" //(k05-k75)
"sshll v3.8h, v3.8b, #0 \n" // r3
"sshll v4.8h, v4.8b, #0 \n" // r4
"sshll v7.8h, v7.8b, #0 \n" // r5
// r3
"smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r30-r37)*k03
"smlal2 v9.4s, v3.8h, v0.h[0] \n"
"smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r30-r37)*k13
"smlal2 v11.4s, v3.8h, v0.h[1] \n"
"smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r30-r37)*k23
"smlal2 v13.4s, v3.8h, v0.h[2] \n"
"smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r30-r37)*k33
"smlal2 v15.4s, v3.8h, v0.h[3] \n"
"smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r30-r37)*k43
"smlal2 v17.4s, v3.8h, v0.h[4] \n"
"smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r30-r37)*k53
"smlal2 v19.4s, v3.8h, v0.h[5] \n"
"smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r30-r37)*k63
"smlal2 v21.4s, v3.8h, v0.h[6] \n"
"smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r30-r37)*k73
"smlal2 v23.4s, v3.8h, v0.h[7] \n"
// r4
"smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r40-r47)*k04
"smlal2 v9.4s, v4.8h, v1.h[0] \n"
"smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r40-r47)*k14
"smlal2 v11.4s, v4.8h, v1.h[1] \n"
"smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r40-r47)*k24
"smlal2 v13.4s, v4.8h, v1.h[2] \n"
"smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r40-r47)*k34
"smlal2 v15.4s, v4.8h, v1.h[3] \n"
"smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r40-r47)*k44
"smlal2 v17.4s, v4.8h, v1.h[4] \n"
"smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r40-r47)*k54
"smlal2 v19.4s, v4.8h, v1.h[5] \n"
"smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r40-r47)*k64
"smlal2 v21.4s, v4.8h, v1.h[6] \n"
"smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r40-r47)*k74
"smlal2 v23.4s, v4.8h, v1.h[7] \n"
// r5
"smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r50-r57)*k05
"smlal2 v9.4s, v7.8h, v2.h[0] \n"
"smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r50-r57)*k15
"smlal2 v11.4s, v7.8h, v2.h[1] \n"
"smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r50-r57)*k25
"smlal2 v13.4s, v7.8h, v2.h[2] \n"
"smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r50-r57)*k35
"smlal2 v15.4s, v7.8h, v2.h[3] \n"
"smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r50-r57)*k45
"smlal2 v17.4s, v7.8h, v2.h[4] \n"
"smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r50-r57)*k55
"smlal2 v19.4s, v7.8h, v2.h[5] \n"
"smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r50-r57)*k65
"smlal2 v21.4s, v7.8h, v2.h[6] \n"
"smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r50-r57)*k75
"smlal2 v23.4s, v7.8h, v2.h[7] \n"
"ld1 {v0.8b, v1.8b, v2.8b}, [%12], #24 \n" //ktmp
"ld2 {v3.8b, v4.8b}, [%11], #16 \n" //r6-r8
"ld2 {v5.8b, v6.8b}, [%11] \n"
"ext v7.8b, v3.8b, v5.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k06-k76)
"sshll v1.8h, v1.8b, #0 \n" //(k07-k77)
"sshll v2.8h, v2.8b, #0 \n" //(k08-k78)
"sshll v3.8h, v3.8b, #0 \n" // r6
"sshll v4.8h, v4.8b, #0 \n" // r7
"sshll v7.8h, v7.8b, #0 \n" // r8
// r6
"smlal v8.4s, v3.4h, v0.h[0] \n" // out0 += (r60-r67)*k06
"smlal2 v9.4s, v3.8h, v0.h[0] \n"
"smlal v10.4s, v3.4h, v0.h[1] \n" // out1 += (r60-r67)*k16
"smlal2 v11.4s, v3.8h, v0.h[1] \n"
"smlal v12.4s, v3.4h, v0.h[2] \n" // out2 += (r60-r67)*k26
"smlal2 v13.4s, v3.8h, v0.h[2] \n"
"smlal v14.4s, v3.4h, v0.h[3] \n" // out3 += (r60-r67)*k36
"smlal2 v15.4s, v3.8h, v0.h[3] \n"
"smlal v16.4s, v3.4h, v0.h[4] \n" // out4 += (r60-r67)*k46
"smlal2 v17.4s, v3.8h, v0.h[4] \n"
"smlal v18.4s, v3.4h, v0.h[5] \n" // out5 += (r60-r67)*k56
"smlal2 v19.4s, v3.8h, v0.h[5] \n"
"smlal v20.4s, v3.4h, v0.h[6] \n" // out6 += (r60-r67)*k66
"smlal2 v21.4s, v3.8h, v0.h[6] \n"
"smlal v22.4s, v3.4h, v0.h[7] \n" // out7 += (r60-r67)*k76
"smlal2 v23.4s, v3.8h, v0.h[7] \n"
// r7
"smlal v8.4s, v4.4h, v1.h[0] \n" // out0 += (r70-r77)*k07
"smlal2 v9.4s, v4.8h, v1.h[0] \n"
"smlal v10.4s, v4.4h, v1.h[1] \n" // out1 += (r70-r77)*k17
"smlal2 v11.4s, v4.8h, v1.h[1] \n"
"smlal v12.4s, v4.4h, v1.h[2] \n" // out2 += (r70-r77)*k27
"smlal2 v13.4s, v4.8h, v1.h[2] \n"
"smlal v14.4s, v4.4h, v1.h[3] \n" // out3 += (r70-r77)*k37
"smlal2 v15.4s, v4.8h, v1.h[3] \n"
"smlal v16.4s, v4.4h, v1.h[4] \n" // out4 += (r70-r77)*k47
"smlal2 v17.4s, v4.8h, v1.h[4] \n"
"smlal v18.4s, v4.4h, v1.h[5] \n" // out5 += (r70-r77)*k57
"smlal2 v19.4s, v4.8h, v1.h[5] \n"
"smlal v20.4s, v4.4h, v1.h[6] \n" // out6 += (r70-r77)*k67
"smlal2 v21.4s, v4.8h, v1.h[6] \n"
"smlal v22.4s, v4.4h, v1.h[7] \n" // out7 += (r70-r77)*k77
"smlal2 v23.4s, v4.8h, v1.h[7] \n"
// r8
"smlal v8.4s, v7.4h, v2.h[0] \n" // out0 += (r80-r87)*k08
"smlal2 v9.4s, v7.8h, v2.h[0] \n"
"smlal v10.4s, v7.4h, v2.h[1] \n" // out1 += (r80-r87)*k18
"smlal2 v11.4s, v7.8h, v2.h[1] \n"
"smlal v12.4s, v7.4h, v2.h[2] \n" // out2 += (r80-r87)*k28
"smlal2 v13.4s, v7.8h, v2.h[2] \n"
"smlal v14.4s, v7.4h, v2.h[3] \n" // out3 += (r80-r87)*k38
"smlal2 v15.4s, v7.8h, v2.h[3] \n"
"smlal v16.4s, v7.4h, v2.h[4] \n" // out4 += (r80-r87)*k48
"smlal2 v17.4s, v7.8h, v2.h[4] \n"
"smlal v18.4s, v7.4h, v2.h[5] \n" // out5 += (r80-r87)*k58
"smlal2 v19.4s, v7.8h, v2.h[5] \n"
"smlal v20.4s, v7.4h, v2.h[6] \n" // out6 += (r80-r87)*k68
"smlal2 v21.4s, v7.8h, v2.h[6] \n"
"smlal v22.4s, v7.4h, v2.h[7] \n" // out7 += (r80-r87)*k78
"smlal2 v23.4s, v7.8h, v2.h[7] \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"st1 {v12.4s, v13.4s}, [%3], #32 \n"
"st1 {v14.4s, v15.4s}, [%4], #32 \n"
"st1 {v16.4s, v17.4s}, [%5], #32 \n"
"st1 {v18.4s, v19.4s}, [%6], #32 \n"
"st1 {v20.4s, v21.4s}, [%7], #32 \n"
"st1 {v22.4s, v23.4s}, [%8], #32 \n"
"subs %w0, %w0, #1 \n"
"sub %12, %12, #72 \n" // reset ktmp
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(ktmp) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
#else // __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%1, #128] \n"
"vld1.s32 {d16-d17}, [%1] \n" // out0
"pld [%2, #128] \n"
"vld1.s32 {d18-d19}, [%2] \n" // out1
"pld [%3, #128] \n"
"vld1.s32 {d20-d21}, [%3] \n" // out2
"pld [%4, #128] \n"
"vld1.s32 {d22-d23}, [%4] \n" // out3
// r0
"pld [%9, #64] \n"
"vld2.s8 {d8-d9}, [%9] \n" // d8(a00 a02 a04 a06 a08 a010 a012 a014), d9(a01 a03 a05 a07 a09 a011 a013 a015)
"add %9, #8 \n"
"pld [%12, #64] \n"
"vld1.s8 {d0-d2}, [%12]! \n" // d0(k00-k70) d1(k01-k71) d2(k02-k72)
"pld [%5, #128] \n"
"vld1.s32 {d24-d25}, [%5] \n" // out4
"pld [%6, #128] \n"
"vld1.s32 {d26-d27}, [%6] \n" // out5
"vmovl.s8 q2, d2 \n" // q2(k02-k72)
"vmovl.s8 q1, d1 \n" // q1(k01-k71)
"vmovl.s8 q0, d0 \n" // q0(k00-k70)
"vext.s8 d12, d8, d8, #1 \n" // d12(a02 a04 a06 a08 x x x x)
"pld [%7, #128] \n"
"vld1.s32 {d28-d29}, [%7] \n" // out6
"vmovl.s8 q5, d9 \n" // q5(a01 a03 a05 a07 a09 a011 a013 a015) d11
"vmovl.s8 q4, d8 \n" // q4(a00 a02 a04 a06 a08 a010 a012 a014) d9
"vmovl.s8 q6, d12 \n" // q6(a02 a04 a06 a08 a010 a012 a014 a016) d13
"pld [%8, #128] \n"
"vld1.s32 {d30-d31}, [%8] \n" // out7
"vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a00 a02 a04 a06) * k00
"vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a00 a02 a04 a06) * k10
"vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a00 a02 a04 a06) * k20
"vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a00 a02 a04 a06) * k30
"vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a00 a02 a04 a06) * k40
"vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a00 a02 a04 a06) * k50
"vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a00 a02 a04 a06) * k60
"vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a00 a02 a04 a06) * k70
"vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a01-a07) * k01
"vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a01-a07) * k11
"vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a01-a07) * k21
"vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a01-a07) * k31
"vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a01-a07) * k41
"vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a01-a07) * k51
"vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a01-a07) * k61
"vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a01-a07) * k71
"pld [%10, #64] \n"
"vld2.s8 {d8-d9}, [%10] \n" // d8(a10 a12 a14 a16 a18 a110 a112 a114), d9(a11 a13 a15 a17 a19 a111 a113 a115)
"add %10, #8 \n"
"vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a02-a08) * k02
"vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a02-a08) * k12
"vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a02-a08) * k22
"vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a02-a08) * k32
"pld [%12, #64] \n"
"vld1.s8 {d0-d2}, [%12]! \n" // d0(k03-k73) d1(k04-k74) d2(k05-k75)
"vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a02-a08) * k42
"vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a02-a08) * k52
"vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a02-a08) * k62
"vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a02-a08) * k72
// r1
"vext.s8 d12, d8, d8, #1 \n" // d12(a12 a14 a16 a18 x x x x)
"vmovl.s8 q2, d2 \n" // q2(k05-k75)
"vmovl.s8 q1, d1 \n" // q1(k04-k74)
"vmovl.s8 q0, d0 \n" // q0(k03-k73)
"vmovl.s8 q5, d9 \n" // q5(a11-a115)
"vmovl.s8 q4, d8 \n" // q4(a10-a114)
"vmovl.s8 q6, d12 \n" // q6(a12-a116)
"vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a10-a16) * k03
"vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a10-a16) * k13
"vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a10-a16) * k23
"vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a10-a16) * k33
"vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a10-a16) * k43
"vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a10-a16) * k53
"vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a10-a16) * k63
"vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a10-a16) * k73
"vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a11-a17) * k04
"vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a11-a17) * k14
"vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a11-a17) * k24
"vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a11-a17) * k34
"vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a11-a17) * k44
"vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a11-a17) * k54
"vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a11-a17) * k64
"vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a11-a17) * k74
"pld [%11, #64] \n"
"vld2.s8 {d8-d9}, [%11] \n" // d8(a20 a22 a24 a26 a28 a210 a212 a214), d9(a21 a23 a25 a27 a29 a211 a213 a215)
"add %11, #8 \n"
"vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a12-a18) * k05
"vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a12-a18) * k15
"vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a12-a18) * k25
"vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a12-a18) * k35
"pld [%12, #64] \n"
"vld1.s8 {d0-d2}, [%12]! \n" // d0(k06-k76) d1(k07-k77) d2(k08-k78)
"vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a12-a18) * k45
"vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a12-a18) * k55
"vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a12-a18) * k65
"vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a12-a18) * k75
// r2
"vext.s8 d12, d8, d8, #1 \n" // d12(a22 a24 a26 a28 x x x x)
"vmovl.s8 q2, d2 \n" // q2(k08-k78)
"vmovl.s8 q1, d1 \n" // q1(k07-k77)
"vmovl.s8 q0, d0 \n" // q0(k06-k76)
"vmovl.s8 q5, d9 \n" // q5(a21-a215)
"vmovl.s8 q4, d8 \n" // q4(a20-a214)
"vmovl.s8 q6, d12 \n" // q6(a22-a216)
"vmlal.s16 q8, d8, d0[0] \n" // sum0 += (a20-a26) * k06
"vmlal.s16 q9, d8, d0[1] \n" // sum1 += (a20-a26) * k16
"vmlal.s16 q10, d8, d0[2] \n" // sum2 += (a20-a26) * k26
"vmlal.s16 q11, d8, d0[3] \n" // sum3 += (a20-a26) * k36
"vmlal.s16 q12, d8, d1[0] \n" // sum4 += (a20-a26) * k46
"vmlal.s16 q13, d8, d1[1] \n" // sum5 += (a20-a26) * k56
"vmlal.s16 q14, d8, d1[2] \n" // sum6 += (a20-a26) * k66
"vmlal.s16 q15, d8, d1[3] \n" // sum7 += (a20-a26) * k76
"vmlal.s16 q8, d10, d2[0] \n" // sum0 += (a21-a27) * k07
"vmlal.s16 q9, d10, d2[1] \n" // sum1 += (a21-a27) * k17
"vmlal.s16 q10, d10, d2[2] \n" // sum2 += (a21-a27) * k27
"vmlal.s16 q11, d10, d2[3] \n" // sum3 += (a21-a27) * k37
"vmlal.s16 q12, d10, d3[0] \n" // sum4 += (a21-a27) * k47
"vmlal.s16 q13, d10, d3[1] \n" // sum5 += (a21-a27) * k57
"vmlal.s16 q14, d10, d3[2] \n" // sum6 += (a21-a27) * k67
"vmlal.s16 q15, d10, d3[3] \n" // sum7 += (a21-a27) * k77
"vmlal.s16 q8, d12, d4[0] \n" // sum0 += (a22-a28) * k08
"vmlal.s16 q9, d12, d4[1] \n" // sum1 += (a22-a28) * k18
"vmlal.s16 q10, d12, d4[2] \n" // sum2 += (a22-a28) * k28
"vmlal.s16 q11, d12, d4[3] \n" // sum3 += (a22-a28) * k38
"vmlal.s16 q12, d12, d5[0] \n" // sum4 += (a22-a28) * k48
"vmlal.s16 q13, d12, d5[1] \n" // sum5 += (a22-a28) * k58
"vmlal.s16 q14, d12, d5[2] \n" // sum6 += (a22-a28) * k68
"vmlal.s16 q15, d12, d5[3] \n" // sum7 += (a22-a28) * k78
// save s32 to memory
"sub %12, %12, #72 \n"
"vst1.s32 {d16-d17}, [%1]! \n" // out0
"vst1.s32 {d18-d19}, [%2]! \n" // out1
"vst1.s32 {d20-d21}, [%3]! \n" // out2
"vst1.s32 {d22-d23}, [%4]! \n" // out3
"subs %0, #1 \n"
"vst1.s32 {d24-d25}, [%5]! \n" // out4
"vst1.s32 {d26-d27}, [%6]! \n" // out5
"vst1.s32 {d28-d29}, [%7]! \n" // out6
"vst1.s32 {d30-d31}, [%8]! \n" // out7
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0), // %9
"=r"(r1), // %10
"=r"(r2), // %11
"=r"(ktmp) // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
// tail: remaining output columns, one at a time
for (; remain > 0; remain--)
{
#if __ARM_NEON
#if __aarch64__
// aarch64 intrinsic tail: one output column across the 8 channels
int8x8_t _r0_s8 = vld1_s8(r0); // (a00 a01 a02 ....)
int8x8_t _r1_s8 = vld1_s8(r1); // (a10 a11 a12 ....)
int8x8_t _r2_s8 = vld1_s8(r2); // (a20 a21 a22 ....)
int16x8_t _r0 = vmovl_s8(_r0_s8);
int16x8_t _r1 = vmovl_s8(_r1_s8);
int16x8_t _r2 = vmovl_s8(_r2_s8);
int32x4_t _sum03 = {};
int32x4_t _sum47 = {};
_sum03 = vld1q_lane_s32(outptr0, _sum03, 0); // out0
_sum03 = vld1q_lane_s32(outptr1, _sum03, 1); // out1
_sum03 = vld1q_lane_s32(outptr2, _sum03, 2); // out2
_sum03 = vld1q_lane_s32(outptr3, _sum03, 3); // out3
_sum47 = vld1q_lane_s32(outptr4, _sum47, 0); // out4
_sum47 = vld1q_lane_s32(outptr5, _sum47, 1); // out5
_sum47 = vld1q_lane_s32(outptr6, _sum47, 2); // out6
_sum47 = vld1q_lane_s32(outptr7, _sum47, 3); // out7
// k0 - k2
int8x8_t _k0_8 = vld1_s8(ktmp); //(k00-k70)
int8x8_t _k1_8 = vld1_s8(ktmp + 8); //(k01-k71)
int8x8_t _k2_8 = vld1_s8(ktmp + 16); //(k02-k72)
int16x8_t _k0 = vmovl_s8(_k0_8);
int16x8_t _k1 = vmovl_s8(_k1_8);
int16x8_t _k2 = vmovl_s8(_k2_8);
int32x4_t _sum0 = vmull_laneq_s16(vget_low_s16(_k0), _r0, 0);
int32x4_t _sum0n = vmull_laneq_s16(vget_high_s16(_k0), _r0, 0);
int32x4_t _sum1 = vmull_laneq_s16(vget_low_s16(_k1), _r0, 1);
int32x4_t _sum1n = vmull_laneq_s16(vget_high_s16(_k1), _r0, 1);
_sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r0, 2);
_sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r0, 2);
// k3 - k5
_k0_8 = vld1_s8(ktmp + 24); //(k03-k73)
_k1_8 = vld1_s8(ktmp + 32); //(k04-k74)
_k2_8 = vld1_s8(ktmp + 40); //(k05-k75)
_k0 = vmovl_s8(_k0_8);
_k1 = vmovl_s8(_k1_8);
_k2 = vmovl_s8(_k2_8);
_sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r1, 0);
_sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r1, 0);
_sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r1, 1);
_sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r1, 1);
_sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r1, 2);
_sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r1, 2);
// k6 - k8
_k0_8 = vld1_s8(ktmp + 48); //(k06-k76)
_k1_8 = vld1_s8(ktmp + 56); //(k07-k77)
_k2_8 = vld1_s8(ktmp + 64); //(k08-k78)
_k0 = vmovl_s8(_k0_8);
_k1 = vmovl_s8(_k1_8);
_k2 = vmovl_s8(_k2_8);
_sum0 = vmlal_laneq_s16(_sum0, vget_low_s16(_k0), _r2, 0);
_sum0n = vmlal_laneq_s16(_sum0n, vget_high_s16(_k0), _r2, 0);
_sum1 = vmlal_laneq_s16(_sum1, vget_low_s16(_k1), _r2, 1);
_sum1n = vmlal_laneq_s16(_sum1n, vget_high_s16(_k1), _r2, 1);
_sum03 = vmlal_laneq_s16(_sum03, vget_low_s16(_k2), _r2, 2);
_sum47 = vmlal_laneq_s16(_sum47, vget_high_s16(_k2), _r2, 2);
// fold partial accumulators into the per-channel sums
_sum0 = vaddq_s32(_sum0, _sum1);
_sum0n = vaddq_s32(_sum0n, _sum1n);
_sum03 = vaddq_s32(_sum03, _sum0);
_sum47 = vaddq_s32(_sum47, _sum0n);
vst1q_lane_s32(outptr0, _sum03, 0);
vst1q_lane_s32(outptr1, _sum03, 1);
vst1q_lane_s32(outptr2, _sum03, 2);
vst1q_lane_s32(outptr3, _sum03, 3);
vst1q_lane_s32(outptr4, _sum47, 0);
vst1q_lane_s32(outptr5, _sum47, 1);
vst1q_lane_s32(outptr6, _sum47, 2);
vst1q_lane_s32(outptr7, _sum47, 3);
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
#else // __aarch64__
asm volatile(
"pld [%8, #64] \n"
"vld1.s8 {d0}, [%8] \n" // d0(a00 a01 a02 ....)
"pld [%9, #64] \n"
"vld1.s8 {d2}, [%9] \n" // d2(a10 a11 a12 ....)
"pld [%10, #64] \n"
"vld1.s8 {d4}, [%10] \n" // d4(a20 a21 a22 ....)
"pld [%11, #64] \n"
"vld1.s8 {d6-d8}, [%11]! \n" // d6(k00-k70) d7(k01-k71) d8(k02-k72)
"vmovl.s8 q0, d0 \n" // d0(a00 a01 a02 x)
"vmovl.s8 q1, d2 \n" // d2(a10 a11 a12 x)
"vmovl.s8 q2, d4 \n" // d4(a20 a21 a22 x)
"vmovl.s8 q5, d8 \n" // d10(k02-k32) d11(k42-k72)
"vmovl.s8 q4, d7 \n" // d8(k01-k31) d9(k41-k71)
"vmovl.s8 q3, d6 \n" // d6(k00-k30) d7(k40-k70)
"vld1.s32 {d20[0]}, [%0] \n" // out0 q10
"vld1.s32 {d20[1]}, [%1] \n" // out1
"vld1.s32 {d21[0]}, [%2] \n" // out2
"vld1.s32 {d21[1]}, [%3] \n" // out3
"pld [%11, #64] \n"
"vld1.s8 {d24-d26}, [%11]! \n"
"vmovl.s8 q14, d26 \n" // d28(k05-k35) d29(k45-k75)
"vmovl.s8 q13, d25 \n" // d26(k04-k34) d27(k44-k74)
"vmovl.s8 q12, d24 \n" // d24(k03-k33) d25(k43-k73)
"vld1.s32 {d22[0]}, [%4] \n" // out4 q11
"vld1.s32 {d22[1]}, [%5] \n" // out5
"vld1.s32 {d23[0]}, [%6] \n" // out6
"vld1.s32 {d23[1]}, [%7] \n" // out7
"vmull.s16 q6, d6, d0[0] \n" // a00 x (k00-k30)
"vmull.s16 q7, d7, d0[0] \n" // a00 x (k40-k70)
"vmull.s16 q8, d8, d0[1] \n" // a01 x (k01-k31)
"vmull.s16 q9, d9, d0[1] \n" // a01 x (k41-k71)
"vmlal.s16 q10, d10, d0[2] \n" // a02 x (k02-k32)
"vmlal.s16 q11, d11, d0[2] \n" // a02 x (k42-k72)
"pld [%11, #64] \n"
"vld1.s8 {d6-d8}, [%11]! \n"
"vmovl.s8 q5, d8 \n" // d10(k08-k38) d11(k48-k78)
"vmovl.s8 q4, d7 \n" // d8(k07-k37) d9(k47-k77)
"vmovl.s8 q3, d6 \n" // d6(k06-k36) d7(k46-k76)
"vmlal.s16 q6, d24, d2[0] \n" // a10 x (k03-k33)
"vmlal.s16 q7, d25, d2[0] \n" // a10 x (k43-k73)
"vmlal.s16 q8, d26, d2[1] \n" // a11 x (k04-k34)
"vmlal.s16 q9, d27, d2[1] \n" // a11 x (k44-k74)
"vmlal.s16 q10, d28, d2[2] \n" // a12 x (k05-k35)
"vmlal.s16 q11, d29, d2[2] \n" // a12 x (k45-k75)
"vmlal.s16 q6, d6, d4[0] \n" // a20 x (k06-k36)
"vmlal.s16 q7, d7, d4[0] \n" // a20 x (k46-k76)
"vmlal.s16 q8, d8, d4[1] \n" // a21 x (k07-k37)
"vmlal.s16 q9, d9, d4[1] \n" // a21 x (k47-k77)
"vmlal.s16 q10, d10, d4[2] \n" // a22 x (k08-k38)
"vmlal.s16 q11, d11, d4[2] \n" // a22 x (k48-k78)
"vadd.s32 q8, q8, q6 \n"
"vadd.s32 q9, q9, q7 \n"
"sub %11, %11, #72 \n"
"vadd.s32 q10, q10, q8 \n"
"vadd.s32 q11, q11, q9 \n"
"vst1.s32 {d20[0]}, [%0]! \n" // out0
"vst1.s32 {d20[1]}, [%1]! \n" // out1
"vst1.s32 {d21[0]}, [%2]! \n" // out2
"vst1.s32 {d21[1]}, [%3]! \n" // out3
"vst1.s32 {d22[0]}, [%4]! \n" // out4
"vst1.s32 {d22[1]}, [%5]! \n" // out5
"vst1.s32 {d23[0]}, [%6]! \n" // out6
"vst1.s32 {d23[1]}, [%7]! \n" // out7
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(ktmp) // %11
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(ktmp)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
#else // __ARM_NEON
// portable scalar fallback: 3x3 window, 8 output channels,
// ktmp is interleaved 8 bytes per tap (see the transform function)
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
sum0 += (int)r0[0] * ktmp[0];
sum1 += (int)r0[0] * ktmp[1];
sum2 += (int)r0[0] * ktmp[2];
sum3 += (int)r0[0] * ktmp[3];
sum4 += (int)r0[0] * ktmp[4];
sum5 += (int)r0[0] * ktmp[5];
sum6 += (int)r0[0] * ktmp[6];
sum7 += (int)r0[0] * ktmp[7];
ktmp += 8;
sum0 += (int)r0[1] * ktmp[0];
sum1 += (int)r0[1] * ktmp[1];
sum2 += (int)r0[1] * ktmp[2];
sum3 += (int)r0[1] * ktmp[3];
sum4 += (int)r0[1] * ktmp[4];
sum5 += (int)r0[1] * ktmp[5];
sum6 += (int)r0[1] * ktmp[6];
sum7 += (int)r0[1] * ktmp[7];
ktmp += 8;
sum0 += (int)r0[2] * ktmp[0];
sum1 += (int)r0[2] * ktmp[1];
sum2 += (int)r0[2] * ktmp[2];
sum3 += (int)r0[2] * ktmp[3];
sum4 += (int)r0[2] * ktmp[4];
sum5 += (int)r0[2] * ktmp[5];
sum6 += (int)r0[2] * ktmp[6];
sum7 += (int)r0[2] * ktmp[7];
ktmp += 8;
sum0 += (int)r1[0] * ktmp[0];
sum1 += (int)r1[0] * ktmp[1];
sum2 += (int)r1[0] * ktmp[2];
sum3 += (int)r1[0] * ktmp[3];
sum4 += (int)r1[0] * ktmp[4];
sum5 += (int)r1[0] * ktmp[5];
sum6 += (int)r1[0] * ktmp[6];
sum7 += (int)r1[0] * ktmp[7];
ktmp += 8;
sum0 += (int)r1[1] * ktmp[0];
sum1 += (int)r1[1] * ktmp[1];
sum2 += (int)r1[1] * ktmp[2];
sum3 += (int)r1[1] * ktmp[3];
sum4 += (int)r1[1] * ktmp[4];
sum5 += (int)r1[1] * ktmp[5];
sum6 += (int)r1[1] * ktmp[6];
sum7 += (int)r1[1] * ktmp[7];
ktmp += 8;
sum0 += (int)r1[2] * ktmp[0];
sum1 += (int)r1[2] * ktmp[1];
sum2 += (int)r1[2] * ktmp[2];
sum3 += (int)r1[2] * ktmp[3];
sum4 += (int)r1[2] * ktmp[4];
sum5 += (int)r1[2] * ktmp[5];
sum6 += (int)r1[2] * ktmp[6];
sum7 += (int)r1[2] * ktmp[7];
ktmp += 8;
sum0 += (int)r2[0] * ktmp[0];
sum1 += (int)r2[0] * ktmp[1];
sum2 += (int)r2[0] * ktmp[2];
sum3 += (int)r2[0] * ktmp[3];
sum4 += (int)r2[0] * ktmp[4];
sum5 += (int)r2[0] * ktmp[5];
sum6 += (int)r2[0] * ktmp[6];
sum7 += (int)r2[0] * ktmp[7];
ktmp += 8;
sum0 += (int)r2[1] * ktmp[0];
sum1 += (int)r2[1] * ktmp[1];
sum2 += (int)r2[1] * ktmp[2];
sum3 += (int)r2[1] * ktmp[3];
sum4 += (int)r2[1] * ktmp[4];
sum5 += (int)r2[1] * ktmp[5];
sum6 += (int)r2[1] * ktmp[6];
sum7 += (int)r2[1] * ktmp[7];
ktmp += 8;
sum0 += (int)r2[2] * ktmp[0];
sum1 += (int)r2[2] * ktmp[1];
sum2 += (int)r2[2] * ktmp[2];
sum3 += (int)r2[2] * ktmp[3];
sum4 += (int)r2[2] * ktmp[4];
sum5 += (int)r2[2] * ktmp[5];
sum6 += (int)r2[2] * ktmp[6];
sum7 += (int)r2[2] * ktmp[7];
ktmp += 8;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
// rewind ktmp to the start of this input channel's kernel block
ktmp -= 8 * 9;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
#endif // __ARM_NEON
// stride 2 along the row
r0 += 2;
r1 += 2;
r2 += 2;
}
// move to the input row pair feeding the next output row
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
// next input channel's 8x9 interleaved kernel block
ktmp += 8 * 9;
}
}
// leftover output channels: one channel at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
Mat out = top_blob.channel(p);
out.fill(0);
// plain (non-interleaved) kernel for this channel; index matches the
// allocation scheme in conv3x3s2_transform_kernel_int8_neon
const signed char* ktmp = _kernel.channel(p / 8 + p % 8);
for (int q = 0; q < inch; q++)
{
int* outptr = out;
const signed char* img0 = bottom_blob.channel(q);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w * 2;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld1 {v0.8b, v1.8b}, [%5] \n" //ktmp
"ld2 {v2.8b, v3.8b}, [%2], #16 \n" //r0-r2
"ld2 {v4.8b, v5.8b}, [%2] \n"
"ld2 {v6.8b, v7.8b}, [%3], #16 \n" //r3-r5
"ld2 {v8.8b, v9.8b}, [%3] \n"
"ld2 {v10.8b, v11.8b}, [%4], #16 \n" //r6-r8
"ld2 {v12.8b, v13.8b}, [%4] \n"
"ld1 {v14.4s, v15.4s}, [%1] \n" //out0
"ext v4.8b, v2.8b, v4.8b, #1 \n"
"ext v8.8b, v6.8b, v8.8b, #1 \n"
"ext v12.8b, v10.8b, v12.8b, #1 \n"
"sshll v0.8h, v0.8b, #0 \n" //(k0-k7)
"sshll v1.8h, v1.8b, #0 \n" //(k8)
"sshll v2.8h, v2.8b, #0 \n" // r0
"sshll v3.8h, v3.8b, #0 \n" // r1
"sshll v4.8h, v4.8b, #0 \n" // r2
"sshll v6.8h, v6.8b, #0 \n" // r3
"sshll v7.8h, v7.8b, #0 \n" // r4
"sshll v8.8h, v8.8b, #0 \n" // r5
"sshll v10.8h, v10.8b, #0 \n" // r6
"sshll v11.8h, v11.8b, #0 \n" // r7
"sshll v12.8h, v12.8b, #0 \n" // r8
// r0
"smull v16.4s, v2.4h, v0.h[0] \n" // out = r0*k0
"smull2 v17.4s, v2.8h, v0.h[0] \n"
"smull v18.4s, v3.4h, v0.h[1] \n" // outn = r1*k1
"smull2 v19.4s, v3.8h, v0.h[1] \n"
"smlal v16.4s, v4.4h, v0.h[2] \n" // out = r2*k2
"smlal2 v17.4s, v4.8h, v0.h[2] \n"
"smlal v18.4s, v6.4h, v0.h[3] \n" // outn = r3*k3
"smlal2 v19.4s, v6.8h, v0.h[3] \n"
"smlal v16.4s, v7.4h, v0.h[4] \n" // out = r4*k4
"smlal2 v17.4s, v7.8h, v0.h[4] \n"
"smlal v18.4s, v8.4h, v0.h[5] \n" // outn = r5*k5
"smlal2 v19.4s, v8.8h, v0.h[5] \n"
"smlal v16.4s, v10.4h, v0.h[6] \n" // out = r6*k6
"smlal2 v17.4s, v10.8h, v0.h[6] \n"
"smlal v18.4s, v11.4h, v0.h[7] \n" // outn = r7*k7
"smlal2 v19.4s, v11.8h, v0.h[7] \n"
"smlal v16.4s, v12.4h, v1.h[0] \n" // out = r8*k8
"smlal2 v17.4s, v12.8h, v1.h[0] \n"
"add v8.4s, v16.4s, v18.4s \n"
"add v9.4s, v17.4s, v19.4s \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(ktmp) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(ktmp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
#else
if (nn > 0)
{
asm volatile(
"vld1.s8 {d0-d1}, [%5] \n" // d0(k0 - k7) d1(k8 ...)
"vmovl.s8 q1, d1 \n" // d2(k8 ...)
"vmovl.s8 q0, d0 \n" // d0(k0 - k3) d1(k4 - k7)
"0: \n"
"pld [%2, #192] \n"
"vld2.s8 {d4-d5}, [%2]! \n" // r0 d4(a00 a02 ... a014) d5(a01 a03 ... a015)
"vld2.s8 {d8-d9}, [%2] \n" // d8(a016 ....)
"vld2.s8 {d10-d11}, [%3]! \n" // r1 d10(a10 a12 ... a114) d11(a11 a13 ... a115)
"vld2.s8 {d14-d15}, [%3] \n" // d14(a116 ....)
"vld2.s8 {d16-d17}, [%4]! \n" // r2 d16(a20 a22 ... a214) d17(a21 a23 ... a215)
"vld2.s8 {d20-d21}, [%4] \n" // d20(a216 ....)
"vld1.s32 {d22-d25}, [%1] \n" // q11(out0 - out3) q12(out4 - out7)
"vext.s8 d8, d4, d8, #1 \n" // d8(a02 a04 ... a016)
"vext.s8 d14, d10, d14, #1 \n" // d14(a12 a14 ... a116)
"vext.s8 d20, d16, d20, #1 \n" // d20(a22 a24 ... a216)
"vmovl.s8 q3, d5 \n" // q3(a01 a03 ... a015)
"vmovl.s8 q2, d4 \n" // q2(a00 a02 ... a014)
"vmovl.s8 q4, d8 \n" // q4(a02 a04 ... a016)
"vmovl.s8 q6, d11 \n" // q6(a11 a13 ... a115)
"vmovl.s8 q5, d10 \n" // q5(a10 a12 ... a114)
"vmovl.s8 q7, d14 \n" // q7(a12 a14 ... a116)
"vmovl.s8 q9, d17 \n" // q9(a21 a23 ... a215)
"vmovl.s8 q8, d16 \n" // q8(a20 a22 ... a214)
"vmovl.s8 q10, d20 \n" // q10(a22 a24 ... a216)
"vmlal.s16 q11, d4, d0[0] \n" // k0
"vmlal.s16 q12, d5, d0[0] \n"
"vmull.s16 q13, d6, d0[1] \n" // k1
"vmull.s16 q14, d7, d0[1] \n"
"vmlal.s16 q11, d8, d0[2] \n" // k2
"vmlal.s16 q12, d9, d0[2] \n"
"vmlal.s16 q13, d12, d1[0] \n" // k4
"vmlal.s16 q14, d13, d1[0] \n"
"vmlal.s16 q11, d10, d0[3] \n" // k3
"vmlal.s16 q12, d11, d0[3] \n"
"vmlal.s16 q13, d14, d1[1] \n" // k5
"vmlal.s16 q14, d15, d1[1] \n"
"vmlal.s16 q11, d16, d1[2] \n" // k6
"vmlal.s16 q12, d17, d1[2] \n"
"vmlal.s16 q13, d18, d1[3] \n" // k7
"vmlal.s16 q14, d19, d1[3] \n"
"vmlal.s16 q11, d20, d2[0] \n" // k8
"vmlal.s16 q12, d21, d2[0] \n"
"vadd.s32 q11, q11, q13 \n"
"vadd.s32 q12, q12, q14 \n"
"vst1.32 {d22-d25}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(ktmp) // %5
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
if (remain > 0)
{
#if __ARM_NEON
// pre-widen the 9 kernel taps into three 4-lane groups (k0-3, k3-6, k6-8)
int8x8_t _k01234567s8 = vld1_s8(ktmp);
int8x8_t _k8xxxxxxxs8 = vld1_s8(ktmp + 8);
int8x8_t _k34567xxxs8 = vext_s8(_k01234567s8, _k01234567s8, 3);
int8x8_t _k678xxxxxs8 = vext_s8(_k01234567s8, _k8xxxxxxxs8, 6);
int16x8_t _k0123_s16 = vmovl_s8(_k01234567s8);
int16x8_t _k3456_s16 = vmovl_s8(_k34567xxxs8);
int16x8_t _k678x_s16 = vmovl_s8(_k678xxxxxs8);
#endif
for (; remain > 0; remain--)
{
#if __ARM_NEON
int8x8_t _r00s8 = vld1_s8(r0);
int8x8_t _r10s8 = vld1_s8(r1);
int8x8_t _r20s8 = vld1_s8(r2);
int16x8_t _r00s16 = vmovl_s8(_r00s8);
int16x8_t _r10s16 = vmovl_s8(_r10s8);
int16x8_t _r20s16 = vmovl_s8(_r20s8);
// lane 3 of each product is a don't-care tap (masked by loading
// the previous output value into lane 3 before the reduction)
int32x4_t _sum = vmull_s16(vget_low_s16(_r00s16), vget_low_s16(_k0123_s16));
_sum = vmlal_s16(_sum, vget_low_s16(_r10s16), vget_low_s16(_k3456_s16));
_sum = vmlal_s16(_sum, vget_low_s16(_r20s16), vget_low_s16(_k678x_s16));
_sum = vsetq_lane_s32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_s32(_sum);
#else
int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum));
_ss = vpadd_s32(_ss, _ss);
*outptr = vget_lane_s32(_ss, 0);
#endif // __aarch64__
#else
int sum = 0;
sum += (int)r0[0] * ktmp[0];
sum += (int)r0[1] * ktmp[1];
sum += (int)r0[2] * ktmp[2];
sum += (int)r1[0] * ktmp[3];
sum += (int)r1[1] * ktmp[4];
sum += (int)r1[2] * ktmp[5];
sum += (int)r2[0] * ktmp[6];
sum += (int)r2[1] * ktmp[7];
sum += (int)r2[2] * ktmp[8];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
// next input channel's plain 9-byte kernel
ktmp += 9;
}
}
}
|
wino_conv_kernel_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include "wino_conv_kernel_x86.h"
#define TILE 4
#define ELEM_SIZE ((TILE + 2) * (TILE + 2))
#define WINO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINO_MIN(a, b) ((a) < (b) ? (a) : (b))
/* Apply an in-place ReLU over `size` floats.  When `activation` is
 * positive the result is additionally clamped to that value
 * (clipped ReLU, e.g. activation == 6 gives ReLU6). */
static void relu(float* data, int size, int activation)
{
    for (int idx = 0; idx < size; idx++)
    {
        float v = data[idx];
        v = (v > (float)0) ? v : (float)0;
        if (activation > 0)
        {
            v = (v < (float)activation) ? v : (float)activation;
        }
        data[idx] = v;
    }
}
/* Size in bytes of the scratch buffer that holds the Winograd-transformed
 * kernel: outch * inch * 36 floats, plus 128 bytes of slack ("caution"
 * margin kept from the original implementation).  `param` is unused but
 * kept for interface symmetry with the other mem-size helpers. */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    int out_ch = filter->dims[0];
    int in_ch = filter->dims[1];
    int transformed_bytes = out_ch * in_ch * ELEM_SIZE * sizeof(float);
    return transformed_bytes + 128; // caution
}
/* Copy an m x n source matrix into an m_align x n_align destination,
 * offset by (pad_h, pad_w).  The destination is assumed to be pre-zeroed
 * by the caller; this routine only writes the copied region.  When the
 * source already covers the aligned shape, a single bulk copy is done. */
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    if (m >= m_align && n >= n_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }
    for (int row = 0; row < m; row++)
    {
        float* d = dst + (row + pad_h) * n_align + pad_w;
        memcpy(d, src + row * n, n * sizeof(float));
    }
}
// pad 0 in right and down side on 3D
/* Channel-wise version of pad_0_align_2D: pads each of the `c` planes of
 * an (c, m, n) volume into an (c, m_align, n_align) destination with the
 * (pad_h, pad_w) offset.  Falls back to one bulk copy when no padding is
 * actually required. */
static void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    if (m >= m_align && n >= n_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }
    for (int ch = 0; ch < c; ch++)
    {
        float* dst_plane = dst + ch * m_align * n_align;
        float* src_plane = src + ch * m * n;
        pad_0_align_2D(dst_plane, src_plane, m, n, m_align, n_align, pad_h, pad_w);
    }
}
/* Inverse of pad_0_align_2D: crop an m x n region out of an
 * m_align x n_align source, starting at offset (pad_h, pad_w), into a
 * densely packed m x n destination.  A single bulk copy suffices when
 * the target shape already covers the aligned one. */
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    if (m >= m_align && n >= n_align)
    {
        memcpy(dst, src, m * n * sizeof(float));
        return;
    }
    for (int row = 0; row < m; row++)
    {
        float* s = src + (row + pad_h) * n_align + pad_w;
        memcpy(dst + row * n, s, n * sizeof(float));
    }
}
// pad 0 in right and down side on 3D
/* Channel-wise inverse of pad_0_align_3D: crops each of the `c` planes
 * of an aligned (c, m_align, n_align) volume down to (c, m, n), starting
 * at offset (pad_h, pad_w) inside every plane. */
static void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    if (m >= m_align && n >= n_align)
    {
        memcpy(dst, src, c * m * n * sizeof(float));
        return;
    }
    for (int ch = 0; ch < c; ch++)
    {
        float* dst_plane = dst + ch * m * n;
        float* src_plane = src + ch * m_align * n_align;
        delete_0_2D(dst_plane, src_plane, m_align, n_align, m, n, pad_h, pad_w);
    }
}
/* 3x3 stride-1 convolution via Winograd F(4,3) (6x6 tiles, 4x4 outputs).
 *
 * bottom_blob       input, already padded/bordered to (outw_align+2) x (outh_align+2)
 * top_blob          final output, outch x outh x outw
 * kernel_tm_test    pre-transformed kernel from conv3x3s1_winograd43_transform_kernel_sse
 * dot_block         scratch for the transformed output tiles (36 * tiles * outch floats)
 * transform_input   scratch for the transformed input tiles
 * output_bordered   scratch output used when outw/outh are not multiples of 4
 * _bias             per-output-channel bias, may be NULL
 *
 * Pipeline: input transform (B^T d B) -> per-tile GEMM against the
 * interleaved kernel -> output transform (A^T w A) + bias -> crop the
 * 4-aligned result back to outh x outw when alignment padding was added.
 */
void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block,
                              float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch,
                              int outw, int outh, int outch, int num_thread)
{
    const float* bias = _bias;

    // pad to 4n+2, winograd F(4,3)
    float* bottom_blob_bordered = bottom_blob;
    int outw_align = (outw + 3) / 4 * 4;
    int outh_align = (outh + 3) / 4 * 4;

    w = outw_align + 2;
    h = outh_align + 2;

    // BEGIN transform input
    float* bottom_blob_tm = NULL;
    {
        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;
        const int tiles_n = 4 * inch * tiles;

        bottom_blob_tm = transform_input;

        // BT
        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 =  4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r03 + r04
        // 2 =  4 * (r01 - r02) - r03 + r04
        // 3 = -2 * r01 - r02 + 2 * r03 + r04
        // 4 =  2 * r01 - r02 - 2 * r03 + r04
        // 5 =  4 * r01 - 5 * r03 + r05

#if __AVX__
        __m256 _1_n = _mm256_set1_ps(-1);
        __m256 _2_p = _mm256_set1_ps(2);
        __m256 _2_n = _mm256_set1_ps(-2);
        __m256 _4_p = _mm256_set1_ps(4);
        __m256 _4_n = _mm256_set1_ps(-4);
        __m256 _5_n = _mm256_set1_ps(-5);
#endif

#pragma omp parallel for num_threads(num_thread)
        for (int q = 0; q < inch; q++)
        {
            const float* img = bottom_blob_bordered + q * w * h;

            for (int j = 0; j < nColBlocks; j++)
            {
                // six input rows feeding one row of 6x6 tiles
                const float* r0 = img + w * j * 4;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;
                const float* r4 = r3 + w;
                const float* r5 = r4 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
                    // the 36 transformed values of one tile are scattered
                    // across nine 4-float groups, tiles_n apart
                    float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q;
                    float* out_tm1 = out_tm0 + tiles_n;
                    float* out_tm2 = out_tm0 + 2 * tiles_n;
                    float* out_tm3 = out_tm0 + 3 * tiles_n;
                    float* out_tm4 = out_tm0 + 4 * tiles_n;
                    float* out_tm5 = out_tm0 + 5 * tiles_n;
                    float* out_tm6 = out_tm0 + 6 * tiles_n;
                    float* out_tm7 = out_tm0 + 7 * tiles_n;
                    float* out_tm8 = out_tm0 + 8 * tiles_n;
#if __AVX__
                    __m256 _d0, _d1, _d2, _d3, _d4, _d5;
                    __m256 _w0, _w1, _w2, _w3, _w4, _w5;
                    __m256 _t0, _t1, _t2, _t3, _t4, _t5;
                    __m256 _n0, _n1, _n2, _n3, _n4, _n5;
                    // load
                    _d0 = _mm256_loadu_ps(r0);
                    _d1 = _mm256_loadu_ps(r1);
                    _d2 = _mm256_loadu_ps(r2);
                    _d3 = _mm256_loadu_ps(r3);
                    _d4 = _mm256_loadu_ps(r4);
                    _d5 = _mm256_loadu_ps(r5);
                    // w = B_t * d
                    _w0 = _mm256_mul_ps(_d0, _4_p);
                    _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
                    _w0 = _mm256_add_ps(_w0, _d4);
                    _w1 = _mm256_mul_ps(_d1, _4_n);
                    _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
                    _w1 = _mm256_add_ps(_w1, _d3);
                    _w1 = _mm256_add_ps(_w1, _d4);
                    _w2 = _mm256_mul_ps(_d1, _4_p);
                    _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
                    _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
                    _w2 = _mm256_add_ps(_w2, _d4);
                    _w3 = _mm256_mul_ps(_d1, _2_n);
                    _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
                    _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
                    _w3 = _mm256_add_ps(_w3, _d4);
                    _w4 = _mm256_mul_ps(_d1, _2_p);
                    _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
                    _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
                    _w4 = _mm256_add_ps(_w4, _d4);
                    _w5 = _mm256_mul_ps(_d1, _4_p);
                    _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
                    _w5 = _mm256_add_ps(_w5, _d5);
                    // transpose d to d_t
#ifdef _WIN32
                    {
                        _t0.m256_f32[0] = _w0.m256_f32[0];
                        _t1.m256_f32[0] = _w0.m256_f32[1];
                        _t2.m256_f32[0] = _w0.m256_f32[2];
                        _t3.m256_f32[0] = _w0.m256_f32[3];
                        _t4.m256_f32[0] = _w0.m256_f32[4];
                        _t5.m256_f32[0] = _w0.m256_f32[5];
                        _t0.m256_f32[1] = _w1.m256_f32[0];
                        _t1.m256_f32[1] = _w1.m256_f32[1];
                        _t2.m256_f32[1] = _w1.m256_f32[2];
                        _t3.m256_f32[1] = _w1.m256_f32[3];
                        _t4.m256_f32[1] = _w1.m256_f32[4];
                        _t5.m256_f32[1] = _w1.m256_f32[5];
                        _t0.m256_f32[2] = _w2.m256_f32[0];
                        _t1.m256_f32[2] = _w2.m256_f32[1];
                        _t2.m256_f32[2] = _w2.m256_f32[2];
                        _t3.m256_f32[2] = _w2.m256_f32[3];
                        _t4.m256_f32[2] = _w2.m256_f32[4];
                        _t5.m256_f32[2] = _w2.m256_f32[5];
                        _t0.m256_f32[3] = _w3.m256_f32[0];
                        _t1.m256_f32[3] = _w3.m256_f32[1];
                        _t2.m256_f32[3] = _w3.m256_f32[2];
                        _t3.m256_f32[3] = _w3.m256_f32[3];
                        _t4.m256_f32[3] = _w3.m256_f32[4];
                        _t5.m256_f32[3] = _w3.m256_f32[5];
                        _t0.m256_f32[4] = _w4.m256_f32[0];
                        _t1.m256_f32[4] = _w4.m256_f32[1];
                        _t2.m256_f32[4] = _w4.m256_f32[2];
                        _t3.m256_f32[4] = _w4.m256_f32[3];
                        _t4.m256_f32[4] = _w4.m256_f32[4];
                        _t5.m256_f32[4] = _w4.m256_f32[5];
                        _t0.m256_f32[5] = _w5.m256_f32[0];
                        _t1.m256_f32[5] = _w5.m256_f32[1];
                        _t2.m256_f32[5] = _w5.m256_f32[2];
                        _t3.m256_f32[5] = _w5.m256_f32[3];
                        _t4.m256_f32[5] = _w5.m256_f32[4];
                        _t5.m256_f32[5] = _w5.m256_f32[5];
                    }
#else
                    {
                        _t0[0] = _w0[0];
                        _t1[0] = _w0[1];
                        _t2[0] = _w0[2];
                        _t3[0] = _w0[3];
                        _t4[0] = _w0[4];
                        _t5[0] = _w0[5];
                        _t0[1] = _w1[0];
                        _t1[1] = _w1[1];
                        _t2[1] = _w1[2];
                        _t3[1] = _w1[3];
                        _t4[1] = _w1[4];
                        _t5[1] = _w1[5];
                        _t0[2] = _w2[0];
                        _t1[2] = _w2[1];
                        _t2[2] = _w2[2];
                        _t3[2] = _w2[3];
                        _t4[2] = _w2[4];
                        _t5[2] = _w2[5];
                        _t0[3] = _w3[0];
                        _t1[3] = _w3[1];
                        _t2[3] = _w3[2];
                        _t3[3] = _w3[3];
                        _t4[3] = _w3[4];
                        _t5[3] = _w3[5];
                        _t0[4] = _w4[0];
                        _t1[4] = _w4[1];
                        _t2[4] = _w4[2];
                        _t3[4] = _w4[3];
                        _t4[4] = _w4[4];
                        _t5[4] = _w4[5];
                        _t0[5] = _w5[0];
                        _t1[5] = _w5[1];
                        _t2[5] = _w5[2];
                        _t3[5] = _w5[3];
                        _t4[5] = _w5[4];
                        _t5[5] = _w5[5];
                    }
#endif
                    // d = B_t * d_t
                    _n0 = _mm256_mul_ps(_t0, _4_p);
                    _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
                    _n0 = _mm256_add_ps(_n0, _t4);
                    _n1 = _mm256_mul_ps(_t1, _4_n);
                    _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
                    _n1 = _mm256_add_ps(_n1, _t3);
                    _n1 = _mm256_add_ps(_n1, _t4);
                    _n2 = _mm256_mul_ps(_t1, _4_p);
                    _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
                    _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
                    _n2 = _mm256_add_ps(_n2, _t4);
                    _n3 = _mm256_mul_ps(_t1, _2_n);
                    _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
                    _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
                    _n3 = _mm256_add_ps(_n3, _t4);
                    _n4 = _mm256_mul_ps(_t1, _2_p);
                    _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
                    _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
                    _n4 = _mm256_add_ps(_n4, _t4);
                    _n5 = _mm256_mul_ps(_t1, _4_p);
                    _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
                    _n5 = _mm256_add_ps(_n5, _t5);
                    // save to out_tm
                    float output_n0[8] = {0.f};
                    _mm256_storeu_ps(output_n0, _n0);
                    float output_n1[8] = {0.f};
                    _mm256_storeu_ps(output_n1, _n1);
                    float output_n2[8] = {0.f};
                    _mm256_storeu_ps(output_n2, _n2);
                    float output_n3[8] = {0.f};
                    _mm256_storeu_ps(output_n3, _n3);
                    float output_n4[8] = {0.f};
                    _mm256_storeu_ps(output_n4, _n4);
                    float output_n5[8] = {0.f};
                    _mm256_storeu_ps(output_n5, _n5);

                    out_tm0[0] = output_n0[0];
                    out_tm0[1] = output_n0[1];
                    out_tm0[2] = output_n0[2];
                    out_tm0[3] = output_n0[3];
                    out_tm1[0] = output_n0[4];
                    out_tm1[1] = output_n0[5];
                    out_tm1[2] = output_n1[0];
                    out_tm1[3] = output_n1[1];

                    out_tm2[0] = output_n1[2];
                    out_tm2[1] = output_n1[3];
                    out_tm2[2] = output_n1[4];
                    out_tm2[3] = output_n1[5];
                    out_tm3[0] = output_n2[0];
                    out_tm3[1] = output_n2[1];
                    out_tm3[2] = output_n2[2];
                    out_tm3[3] = output_n2[3];

                    out_tm4[0] = output_n2[4];
                    out_tm4[1] = output_n2[5];
                    out_tm4[2] = output_n3[0];
                    out_tm4[3] = output_n3[1];
                    out_tm5[0] = output_n3[2];
                    out_tm5[1] = output_n3[3];
                    out_tm5[2] = output_n3[4];
                    out_tm5[3] = output_n3[5];

                    out_tm6[0] = output_n4[0];
                    out_tm6[1] = output_n4[1];
                    out_tm6[2] = output_n4[2];
                    out_tm6[3] = output_n4[3];
                    out_tm7[0] = output_n4[4];
                    out_tm7[1] = output_n4[5];
                    out_tm7[2] = output_n5[0];
                    out_tm7[3] = output_n5[1];

                    out_tm8[0] = output_n5[2];
                    out_tm8[1] = output_n5[3];
                    out_tm8[2] = output_n5[4];
                    out_tm8[3] = output_n5[5];
#else
                    float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
                    float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
                    float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                        d4[n] = r4[n];
                        d5[n] = r5[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
                        w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
                        w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
                        w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
                        w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
                        w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t4[0] = w0[4];
                        t5[0] = w0[5];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t4[1] = w1[4];
                        t5[1] = w1[5];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t4[2] = w2[4];
                        t5[2] = w2[5];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                        t4[3] = w3[4];
                        t5[3] = w3[5];
                        t0[4] = w4[0];
                        t1[4] = w4[1];
                        t2[4] = w4[2];
                        t3[4] = w4[3];
                        t4[4] = w4[4];
                        t5[4] = w4[5];
                        t0[5] = w5[0];
                        t1[5] = w5[1];
                        t2[5] = w5[2];
                        t3[5] = w5[3];
                        t4[5] = w5[4];
                        t5[5] = w5[5];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 6; n++)
                    {
                        d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
                        d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
                        d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
                        d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
                        d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
                        d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
                    }
                    // save to out_tm
                    {
                        out_tm0[0] = d0[0];
                        out_tm0[1] = d0[1];
                        out_tm0[2] = d0[2];
                        out_tm0[3] = d0[3];
                        out_tm1[0] = d0[4];
                        out_tm1[1] = d0[5];
                        out_tm1[2] = d1[0];
                        out_tm1[3] = d1[1];

                        out_tm2[0] = d1[2];
                        out_tm2[1] = d1[3];
                        out_tm2[2] = d1[4];
                        out_tm2[3] = d1[5];
                        out_tm3[0] = d2[0];
                        out_tm3[1] = d2[1];
                        out_tm3[2] = d2[2];
                        out_tm3[3] = d2[3];

                        out_tm4[0] = d2[4];
                        out_tm4[1] = d2[5];
                        out_tm4[2] = d3[0];
                        out_tm4[3] = d3[1];
                        out_tm5[0] = d3[2];
                        out_tm5[1] = d3[3];
                        out_tm5[2] = d3[4];
                        out_tm5[3] = d3[5];

                        out_tm6[0] = d4[0];
                        out_tm6[1] = d4[1];
                        out_tm6[2] = d4[2];
                        out_tm6[3] = d4[3];
                        out_tm7[0] = d4[4];
                        out_tm7[1] = d4[5];
                        out_tm7[2] = d5[0];
                        out_tm7[3] = d5[1];

                        out_tm8[0] = d5[2];
                        out_tm8[1] = d5[3];
                        out_tm8[2] = d5[4];
                        out_tm8[3] = d5[5];
                    }
#endif // __AVX__
                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
                    r4 += 4;
                    r5 += 4;
                }
            }
        }
    }
    // BEGIN dot
    float* top_blob_tm = NULL;
    {
        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;
        const int tiles_n = 36 * tiles;

        top_blob_tm = dot_block;

        // process output channels 8 / 4 / 1 at a time against the kernel,
        // whose interleave (see the transform_kernel routine) matches the
        // p/8, (p%8)/4, p%4 offset arithmetic below
#pragma omp parallel for num_threads(num_thread)
        for (int r = 0; r < 9; r++)
        {
            int nn_outch = 0;
            int remain_outch_start = 0;

            nn_outch = outch >> 3;
            remain_outch_start = nn_outch << 3;

            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = pp << 3;

                float* output0_tm = top_blob_tm + tiles_n * p;
                float* output1_tm = top_blob_tm + tiles_n * (p + 1);
                float* output2_tm = top_blob_tm + tiles_n * (p + 2);
                float* output3_tm = top_blob_tm + tiles_n * (p + 3);
                float* output4_tm = top_blob_tm + tiles_n * (p + 4);
                float* output5_tm = top_blob_tm + tiles_n * (p + 5);
                float* output6_tm = top_blob_tm + tiles_n * (p + 6);
                float* output7_tm = top_blob_tm + tiles_n * (p + 7);

                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;
                output4_tm = output4_tm + r * 4;
                output5_tm = output5_tm + r * 4;
                output6_tm = output6_tm + r * 4;
                output7_tm = output7_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum4 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum5 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum6 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
                    __m128 _sum4 = _mm_set1_ps(0.f);
                    __m128 _sum5 = _mm_set1_ps(0.f);
                    __m128 _sum6 = _mm_set1_ps(0.f);
                    __m128 _sum7 = _mm_set1_ps(0.f);
#endif
                    int q = 0;
                    for (; q + 3 < inch; q = q + 4)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _r1 = _mm_loadu_ps(r0 + 4);
                        __m128 _r2 = _mm_loadu_ps(r0 + 8);
                        __m128 _r3 = _mm_loadu_ps(r0 + 12);

                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
                        kptr += 32;
                        _k0 = _mm_loadu_ps(kptr);
                        _k1 = _mm_loadu_ps(kptr + 4);
                        _k2 = _mm_loadu_ps(kptr + 8);
                        _k3 = _mm_loadu_ps(kptr + 12);
                        _k4 = _mm_loadu_ps(kptr + 16);
                        _k5 = _mm_loadu_ps(kptr + 20);
                        _k6 = _mm_loadu_ps(kptr + 24);
                        _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
                        kptr += 32;
                        r0 += 16;
                    }

                    for (; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
                        __m128 _k4 = _mm_loadu_ps(kptr + 16);
                        __m128 _k5 = _mm_loadu_ps(kptr + 20);
                        __m128 _k6 = _mm_loadu_ps(kptr + 24);
                        __m128 _k7 = _mm_loadu_ps(kptr + 28);

#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
                        _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
                        _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
                        _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
                        _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
                        _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
                        _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
                        _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
                        _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
                        kptr += 32;
                        r0 += 4;
                    }

                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
                    _mm_storeu_ps(output4_tm, _sum4);
                    _mm_storeu_ps(output5_tm, _sum5);
                    _mm_storeu_ps(output6_tm, _sum6);
                    _mm_storeu_ps(output7_tm, _sum7);
#else
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};
                    float sum4[4] = {0};
                    float sum5[4] = {0};
                    float sum6[4] = {0};
                    float sum7[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                            sum4[n] += r0[n] * kptr[n + 16];
                            sum5[n] += r0[n] * kptr[n + 20];
                            sum6[n] += r0[n] * kptr[n + 24];
                            sum7[n] += r0[n] * kptr[n + 28];
                        }
                        kptr += 32;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                        output4_tm[n] = sum4[n];
                        output5_tm[n] = sum5[n];
                        output6_tm[n] = sum6[n];
                        output7_tm[n] = sum7[n];
                    }
#endif // __AVX__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                    output4_tm += 36;
                    output5_tm += 36;
                    output6_tm += 36;
                    output7_tm += 36;
                }
            }

            nn_outch = (outch - remain_outch_start) >> 2;

            for (int pp = 0; pp < nn_outch; pp++)
            {
                int p = remain_outch_start + pp * 4;

                float* output0_tm = top_blob_tm + tiles_n * p;
                float* output1_tm = top_blob_tm + tiles_n * (p + 1);
                float* output2_tm = top_blob_tm + tiles_n * (p + 2);
                float* output3_tm = top_blob_tm + tiles_n * (p + 3);

                output0_tm = output0_tm + r * 4;
                output1_tm = output1_tm + r * 4;
                output2_tm = output2_tm + r * 4;
                output3_tm = output3_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum1 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum2 = _mm_broadcast_ss(&zero_val);
                    __m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
                    __m128 _sum1 = _mm_set1_ps(0.f);
                    __m128 _sum2 = _mm_set1_ps(0.f);
                    __m128 _sum3 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
                        __m128 _k1 = _mm_loadu_ps(kptr + 4);
                        __m128 _k2 = _mm_loadu_ps(kptr + 8);
                        __m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
                        _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
                        _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
                        _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
                        _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
                        _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
                        _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
                        kptr += 16;
                        r0 += 4;
                    }

                    _mm_storeu_ps(output0_tm, _sum0);
                    _mm_storeu_ps(output1_tm, _sum1);
                    _mm_storeu_ps(output2_tm, _sum2);
                    _mm_storeu_ps(output3_tm, _sum3);
#else
                    float sum0[4] = {0};
                    float sum1[4] = {0};
                    float sum2[4] = {0};
                    float sum3[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                            sum1[n] += r0[n] * kptr[n + 4];
                            sum2[n] += r0[n] * kptr[n + 8];
                            sum3[n] += r0[n] * kptr[n + 12];
                        }
                        kptr += 16;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                        output1_tm[n] = sum1[n];
                        output2_tm[n] = sum2[n];
                        output3_tm[n] = sum3[n];
                    }
#endif // __AVX__
                    output0_tm += 36;
                    output1_tm += 36;
                    output2_tm += 36;
                    output3_tm += 36;
                }
            }

            remain_outch_start += nn_outch << 2;

            for (int p = remain_outch_start; p < outch; p++)
            {
                float* output0_tm = top_blob_tm + 36 * tiles * p;

                output0_tm = output0_tm + r * 4;

                for (int i = 0; i < tiles; i++)
                {
                    const float* kptr =
                        kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;
                    const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
                    float zero_val = 0.f;
                    __m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
                    __m128 _sum0 = _mm_set1_ps(0.f);
#endif
                    for (int q = 0; q < inch; q++)
                    {
                        __m128 _r0 = _mm_loadu_ps(r0);
                        __m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
                        _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
                        _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
                        kptr += 4;
                        r0 += 4;
                    }

                    _mm_storeu_ps(output0_tm, _sum0);
#else
                    float sum0[4] = {0};

                    for (int q = 0; q < inch; q++)
                    {
                        for (int n = 0; n < 4; n++)
                        {
                            sum0[n] += r0[n] * kptr[n];
                        }
                        kptr += 4;
                        r0 += 4;
                    }

                    for (int n = 0; n < 4; n++)
                    {
                        output0_tm[n] = sum0[n];
                    }
#endif // __AVX__ || __SSE__
                    output0_tm += 36;
                }
            }
        }
    }
    // END dot

    // BEGIN transform output
    float* top_blob_bordered = NULL;
    if (outw_align == outw && outh_align == outh)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered = output_bordered;
    }
    {
        // AT
        // const float itm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + r01 + r02 + r03 + r04
        // 1 = r01 - r02 + 2 * (r03 - r04)
        // 2 = r01 + r02 + 4 * (r03 + r04)
        // 3 = r01 - r02 + 8 * (r03 - r04) + r05

        int w_tm = outw_align / 4 * 6;
        int h_tm = outh_align / 4 * 6;

        int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 6;

        const int tiles = nColBlocks * nRowBlocks;

#pragma omp parallel for num_threads(num_thread)
        for (int p = 0; p < outch; p++)
        {
            float* out_tile = top_blob_tm + 36 * tiles * p;
            float* outRow0 = top_blob_bordered + outw_align * outh_align * p;
            float* outRow1 = outRow0 + outw_align;
            float* outRow2 = outRow0 + outw_align * 2;
            float* outRow3 = outRow0 + outw_align * 3;

            const float bias0 = bias ? bias[p] : 0.f;

            for (int j = 0; j < nColBlocks; j++)
            {
                for (int i = 0; i < nRowBlocks; i++)
                {
                    // TODO AVX2
                    float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
                    float w0[6], w1[6], w2[6], w3[6];
                    float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
                    float o0[4], o1[4], o2[4], o3[4];
                    // load
                    for (int n = 0; n < 6; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 6];
                        s2[n] = out_tile[n + 12];
                        s3[n] = out_tile[n + 18];
                        s4[n] = out_tile[n + 24];
                        s5[n] = out_tile[n + 30];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 6; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
                        w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
                        w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
                        w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d0[2] = w2[0];
                        d0[3] = w3[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d1[2] = w2[1];
                        d1[3] = w3[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d2[2] = w2[2];
                        d2[3] = w3[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                        d3[2] = w2[3];
                        d3[3] = w3[3];
                        d4[0] = w0[4];
                        d4[1] = w1[4];
                        d4[2] = w2[4];
                        d4[3] = w3[4];
                        d5[0] = w0[5];
                        d5[1] = w1[5];
                        d5[2] = w2[5];
                        d5[3] = w3[5];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 4; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
                        o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
                        o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
                        o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
                    }
                    // save to top blob tm
                    for (int n = 0; n < 4; n++)
                    {
                        outRow0[n] = o0[n] + bias0;
                        outRow1[n] = o1[n] + bias0;
                        outRow2[n] = o2[n] + bias0;
                        outRow3[n] = o3[n] + bias0;
                    }

                    out_tile += 36;

                    outRow0 += 4;
                    outRow1 += 4;
                    outRow2 += 4;
                    outRow3 += 4;
                }

                outRow0 += outw_align * 3;
                outRow1 += outw_align * 3;
                outRow2 += outw_align * 3;
                outRow3 += outw_align * 3;
            }
        }
    }
    // END transform output

    // Crop the 4-aligned result back to the true output size.
    // BUGFIX: the second comparison previously read `outh_align != outw`
    // (height alignment compared against width), which skipped the crop —
    // and so left `top_blob` unwritten — whenever outh needed alignment
    // but outh_align happened to equal outw.
    if (outw_align != outw || outh_align != outh)
    {
        delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0);
    }
}
/* Transform a 3x3 kernel into Winograd F(4,3) domain (U = G g G^T, 6x6
 * per in/out channel pair) and interleave the result into `kernel_wino`
 * with the 8/4/1 output-channel grouping expected by
 * conv3x3s1_winograd43_sse.
 *
 * kernel       original weights, outch * inch * 9 floats
 * kernel_wino  destination, 9 * 4 * inch * outch floats
 *
 * Fix: the temporary buffer from sys_malloc was used unchecked; on
 * allocation failure we now return early (kernel_wino is then left
 * unfilled) instead of dereferencing NULL.
 */
void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch)
{
    float* kernel_tm = ( float* )sys_malloc(6 * 6 * inch * outch * sizeof(float));
    if (kernel_tm == NULL)
    {
        // Out of memory: bail out rather than crash on a NULL deref.
        return;
    }

    // G
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}};

#pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36;

            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h = G * g (6x3 intermediate)
            float tmp[6][3] = {0};
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U = h * G^T (6x6 result)
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // Interleave the 36 transformed values (nine groups of 4, indexed by r)
    // into the layout consumed by the dot stage: 8-wide, then 4-wide, then
    // single output channels.
    float* kernel_tm_test = kernel_wino;
    for (int r = 0; r < 9; r++)
    {
        int p = 0;
        for (; p + 7 < outch; p += 8)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
            const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
            const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;
            const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36;
            const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36;
            const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36;
            const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36;

            float* ktmp = kernel_tm_test + p / 8 * inch * 32;
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];

                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];

                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];

                ktmp[16] = kernel4[r * 4 + 0];
                ktmp[17] = kernel4[r * 4 + 1];
                ktmp[18] = kernel4[r * 4 + 2];
                ktmp[19] = kernel4[r * 4 + 3];

                ktmp[20] = kernel5[r * 4 + 0];
                ktmp[21] = kernel5[r * 4 + 1];
                ktmp[22] = kernel5[r * 4 + 2];
                ktmp[23] = kernel5[r * 4 + 3];

                ktmp[24] = kernel6[r * 4 + 0];
                ktmp[25] = kernel6[r * 4 + 1];
                ktmp[26] = kernel6[r * 4 + 2];
                ktmp[27] = kernel6[r * 4 + 3];

                ktmp[28] = kernel7[r * 4 + 0];
                ktmp[29] = kernel7[r * 4 + 1];
                ktmp[30] = kernel7[r * 4 + 2];
                ktmp[31] = kernel7[r * 4 + 3];

                ktmp += 32;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }
        for (; p + 3 < outch; p += 4)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
            const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
            const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;

            float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16;
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];

                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];

                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];

                ktmp += 16;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
            }
        }
        for (; p < outch; p++)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;

            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];

                ktmp += 4;
                kernel0 += 36;
            }
        }
        kernel_tm_test += 4 * inch * outch;
    }
    // NOTE(review): buffer comes from sys_malloc but is released with
    // free() — confirm sys_malloc is a plain malloc wrapper, otherwise
    // this should be sys_free().
    free(kernel_tm);
}
/* Winograd F(4,3) convolution: one-time setup.
 * Allocates the interleaved-kernel buffer (unless supplied externally) plus
 * the padded-input / transform scratch buffers, and pre-transforms the 3x3
 * kernel into the Winograd domain.  Always returns 0.
 * NOTE(review): sys_malloc results are not checked here -- confirm that
 * sys_malloc aborts (or that callers tolerate NULL buffers) on OOM. */
int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor,
                         struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param)
{
    int batch = input_tensor->dims[0];
    int input_c = input_tensor->dims[1];
    int output_c = output_tensor->dims[1];
    int output_h = output_tensor->dims[2];
    int output_w = output_tensor->dims[3];
    float* kernel = (float*)filter_tensor->data;

    /* Interleaved kernel buffer, unless the caller manages it externally. */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* Tile the output into TILE x TILE blocks; the input is padded by 2 so
     * each 6x6 Winograd input tile (4x4 output + 2 halo) fits. */
    int block_h = (output_h + TILE - 1) / TILE;
    int block_w = (output_w + TILE - 1) / TILE;
    int block = block_h * block_w;
    int padded_inh = TILE * block_h + 2;
    int padded_inw = TILE * block_w + 2;
    int pad_inhw = padded_inh * padded_inw;
    int outw = block_w * TILE;
    int outh = block_h * TILE;

    priv_info->input_pad = (float*)sys_malloc(batch * input_c * pad_inhw * sizeof(float));
    memset(priv_info->input_pad, 0, batch * input_c * pad_inhw * sizeof(float));
    priv_info->dot_block = (float*)sys_malloc(ELEM_SIZE * block * output_c * sizeof(float));
    priv_info->transform_input = (float*)sys_malloc(ELEM_SIZE * block * input_c * sizeof(float));
    priv_info->output_bordered = NULL;
    /* When the tiled (rounded-up) output overhangs the real output we need a
     * staging buffer to crop from. */
    if (outw != output_w || outh != output_h)
    {
        priv_info->output_bordered = (float*)sys_malloc(outw * outh * output_c * sizeof(float));
    }

    conv3x3s1_winograd43_transform_kernel_sse(kernel, (float*)priv_info->interleave_buffer, input_c, output_c);
    return 0;
}
/* Release every buffer created by wino_conv_hcl_prerun and reset the
 * pointers so a later postrun call is harmless.  An externally supplied
 * interleave buffer is left untouched.  Always returns 0. */
int wino_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    /* Interleave buffer is only ours when it was allocated internally. */
    int owns_interleave = !priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL;
    if (owns_interleave)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    /* Scratch buffers: free-and-clear each one that was allocated. */
    if (priv_info->input_pad != NULL)
    {
        sys_free(priv_info->input_pad);
        priv_info->input_pad = NULL;
    }
    if (priv_info->dot_block != NULL)
    {
        sys_free(priv_info->dot_block);
        priv_info->dot_block = NULL;
    }
    if (priv_info->transform_input != NULL)
    {
        sys_free(priv_info->transform_input);
        priv_info->transform_input = NULL;
    }
    if (priv_info->output_bordered != NULL)
    {
        sys_free(priv_info->output_bordered);
        priv_info->output_bordered = NULL;
    }
    return 0;
}
/* Winograd F(4,3) convolution: per-inference execution.
 * For every batch image: zero-pads the input into priv_info->input_pad,
 * runs the transformed-domain convolution, then applies the activation
 * over the whole output.  Buffers must have been set up by
 * wino_conv_hcl_prerun.  Always returns 0. */
int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                      struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                      int num_thread, int cpu_affinity)
{
    /* parameters actually consumed here (3x3 stride-1 is implied by Winograd) */
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int act_type = param->activation;
    int group = param->group;

    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1];
    int in_c_g = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size_g = in_c_g * in_h * in_w;

    int out_c = output_tensor->dims[1];
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int output_size = out_c * out_h * out_w;

    /* Winograd tiling geometry -- must match wino_conv_hcl_prerun. */
    int block_h = (out_h + TILE - 1) / TILE;
    int block_w = (out_w + TILE - 1) / TILE;
    int padded_in_h = block_h * TILE + 2;
    int padded_in_w = block_w * TILE + 2;

    float* input = (float*)input_tensor->data;
    float* output = (float*)output_tensor->data;
    float* biases = NULL;
    if (bias_tensor != NULL)
        biases = (float*)bias_tensor->data;

    for (int i = 0; i < batch; i++)
    {
        for (int g = 0; g < group; g++)
        {
            /* NOTE(review): padding covers all in_c channels but is repeated
             * for every group -- redundant (though harmless) when group > 1. */
            pad_0_align_3D((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w, input + i * in_c * in_h * in_w,
                           in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0);
            conv3x3s1_winograd43_sse((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w + g * input_size_g,
                                     output + i * out_c * out_h * out_w, priv_info->interleave_buffer,
                                     priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered,
                                     biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread);
        }
    }

    /* act_type < 0 means "no activation"; otherwise clamp via relu(). */
    if (act_type >= 0)
    {
        relu(output, batch * output_size, act_type);
    }
    return 0;
}
common.h | #pragma once
#include <algorithm>
#include <cmath>
#include "co_types.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
#define DISABLE_COPY_AND_ASSIGN(classname) \
private: \
classname(const classname&) = delete; \
classname& operator=(const classname&) = delete;
// Functor that writes a fixed value into arr[idx]; callable from either
// host or device code (CPU_GPU_FUNCTION).
template <typename T>
struct FillFunctor {
  T* arr;       // destination array (not owned by the functor)
  const T val;  // value stored at every visited index
  FillFunctor(T* arr, const T val) : arr(arr), val(val) {}
  CPU_GPU_FUNCTION void operator()(const int idx) { arr[idx] = val; }
};
// Thread barrier: CUDA block-level __syncthreads() on device, an OpenMP
// barrier on the host (and a no-op when OpenMP is disabled).
CPU_GPU_FUNCTION
inline void co_syncthreads() {
#ifdef __CUDA_ARCH__
  __syncthreads();
#else
#if defined(_OPENMP)
#pragma omp barrier
#endif
#endif
}
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ < 600
// Software double-precision atomicAdd for GPUs below compute capability
// 6.0, which lack the native instruction: a CAS loop on the 64-bit
// integer image of the double.  Returns the value *address held before
// the successful add (matching the builtin's contract).
__device__ double atomicAdd(double* address, double val) {
  unsigned long long int* address_as_ull = (unsigned long long int*)address;
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(
        address_as_ull, assumed,
        __double_as_longlong(val + __longlong_as_double(assumed)));
    // Note: uses integer comparison to avoid hang in case of NaN (since NaN
    // != NaN)
  } while (assumed != old);
  return __longlong_as_double(old);
}
#endif
#endif
// Atomically performs *addr += val: CUDA atomicAdd on device, an OpenMP
// atomic update on the host (plain, unsynchronized add without OpenMP).
template <typename T>
CPU_GPU_FUNCTION inline void co_atomic_add(T* addr, T val) {
#ifdef __CUDA_ARCH__
  atomicAdd(addr, val);
#else
#if defined(_OPENMP)
#pragma omp atomic
#endif
  *addr += val;
#endif
}
// Atomically lowers *addr to min(*addr, value) for floats.
// Device: relies on the IEEE-754 bit-pattern ordering trick -- integer
// atomicMin for non-negative values, unsigned atomicMax for negative ones
// -- to avoid a CAS loop.
CPU_GPU_FUNCTION
inline void co_atomic_min(float* addr, float value) {
#ifdef __CUDA_ARCH__
  if (value >= 0) {
    __int_as_float(atomicMin((int*)addr, __float_as_int(value)));
  } else {
    __uint_as_float(atomicMax((unsigned int*)addr, __float_as_uint(value)));
  }
#else
  // NOTE(review): "#pragma omp atomic" does not cover the form
  // "x = min(x, v)" before OpenMP 5.1 "atomic compare" -- verify the
  // target compiler accepts this, or use a critical section instead.
#if defined(_OPENMP)
#pragma omp atomic
#endif
  *addr = std::min(*addr, value);
#endif
}
// Atomically lowers *addr to min(*addr, value) for doubles.
// Device: CAS loop on the 64-bit integer image; retries until *addr is no
// longer greater than value or the compare-and-swap succeeds.
CPU_GPU_FUNCTION
inline void co_atomic_min(double* addr, double value) {
#ifdef __CUDA_ARCH__
  unsigned long long ret = __double_as_longlong(*addr);
  while (value < __longlong_as_double(ret)) {
    unsigned long long old = ret;
    if ((ret = atomicCAS((unsigned long long*)addr, old,
                         __double_as_longlong(value))) == old)
      break;
  }
#else
  // NOTE(review): same caveat as the float overload -- "omp atomic" on a
  // "x = min(x, v)" statement predates OpenMP 5.1 "atomic compare".
#if defined(_OPENMP)
#pragma omp atomic
#endif
  *addr = std::min(*addr, value);
#endif
}
// Atomically raises *addr to max(*addr, value) for floats.
// Device: mirror of co_atomic_min(float) -- integer atomicMax for
// non-negative values, unsigned atomicMin for negative ones.
CPU_GPU_FUNCTION
inline void co_atomic_max(float* addr, float value) {
#ifdef __CUDA_ARCH__
  if (value >= 0) {
    __int_as_float(atomicMax((int*)addr, __float_as_int(value)));
  } else {
    __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value)));
  }
#else
  // NOTE(review): "omp atomic" on "x = max(x, v)" -- see co_atomic_min.
#if defined(_OPENMP)
#pragma omp atomic
#endif
  *addr = std::max(*addr, value);
#endif
}
// Atomically raises *addr to max(*addr, value) for doubles.
// Device: CAS loop on the 64-bit integer image, retrying while value is
// still greater than the currently stored number.
CPU_GPU_FUNCTION
inline void co_atomic_max(double* addr, double value) {
#ifdef __CUDA_ARCH__
  unsigned long long ret = __double_as_longlong(*addr);
  while (value > __longlong_as_double(ret)) {
    unsigned long long old = ret;
    if ((ret = atomicCAS((unsigned long long*)addr, old,
                         __double_as_longlong(value))) == old)
      break;
  }
#else
  // NOTE(review): "omp atomic" on "x = max(x, v)" -- see co_atomic_min.
#if defined(_OPENMP)
#pragma omp atomic
#endif
  *addr = std::max(*addr, value);
#endif
}
// exp(a), dispatching to the CUDA builtin on device and std::exp on host.
template <typename T>
CPU_GPU_FUNCTION inline T co_exp(const T& a) {
#ifdef __CUDA_ARCH__
  return exp(a);
#else
  return std::exp(a);
#endif
}
// |a|, dispatching to the CUDA builtin on device and std::abs on host.
template <typename T>
CPU_GPU_FUNCTION inline T co_abs(const T& a) {
#ifdef __CUDA_ARCH__
  return abs(a);
#else
  return std::abs(a);
#endif
}
// template <>
// CPU_GPU_FUNCTION
// inline float co_abs<float>(const float& a) {
// #ifdef __CUDA_ARCH__
// return fabsf(a);
// #else
// return std::abs(a);
// #endif
// }
// min(a, b), dispatching to the CUDA builtin on device and std::min on host.
template <typename T>
CPU_GPU_FUNCTION inline T co_min(const T& a, const T& b) {
#ifdef __CUDA_ARCH__
  return min(a, b);
#else
  return std::min(a, b);
#endif
}
// template <>
// CPU_GPU_FUNCTION
// inline float co_min<float>(const float& a, const float& b) {
// #ifdef __CUDA_ARCH__
// return fminf(a, b);
// #else
// return std::min(a, b);
// #endif
// }
// max(a, b), dispatching to the CUDA builtin on device and std::max on host.
template <typename T>
CPU_GPU_FUNCTION inline T co_max(const T& a, const T& b) {
#ifdef __CUDA_ARCH__
  return max(a, b);
#else
  return std::max(a, b);
#endif
}
// template <>
// CPU_GPU_FUNCTION
// inline float co_max<float>(const float& a, const float& b) {
// #ifdef __CUDA_ARCH__
// return fmaxf(a, b);
// #else
// return std::max(a, b);
// #endif
// }
// round(a): nearest integer, halfway cases away from zero.
// Host branch now calls std::round for consistency with the other co_*
// wrappers (the original called unqualified round() in both branches,
// unlike co_floor/co_exp/co_abs which qualify the host call).
template <typename T>
CPU_GPU_FUNCTION inline T co_round(const T& a) {
#ifdef __CUDA_ARCH__
  return round(a);
#else
  return std::round(a);
#endif
}
// template <>
// CPU_GPU_FUNCTION
// inline float co_round(const float& a) {
// #ifdef __CUDA_ARCH__
// return roundf(a);
// #else
// return round(a);
// #endif
// }
// floor(a), dispatching to the CUDA builtin on device and std::floor on host.
template <typename T>
CPU_GPU_FUNCTION inline T co_floor(const T& a) {
#ifdef __CUDA_ARCH__
  return floor(a);
#else
  return std::floor(a);
#endif
}
// template <>
// CPU_GPU_FUNCTION
// inline float co_floor(const float& a) {
// #ifdef __CUDA_ARCH__
// return floorf(a);
// #else
// return std::floor(a);
// #endif
// }
// Triangle ("tent") interpolation kernel on [-1, 1]:
//   1 + x on [-1, 0),  1 - x on [0, 1],  0 elsewhere.
// The original mixed logical && and bitwise & between the two range
// tests; both operands are bools so the value is identical, but the
// idiomatic (and consistent) operator is &&.
template <typename T>
CPU_GPU_FUNCTION inline T interp_triangle(T x) {
  return (x + 1) * ((-1 <= x) && (x < 0)) + (1 - x) * ((0 <= x) && (x <= 1));
}
// Derivative of interp_triangle: +1 on [-1, 0), -1 on [0, 1], 0 elsewhere.
// Uses && consistently (the original mixed && with bitwise &; the value
// is unchanged since both operands are bools).
template <typename T>
CPU_GPU_FUNCTION inline T interp_triangle_bwd(T x) {
  return (1) * ((-1 <= x) && (x < 0)) + (-1) * ((0 <= x) && (x <= 1));
}
|
measures_linear_threads.c |
#include <int3d.h>
#include <hpchain.h>
#include <movchain.h>
#include <fitness/fitness.h>
#include <config.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include "fitness_private.h"
#include "gyration.h"
#define COORD3D(V, AXIS) COORD(V.x, V.y, V.z, AXIS)
#define COORD(X, Y, Z, AXIS) ( (Z+AXIS/2) * (AXIS*(long int)AXIS) + (Y+AXIS/2) * ((long int)AXIS) + (X+AXIS/2))
static FitnessCalc *FIT_BUNDLE = NULL;
/* Allocates one FitnessCalc bundle (chain info + per-thread 3D lattice
 * scratch buffer) for each OpenMP thread.  Must be called exactly once
 * before any fitness evaluation; exits the program on double
 * initialization, on exceeding the MAX_MEMORY budget, or on allocation
 * failure (the original merely printed a message on a failed lattice
 * malloc and kept running with a NULL space3d pointer). */
void FitnessCalc_initialize(const HPElem * hpChain, int hpSize){
	if(FIT_BUNDLE != NULL){
		fprintf(stderr, "%s", "Double initialization.\n");
		exit(EXIT_FAILURE);
	}

	int i;
	int numThreads = omp_get_max_threads();
	int axisSize = (hpSize+3)*2;                     /* lattice side: positive + negative half-axes */
	long int spaceSize = axisSize * axisSize * (long int) axisSize;

	/* Refuse to allocate beyond the configured memory budget. */
	if(numThreads * spaceSize * sizeof(char) > MAX_MEMORY){
		fprintf(stderr, "Will not allocate more than %g memory.\n", (double) MAX_MEMORY);
		exit(EXIT_FAILURE);
	}

	/* One bundle per thread. */
	FIT_BUNDLE = (FitnessCalc *) malloc(sizeof(FitnessCalc) * numThreads);
	if(FIT_BUNDLE == NULL){
		fprintf(stderr, "Malloc returned error when allocating thread bundles!\n");
		exit(EXIT_FAILURE);
	}

	/* Single pass replaces the original's three separate init loops. */
	const int gyration = calc_max_gyration(hpChain, hpSize);
	for(i = 0; i < numThreads; i++){
		FIT_BUNDLE[i].hpChain = hpChain;
		FIT_BUNDLE[i].hpSize = hpSize;
		FIT_BUNDLE[i].axisSize = axisSize;
		FIT_BUNDLE[i].maxGyration = gyration;
		FIT_BUNDLE[i].space3d = (void *) malloc(spaceSize * sizeof(char));
		if(FIT_BUNDLE[i].space3d == NULL){
			fprintf(stderr, "Malloc returned error when allocating memory! Attempted to allocate %lf GiB\n", numThreads * spaceSize * sizeof(char) / 1024.0 / 1024.0 / 1024.0);
			exit(EXIT_FAILURE);  /* BUGFIX: previously continued with a NULL lattice */
		}
	}
}
/* Frees every per-thread bundle and its lattice buffer.  Now a no-op when
 * not initialized (the original dereferenced a NULL FIT_BUNDLE).
 * NOTE(review): assumes omp_get_max_threads() returns the same count as at
 * initialization time -- confirm omp_set_num_threads is never called in
 * between. */
void FitnessCalc_cleanup(){
	int i;
	int numThreads = omp_get_max_threads();

	if(FIT_BUNDLE == NULL)
		return;  /* nothing to clean up */

	for(i = 0; i < numThreads; i++){
		free(FIT_BUNDLE[i].space3d);
	}
	free(FIT_BUNDLE);
	FIT_BUNDLE = NULL;
}
/* Returns the thread-0 FitnessCalc bundle (by value).
 * Aborts the program if FitnessCalc_initialize has not been called. */
FitnessCalc FitnessCalc_get(){
	FitnessCalc *bundle = FIT_BUNDLE;
	if(bundle == NULL){
		fprintf(stderr, "%s", "FitnessCalc must be initialized.\n");
		exit(EXIT_FAILURE);
	}
	return bundle[0];
}
/* Counts the number of collision within a vector of beads
 * 'space3d' is 3D lattice whose axis has size axisSize (positive + negative sides of the axis).
 */
static
int count_collisions(int tid, const int3d *beads, int nBeads){
	int i, collisions;
	// Get space3d associated with that thread
	char *space3d = FIT_BUNDLE[tid].space3d;
	int axisSize = FIT_BUNDLE[tid].axisSize;
	collisions = 0;
	// Reset space: only the cells that will actually be touched are zeroed,
	// since clearing the full lattice on every call would be too expensive.
	for(i = 0; i < nBeads; i++){
		long int idx = COORD3D(beads[i], axisSize);
		space3d[idx] = 0;
	}
	// Place beads in the space (actually calculate the collisions at the same time).
	// A bead landing in an already occupied cell contributes that cell's current
	// occupancy, so k beads sharing a cell yield 0+1+...+(k-1) collisions.
	for(i = 0; i < nBeads; i++){
		long int idx = COORD3D(beads[i], axisSize);
		collisions += space3d[idx];
		space3d[idx]++;
	}
	return collisions;
}
/* Counts the number of contacts within a vector of beads
 * 'space3d' is 3D lattice whose axis has size axisSize (positive + negative sides of the axis).
 * A "contact" is a pair of beads occupying face-adjacent lattice cells; each
 * pair is seen once from each side, hence the final division by 2.
 */
static
int count_contacts(int tid, const int3d *beads, int nBeads){
	int i;
	// Get space3d associated with that thread
	char *space3d = FIT_BUNDLE[tid].space3d;
	int axisSize = FIT_BUNDLE[tid].axisSize;
	int contacts = 0;
	// Reset space: clear the 6 face-neighbor cells of every bead. These are
	// exactly the cells read in the counting pass below.
	for(i = 0; i < nBeads; i++){
		int3d a = beads[i];
		space3d[COORD(a.x+1, a.y, a.z, axisSize)] = 0;
		space3d[COORD(a.x-1, a.y, a.z, axisSize)] = 0;
		space3d[COORD(a.x, a.y+1, a.z, axisSize)] = 0;
		space3d[COORD(a.x, a.y-1, a.z, axisSize)] = 0;
		space3d[COORD(a.x, a.y, a.z+1, axisSize)] = 0;
		space3d[COORD(a.x, a.y, a.z-1, axisSize)] = 0;
		// Yes, there is no need to reset the point itself.
		// (A bead's own cell is only ever READ when it is some other bead's
		// neighbor -- and in that case it was cleared above.)
	}
	// Place beads in the space
	for(i = 0; i < nBeads; i++){
		int3d a = beads[i];
		space3d[COORD(a.x, a.y, a.z, axisSize)]++;
	}
	// Count HH and HP contacts: sum the occupancy of each bead's 6 neighbors.
	for(i = 0; i < nBeads; i++){
		int3d a = beads[i];
		contacts += space3d[COORD(a.x+1, a.y, a.z, axisSize)];
		contacts += space3d[COORD(a.x-1, a.y, a.z, axisSize)];
		contacts += space3d[COORD(a.x, a.y+1, a.z, axisSize)];
		contacts += space3d[COORD(a.x, a.y-1, a.z, axisSize)];
		contacts += space3d[COORD(a.x, a.y, a.z+1, axisSize)];
		contacts += space3d[COORD(a.x, a.y, a.z-1, axisSize)];
	}
	// Each contact was counted from both endpoints.
	return contacts / 2;
}
/* Computes all contact/collision measures for a conformation: HH, PP, HP,
 * BB, HB, PB contacts plus collisions, each linearized with sqrt().
 *
 * BUGFIX: the original computed the derived measures (HP = all - HH - PP,
 * HB = all - HH - BB, PB = all - PP - BB) INSIDE the parallel loop, reading
 * retval fields written by other loop iterations that -- under
 * schedule(dynamic) -- may not have executed yet: a data race producing
 * nondeterministic results.  The raw counts are now gathered in parallel
 * (each iteration writes only its own variable) and combined serially. */
BeadMeasures proteinMeasures(const int3d *BBbeads, const int3d *SCbeads, const HPElem *hpChain, int hpSize){
	int i;

	// Create vectors with desired coordinates of beads
	int3d *coordsAll = malloc(sizeof(int3d) * hpSize * 2);
	int sizeAll = 0;
	int3d *coordsBB  = malloc(sizeof(int3d) * hpSize);
	int sizeBB = 0;
	int3d *coordsHB  = malloc(sizeof(int3d) * hpSize * 2);
	int sizeHB = 0;
	int3d *coordsPB  = malloc(sizeof(int3d) * hpSize * 2);
	int sizePB = 0;
	int3d *coordsHH  = malloc(sizeof(int3d) * hpSize);
	int sizeHH = 0;
	int3d *coordsHP  = malloc(sizeof(int3d) * hpSize);
	int sizeHP = 0;
	int3d *coordsPP  = malloc(sizeof(int3d) * hpSize);
	int sizePP = 0;

	// Backbone beads participate in All, BB, HB and PB measures.
	for(i = 0; i < hpSize; i++){
		coordsAll[sizeAll++] = BBbeads[i];
		coordsBB[sizeBB++]   = BBbeads[i];
		coordsHB[sizeHB++]   = BBbeads[i];
		coordsPB[sizePB++]   = BBbeads[i];
	}
	// Side-chain beads are split into hydrophobic (H) and polar (P) sets.
	for(i = 0; i < hpSize; i++){
		coordsAll[sizeAll++] = SCbeads[i];
		coordsHP[sizeHP++]   = SCbeads[i];
		if(hpChain[i] == 'H'){
			coordsHH[sizeHH++] = SCbeads[i];
			coordsHB[sizeHB++] = SCbeads[i];
		} else {
			coordsPP[sizePP++] = SCbeads[i];
			coordsPB[sizePB++] = SCbeads[i];
		}
	}

	// Raw counts, one private slot per measure; combined after the loop.
	int rawHH = 0, rawPP = 0, rawHP = 0, rawBB = 0, rawHB = 0, rawPB = 0, rawColl = 0;
	#pragma omp parallel for schedule(dynamic, 1)
	for(i = 0; i < 7; i++){
		int tid = omp_get_thread_num();
		switch(i){
		case 0: rawHH   = count_contacts(tid, coordsHH, sizeHH);    break;
		case 1: rawPP   = count_contacts(tid, coordsPP, sizePP);    break;
		case 2: rawHP   = count_contacts(tid, coordsHP, sizeHP);    break;
		case 3: rawBB   = count_contacts(tid, coordsBB, sizeBB);    break;
		case 4: rawHB   = count_contacts(tid, coordsHB, sizeHB);    break;
		case 5: rawPB   = count_contacts(tid, coordsPB, sizePB);    break;
		case 6: rawColl = count_collisions(tid, coordsAll, sizeAll); break;
		default: break;
		}
	}

	BeadMeasures retval;
	retval.hh = rawHH;
	retval.pp = rawPP;
	retval.hp = rawHP - rawHH - rawPP; // HP = all - HH - PP
	retval.bb = rawBB;
	retval.hb = rawHB - rawHH - rawBB; // HB = all - HH - BB
	retval.pb = rawPB - rawPP - rawBB; // PB = all - PP - BB
	retval.collisions = rawColl;

	// Remove the trivial contacts (chain neighbors and side-chain links)
	retval.bb -= (hpSize - 1);
	retval.hb -= (sizeHH);
	retval.pb -= (sizePP);

	// Linearize amount of collisions and contacts
	retval.hh = sqrt(retval.hh);
	retval.pp = sqrt(retval.pp);
	retval.hp = sqrt(retval.hp);
	retval.bb = sqrt(retval.bb);
	retval.hb = sqrt(retval.hb);
	retval.pb = sqrt(retval.pb);
	retval.collisions = sqrt(retval.collisions);

	free(coordsAll);
	free(coordsBB);
	free(coordsHB);
	free(coordsPB);
	free(coordsHH);
	free(coordsHP);
	free(coordsPP);
	return retval;
}
|
ep.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - EP
This benchmark is an OpenMP C version of the NPB EP code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Author: P. O. Frederickson
D. H. Bailey
A. C. Woo
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
#include "npb-C.h"
#include "npbparams.h"
/* parameters */
#define MK 16
#define MM (M - MK)
#define NN (1 << MM)
#define NK (1 << MK)
#define NQ 10
#define EPSILON 1.0e-8
#define A 1220703125.0
#define S 271828183.0
#define TIMERS_ENABLED FALSE
/* global variables */
/* common /storage/ */
static double x[2*NK];
#pragma omp threadprivate(x)
static double q[NQ];
/*--------------------------------------------------------------------
program EMBAR
c-------------------------------------------------------------------*/
/*
c This is the serial version of the APP Benchmark 1,
c the "embarassingly parallel" benchmark.
c
c M is the Log_2 of the number of complex pairs of uniform (0, 1) random
c numbers. MK is the Log_2 of the size of each batch of uniform random
c numbers. MK can be set for convenience on a given system, since it does
c not affect the results.
*/
/* EP ("embarrassingly parallel") benchmark driver.
 * Generates 2^(M+1) pseudorandom numbers, converts uniform pairs to
 * Gaussian deviates by acceptance-rejection, tallies them into the
 * annulus counts q[], accumulates the deviate sums (sx, sy), and checks
 * those sums against per-class reference values.
 * NOTE(review): main falls off the end without a return statement --
 * implicit return 0 under C99, but worth making explicit. */
int main(int argc, char **argv) {
    double Mops, t1, t2, t3, t4, x1, x2, sx, sy, tm, an, tt, gc;
    double dum[3] = { 1.0, 1.0, 1.0 };
    int np, ierr, node, no_nodes, i, ik, kk, l, k, nit, ierrcode,
        no_large_nodes, np_add, k_offset, j;
    int nthreads = 1;
    boolean verified;
    char size[13+1]; /* character*13 */
    /*
    c Because the size of the problem is too large to store in a 32-bit
    c integer for some classes, we put it into a string (for printing).
    c Have to strip off the decimal point put in there by the floating
    c point print statement (internal file)
    */
#ifndef POSIX
#ifndef NOBOMP
    bomp_custom_init(NULL);
#endif
#endif
    omp_set_num_threads(1);
    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
           " - EP Benchmark\n");
    sprintf(size, "%12.0f", pow(2.0, M+1));
    /* Blank out the '.' the %f conversion inserted. */
    for (j = 13; j >= 1; j--) {
        if (size[j] == '.') size[j] = ' ';
    }
    printf(" Number of random numbers generated: %13s\n", size);
    verified = FALSE;
    /*
    c Compute the number of "batches" of random number pairs generated
    c per processor. Adjust if the number of processors does not evenly
    c divide the total number
    */
    np = NN;
    /*
    c Call the random number generator functions and initialize
    c the x-array to reduce the effects of paging on the timings.
    c Also, call all mathematical functions that are used. Make
    c sure these initializations cannot be eliminated as dead code.
    */
    vranlc(0, &(dum[0]), dum[1], &(dum[2]));
    dum[0] = randlc(&(dum[1]), dum[2]);
    for (i = 0; i < 2*NK; i++)
    {
        x[i] = -1.0e99;
    }
    printf("Reached here ");
    /* Touch the math functions so they cannot be dead-code eliminated. */
    Mops = log(sqrt(fabs(max(1.0, 1.0))));
    timer_clear(1);
    timer_clear(2);
    timer_clear(3);
    timer_start(1);
    vranlc(0, &t1, A, x);
    /* Compute AN = A ^ (2 * NK) (mod 2^46). */
    t1 = A;
    for ( i = 1; i <= MK+1; i++) {
        t2 = randlc(&t1, t1);
    }
    an = t1;
    tt = S;
    gc = 0.0;
    sx = 0.0;
    sy = 0.0;
    for ( i = 0; i <= NQ - 1; i++) {
        q[i] = 0.0;
    }
    /*
    c Each instance of this loop may be performed independently. We compute
    c the k offsets separately to take into account the fact that some nodes
    c have more numbers to generate than others
    */
    k_offset = -1;
#pragma omp parallel copyin(x)
    {
        /* Per-thread shadows of the scalar work variables. */
        double t1, t2, t3, t4, x1, x2;
        int kk, i, ik, l;
        double qq[NQ]; /* private copy of q[0:NQ-1] */
        for (i = 0; i < NQ; i++) qq[i] = 0.0;
#pragma omp for reduction(+:sx,sy) schedule(static)
        for (k = 1; k <= np; k++) {
            kk = k_offset + k;
            t1 = S;
            t2 = an;
            /* Find starting seed t1 for this kk. */
            for (i = 1; i <= 100; i++) {
                ik = kk / 2;
                if (2 * ik != kk) t3 = randlc(&t1, t2);
                if (ik == 0) break;
                t3 = randlc(&t2, t2);
                kk = ik;
            }
            /* Compute uniform pseudorandom numbers. */
            if (TIMERS_ENABLED == TRUE) timer_start(3);
            /* NOTE(review): "x-1" compensates for vranlc's 1-based
             * (Fortran-heritage) indexing -- formally out-of-bounds
             * pointer arithmetic, but standard in the NPB C ports. */
            vranlc(2*NK, &t1, A, x-1);
            if (TIMERS_ENABLED == TRUE) timer_stop(3);
            /*
            c Compute Gaussian deviates by acceptance-rejection method and
            c tally counts in concentric square annuli. This loop is not
            c vectorizable.
            */
            if (TIMERS_ENABLED == TRUE) timer_start(2);
            for ( i = 0; i < NK; i++) {
                x1 = 2.0 * x[2*i] - 1.0;
                x2 = 2.0 * x[2*i+1] - 1.0;
                t1 = pow2(x1) + pow2(x2);
                if (t1 <= 1.0) {
                    /* Box-Muller (polar) transform of the accepted pair. */
                    t2 = sqrt(-2.0 * log(t1) / t1);
                    t3 = (x1 * t2); /* Xi */
                    t4 = (x2 * t2); /* Yi */
                    l = max(fabs(t3), fabs(t4));
                    qq[l] += 1.0; /* counts */
                    sx = sx + t3; /* sum of Xi */
                    sy = sy + t4; /* sum of Yi */
                }
            }
            if (TIMERS_ENABLED == TRUE) timer_stop(2);
        }
        /* Merge each thread's private annulus counts into the global q[]. */
#pragma omp critical
        {
            for (i = 0; i <= NQ - 1; i++) q[i] += qq[i];
        }
#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    } /* end of parallel region */
    for (i = 0; i <= NQ-1; i++) {
        gc = gc + q[i];
    }
    timer_stop(1);
    tm = timer_read(1);
    nit = 0;
    /* Verify the deviate sums against the reference values for this class. */
    if (M == 24) {
        if((fabs((sx- (-3.247834652034740e3))/sx) <= EPSILON) &&
           (fabs((sy- (-6.958407078382297e3))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 25) {
        if ((fabs((sx- (-2.863319731645753e3))/sx) <= EPSILON) &&
            (fabs((sy- (-6.320053679109499e3))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 28) {
        if ((fabs((sx- (-4.295875165629892e3))/sx) <= EPSILON) &&
            (fabs((sy- (-1.580732573678431e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 30) {
        if ((fabs((sx- (4.033815542441498e4))/sx) <= EPSILON) &&
            (fabs((sy- (-2.660669192809235e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    } else if (M == 32) {
        if ((fabs((sx- (4.764367927995374e4))/sx) <= EPSILON) &&
            (fabs((sy- (-8.084072988043731e4))/sy) <= EPSILON)) {
            verified = TRUE;
        }
    }
    Mops = pow(2.0, M+1)/tm/1000000.0;
    printf("EP Benchmark Results: \n"
           "CPU Time = %10.4f\n"
           "N = 2^%5d\n"
           "No. Gaussian Pairs = %15.0f\n"
           "Sums = %25.15e %25.15e\n"
           "Counts:\n",
           tm, M, gc, sx, sy);
    for (i = 0; i <= NQ-1; i++) {
        printf("%3d %15.0f\n", i, q[i]);
    }
    c_print_results("EP", CLASS, M+1, 0, 0, nit, nthreads,
                    tm, Mops,
                    "Random numbers generated",
                    verified, NPBVERSION, COMPILETIME,
                    CS1, CS2, CS3, CS4, CS5, CS6, CS7);
    if (TIMERS_ENABLED == TRUE) {
        printf("Total time: %f", timer_read(1));
        printf("Gaussian pairs: %f", timer_read(2));
        printf("Random numbers: %f", timer_read(3));
    }
}
|
imd_colrad.h | #include "imd.h"
#include <sys/time.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_errno.h>
// #define USEFLOAT // hauptsächlich in der funktion genexptint. Profiling zeigte, dass
// hier die meiste zeit verbraucht wird -> float verdoppelt performance
#ifdef USEFLOAT
typedef float Real;
#define REALTYPE MPI_FLOAT
#else
typedef double Real;
#define REALTYPE MPI_DOUBLE
#endif
#ifdef USEFLOAT
#define EXPR expf //exp zu floaten ist eine ganz mieeese idee
#define SQRTR sqrtf
#define POWR powf
#define LOGR logf
#else
#define EXPR exp
#define SQRTR sqrt
#define POWR pow
#define LOGR log
#endif
// *********************************************************
// PHYSICAL CONSTANTS
// *********************************************************
// const double eV2J=1.6021766E-19;
const Real eV2H=0.03674932; //eV to Hartree
const Real colrad_reltol=1e-5;
const Real colrad_abstol=10.0;
// const Real J2eV=6.2415091E18;
const Real planck=6.62607004E-34; // J/s
const Real bohr_radius=0.52917721067E-10; // m
const Real bohr_radius_sq=2.800285202924816e-21;
const Real hbar_cub=1.172812163789953e-102; //hbar^3
const Real double_emass_pow_3_2 = 2.459112949719466e-45; // (2*emass)^3/2
const int MAXLINE = 255;
const Real pi=3.141592653589793;
const Real pi_sq=9.869604401089358;
const Real E_ion_H=13.6; // eV
const Real E_ion_H_J=2.178960176000000e-18; // J
const Real E_ion_H_sq_J=4.747867448593952e-36;
const Real colrad_tequi=1e-12;//TEST// 1e-12; //bei initial equi ohne Temperatur-variation erst einmal
//die Saha-besetzungsdichten equilibrieren
//const double LIGHTSPEED=2.997925458e8; // m/s
Real LASERFREQ;
int colrad_ydot(double t, N_Vector u, N_Vector udot, void *user_data);
void do_Saha(Real Te,Real totalc,Real ne,N_Vector y);
int colrad_GetCoeffs(N_Vector y,Real It, void * user_data);
// Die Zwei müssen nach Prototypes.h
// void do_colrad(double dt);
// void colrad_init(void);
void colrad_read_states(void);
void colrad_Saha_init(int i,int j,int k);
// ******************************************************************************
// * CROSS SECTION INTEGRATION STUFF
// ******************************************************************************
gsl_integration_workspace * winteg_inner=NULL;
gsl_integration_workspace * winteg_outer=NULL;
gsl_integration_workspace * winteg_fermi=NULL;
gsl_integration_workspace * winteg_exc=NULL; //excitation
gsl_integration_romberg_workspace * winteg_rb_inner=NULL;
gsl_integration_romberg_workspace * winteg_rb_outer=NULL;
struct my_f_params { Real ne; Real T;Real mu; Real E;Real DeltaE; int allowed;};
// struct my_f_params fparams_inner; //For inner integrand
// struct my_f_params fparams_outer; //outer integrand
// struct my_f_params fparams_fermi;
// struct my_f_params fparams_exc;
double inner_integrand_ionization(double x, void *p); // integrate along E'
double outer_integrand_ionization(double x,void *p); // integrate along E
Real double_integral_ionization(Real ne,Real T, Real mu, Real DeltaE); //evaluates double integral
double inner_integrand_recombination(double x, void *p);
double outer_integrand_recombination(double x,void *p);
Real double_integral_recombination(Real ne,Real T, Real mu, Real DeltaE);
double integrand_excitation(double x,void *p);
Real eval_excitation_integral(Real ne,Real T,Real mu, Real DeltaE, int allowed);
Real eval_dexcitation_integral(Real ne,Real T,Real mu, Real DeltaE, int allowed);
double integrand_deexcitation(double x,void *p);
double fermi_integrand(double x, void *p);
Real eval_fermi_integrand(Real ne,Real T, Real mu);
double integrand_excitation_debug(double x,void *p);
double outer_integrand_ionization2(double x,struct my_f_params* p);
Real double_integral_ionization2(Real ne,Real T, Real mu, Real DeltaE); //evaluates double integral
double inner_integrand_ionization2(double x, struct my_f_params* p);
// **********************************************************************************************
// * PAR INTEGRAL STUFF
// **********************************************************************************************
int terminate_gkq;
int terminate_gkq_outer;
int terminate_gkq_inner;
int terminate_serial;
int gkq_iter_serial; // nr of iterations
const double gkq_alpha=0.816496580927726;
const double gkq_beta=0.447213595499958;
static const double xgkq[12] =
{
0.0,
-0.942882415695480,
-0.816496580927726,
-0.641853342345781,
-0.447213595499958,
-0.236383199662150,
0.0,
0.236383199662150,
0.447213595499958,
0.641853342345781,
0.816496580927726,
0.942882415695480
};
Real integral_simpson(Real (*f)(Real, void*), Real a, Real b,int n,void* p);
int simpson_error;
const Real tolmax=1e-20;
const Real simpson_itermax=120;
#define INITIAL_STACK_SIZE 128 /* initial size of new stacks */
/* the stack structure */
struct stack_s{
int el_count; /* count of elements on stack */
int el_size; /* size of an element */
int mem_reserve; /* allocated memory for stack */
void* elements; /* pointer to begin of stack */
};
typedef struct _work_t{
double a;
double b;
double tol;
double S;
double fa;
double fb;
double fm;
double rec;
int iter;
struct my_f_params * p; //pointer auf params
} work_t;
typedef struct _work_t_gkq{
double a;
double b;
double toler;
double I_13;
double I_prev;
double fa;
double fb;
struct my_f_params * p; //pointer auf params
shortint iter;
} work_gkq;
typedef struct stack_s* stack_t;
double integral_simpson_par(double (*f)(double, struct my_f_params*), stack_t stack);
double gkq_adapt_OMP(double (*f)(double, struct my_f_params*), stack_t stack);
double gkq_OMP(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p,stack_t stack);
double gkq_serial(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p);
double gkq_adapt_serial(double (*f)(double, struct my_f_params*), double a, double b,
double fa,double fb, double toler,double I_13, struct my_f_params* p);
// void create_stack(stack_t* stack, int element_size);
// int empty_stack(stack_t stack);
// void push_stack(stack_t stack, void* element);
// void pop_stack(stack_t stack, void* element);
/******************************************
 * create new stack
 * Allocates a stack header plus backing storage for INITIAL_STACK_SIZE
 * elements of element_size bytes each; aborts via error() on OOM.
 ******************************************/
void create_stack(
         stack_t* stack,      /* stack to create */
         int element_size)    /* size of a stack element */
{
    const int cap0 = INITIAL_STACK_SIZE;

    /* stack header */
    (*stack) = (stack_t) malloc(sizeof(struct stack_s));
    stack_t s = *stack;
    if (s == NULL){
        char errstr[255];
        sprintf(errstr, "error: could not allocate memory for stack.. Abort.\n");
        error(errstr);
        // exit(1);
    }

    /* element storage */
    s->elements = (void*) malloc(element_size * cap0);
    s->mem_reserve = cap0;
    if (s->elements == NULL){
        char errstr[255];
        sprintf(errstr, "error: could not allocate memory for stack.. Abort.\n");
        error(errstr);
    }
    s->el_size = element_size;
    s->el_count = 0;
}
/*****************************************
 * check if the stack is empty
 * (returns non-zero when no elements are stored)
 *****************************************/
int empty_stack(stack_t stack)
{
    return (stack->el_count > 0) ? 0 : 1;
}
/*****************************************
 * push a element on stack
 * Copies element (el_size bytes) onto the top of the stack, growing the
 * backing storage to the next power of two when full.
 *****************************************/
void push_stack(stack_t stack, /* target stack */
                void* element) /* element to push */
{
    int i, new_reserve;
    int log2_count;

    /* check if we need more memory for stack */
    if (stack->el_count >= stack->mem_reserve)
    {
        /* New capacity: smallest power of two strictly greater than
         * el_count.  BUGFIX: the original loop update was "i>>1" -- an
         * expression with no effect -- so i never changed and the loop
         * never terminated; it must be "i >>= 1". */
        for (i = stack->el_count, log2_count = 0;
             i > 0;
             i >>= 1, log2_count++);
        new_reserve = 1 << log2_count;

        /* reallocate memory for phase thread tables
           and nullify new values (error() aborts, so the
           overwrite-on-failure realloc pattern cannot leak here) */
        stack->elements = (void *) realloc(stack->elements,
                                           stack->el_size * new_reserve);
        if (!stack->elements){
            char errstr [255];
            sprintf(errstr, "error: can't reallocate stack.. Aborting\n");
            error(errstr);
            // exit(1);
        }
        stack->mem_reserve = new_reserve;
    }

    /* now push the element on top of the stack */
    memcpy((char*)stack->elements + stack->el_count*stack->el_size,
           element, stack->el_size);
    stack->el_count++;
}
/*****************************************
* pop an element from stack
*****************************************/
/**
 * Remove the top element from the stack and copy it into *element.
 * Popping from an empty stack is reported through error().
 */
void pop_stack(
    stack_t stack, /* target stack */
    void* element) /* where the popped element is stored */
{
    if (stack->el_count <= 0) {
        char errstr[255];
        sprintf(errstr, "error: trying to pop from empty stack.\n");
        error(errstr);
        // exit(2);
    }
    stack->el_count--;
    const char* top =
        (const char*)stack->elements + stack->el_count * stack->el_size;
    memcpy(element, top, stack->el_size);
}
// ***************************************************************************
// * Gauss-Kronrod quadrature, parallel
// ***************************************************************************
/**
 * Parallel (OpenMP) driver for adaptive Gauss-Kronrod quadrature of f over
 * [a,b] with tolerance TOL; extra parameters for f are passed via p.
 *
 * Performs one coarse pass over the whole interval (4-point Gauss-Lobatto,
 * 7-point and 13-point Kronrod estimates) to derive a per-interval tolerance,
 * seeds the shared work stack with the initial interval, and hands off to the
 * stack-driven worker.  Uses the global node table xgkq.
 *
 * BUGFIX: work.iter (the subdivision-depth counter consumed by the adapt
 * worker) was never initialized before the work item was pushed, so the
 * worker read an indeterminate value -- undefined behavior that could make
 * the maxiter cutoff fire immediately or never.  It is now set to 0.
 */
double gkq_OMP(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p,stack_t stack)
{
    double result=0.0;
    /* ---- first (coarse) integration over the whole interval ---- */
    double m=0.5*(a+b);   /* midpoint */
    double h=0.5*(b-a);   /* half-width */
    double y[13];         /* f at the 13 Kronrod abscissas */
    double fa=y[0]=f(a,p);
    double fb=y[12]=f(b,p);
    int i;
    for(i=1;i<12;i++)
        y[i]=f(m+xgkq[i]*h,p);   /* xgkq: global table of node offsets */
    double I_4= (h/6.0)*(y[0]+y[12]+5.0*(y[4]+y[8])); // 4-point gauss-lobatto
    double I_7= (h/1470.0)*(77.0*(y[0]+y[12])+432.0*(y[2]+y[10])+ // 7-point kronrod
                625.0*(y[4]+y[8])+672.0*y[6]);
    double I_13= h*(0.0158271919734802*(y[0]+y[12])+0.0942738402188500*(y[1]+y[11])+0.155071987336585*(y[2]+y[10])+
                 0.188821573960182*(y[3]+y[9])+0.199773405226859*(y[4]+y[8])+0.224926465333340*(y[5]+y[7])+
                 0.242611071901408*y[6]); //13-point Kronrod
    double Err1=fabs(I_7-I_13);
    double Err2=fabs(I_4-I_13);
    /* sharpen the user tolerance by the observed error ratio */
    double r=(Err2 != 0.0) ? Err1/Err2 : 1.0;
    double toler=(r > 0.0 && r < 1.0) ? TOL/r : TOL;
    if(I_13 == 0)
        I_13=b-a;   /* avoid a zero error scale when the estimate vanishes */
    I_13=fabs(I_13);
    /* ---- prepare the initial work item and push it onto the stack ---- */
    work_gkq work;
    work.iter=0;   /* BUGFIX: was uninitialized (see function comment) */
    work.a = a;
    work.b = b;
    work.toler = toler;
    work.I_13=I_13;
    work.fa=fa;
    work.fb=fb;
    work.p=p;
    work.I_prev=I_7;
    /* Guard against tolerance underflow; previously seen failure mode:
       OUT OF TOLERANCE !!!, mll:3.0162e-18, a:3.0162e-18, b:3.0162e-18,
       mrr:3.0162e-18, I_7-I_4:0.0000e+00, tol:1.6002e-315, I_13:7.0585e-313 */
    if(I_13 < 1e-150)
        return 0;
    push_stack(stack, &work);
    result=gkq_adapt(f,stack);
    return result;
}
/**
 * Serial driver for adaptive Gauss-Kronrod quadrature of f over [a,b] with
 * tolerance TOL; extra parameters for f are passed via p.
 * Performs one coarse pass over the whole interval to derive a per-interval
 * tolerance, then recurses via gkq_adapt_serial().
 * Uses file globals: xgkq (node offsets) and gkq_iter_serial (iteration
 * counter, reset here) -- therefore not thread-safe.
 */
double gkq_serial(double (*f)(double, struct my_f_params*), double a, double b, double TOL, struct my_f_params* p)
{
//1st integration
double result=0.0;
gkq_iter_serial=0; /* reset the global iteration counter for this run */
// *********************************************
double m=0.5*(a+b); /* interval midpoint */
double h=0.5*(b-a); /* interval half-width */
double y[13]; /* f evaluated at the 13 Kronrod abscissas */
double fa=y[0]=f(a,p);
double fb=y[12]=f(b,p);
int i;
for(i=1;i<12;i++)
y[i]=f(m+xgkq[i]*h,p); /* xgkq: global table of node offsets */
double I_4= (h/6.0)*(y[0]+y[12]+5.0*(y[4]+y[8])); // 4-point gauss-lobatto
double I_7= (h/1470.0)*(77.0*(y[0]+y[12])+432.0*(y[2]+y[10])+ // 7-point kronrod
625.0*(y[4]+y[8])+672.0*y[6]);
double I_13= h*(0.0158271919734802*(y[0]+y[12])+0.0942738402188500*(y[1]+y[11])+0.155071987336585*(y[2]+y[10])+
0.188821573960182*(y[3]+y[9])+0.199773405226859*(y[4]+y[8])+0.224926465333340*(y[5]+y[7])+
0.242611071901408*y[6]); //13-point Kronrod
double Err1=fabs(I_7-I_13); /* |7-point - 13-point| */
double Err2=fabs(I_4-I_13); /* |4-point - 13-point| */
/* sharpen the user tolerance by the observed error ratio */
double r=(Err2 != 0.0) ? Err1/Err2 : 1.0;
double toler=(r > 0.0 && r < 1.0) ? TOL/r : TOL;
if(I_13 == 0)
I_13=b-a; /* avoid a zero error scale when the integral estimate vanishes */
I_13=fabs(I_13);
result=gkq_adapt_serial(f,a,b,fa,fb,toler,I_13, p);
return result;
}
// ***********************************************
// * RECURSIVE ADAPTATION ROUTINE FOR PARALLEL GAUSS-KRONROD QUADRATURE
// **********************************************
/**
 * Stack-driven worker for the parallel adaptive Gauss-Kronrod quadrature.
 *
 * Each OpenMP thread repeatedly pops an interval from the shared work stack,
 * computes the 4-point Gauss-Lobatto and 7-point Kronrod estimates on it, and
 * either accepts the interval (adding I_7 to the shared result) or splits it
 * into six subintervals that are pushed back onto the stack.  Global
 * termination is detected when the stack is empty and no thread is busy.
 *
 * Uses file globals: gkq_alpha/gkq_beta (node offsets), terminate_gkq
 * (error flag), myid.
 * NOTE(review): gkq_OMP calls gkq_adapt(), not this function -- presumably an
 * alias or wrapper declared elsewhere; confirm against the rest of the file.
 */
double gkq_adapt_OMP(double (*f)(double, struct my_f_params*), stack_t stack)
{
work_gkq work;
work.iter=0;
int ready, idle, busy; /* ready/idle are per-thread; busy counts active threads (shared) */
double integral_result = 0.0;
busy = 0;
terminate_gkq=0; /* clear the global error/termination flag */
#pragma omp parallel default(none) \
shared(stack, integral_result,f,busy,terminate_gkq,myid) \
private(work, idle, ready)
{
// printf("me:%d, err:%d\n",omp_get_thread_num(),simpson_error);
ready = 0;
idle = 1;
while(!ready) // && !terminate_gkq)// && !simpson_error) // <-- NOT like this (would drop queued work)!
{
/* pop one work item under the stack lock, or detect global termination */
#pragma omp critical (stack)
{
if (!empty_stack(stack))
{
/* we have new work */
pop_stack(stack, &work);
if (idle)
{
/* tell the others this thread is busy now */
busy += 1;
idle = 0;
}
}
else
{
/* no new work on stack */
if (!idle){
busy -= 1;
idle = 1;
}
/* nobody has anything to do; let us leave the loop */
if (busy == 0)
{
ready = 1;
}
}
} /* end critical(stack) */
if (idle)
continue; //if ready==1 --> leave loop
/* unpack the popped work item */
double I_prev=work.I_prev;
double a = work.a;
double b = work.b;
double toler = work.toler;
double I_13=work.I_13;
double fa=work.fa;
double fb=work.fb;
int iter=work.iter;
// double *y= work.y; // not needed here!
struct my_f_params * p = work.p;
double m = (a+b)/2;
double h = (b -a)/2;
/* interior Kronrod nodes (gkq_alpha, gkq_beta are global node offsets) */
double mll=m-gkq_alpha*h;
double ml=m-gkq_beta*h;
double mr=m+gkq_beta*h;
double mrr=m+gkq_alpha*h;
double fmll=f(mll,p);
double fml=f(ml,p);
double fm=f(m,p);
double fmr=f(mr,p);
double fmrr=f(mrr,p);
double I_4=h/6.0*(fa+fb+5.0*(fml+fmr)); // 4-point Gauss-Lobatto formula.
double I_7=h/1470.0*(77.0*(fa+fb)+432.0*(fmll+fmrr)+625.0*(fml+fmr)+672.0*fm);
// if(myid==1)
// printf("I_7:%.4e, I_13:%.4e,I_4:%.4e, minus:%.4e, to:%.4e\n",I_7,I_13,I_4,I_7-I_4, toler*I_13);
int maxiter=50; //max. subdivisions
double abstol=1e-30; /* absolute cutoff on the change between successive estimates */
work.I_prev=I_7; // for the absolute-tolerance check in the next refinement step
/* accept the interval if within tolerance, at machine resolution,
   or if the subdivision limits have been reached */
if (fabs(I_7-I_4) <= toler*I_13 || mll <= a || b <= mrr || iter > maxiter || fabs(I_7-I_prev) < abstol )
{
if ((mll <= a || b <= mrr)) //Error
{
// out_of_tolerance=true; // Interval contains no more machine numbers
// printf("OUT OF TOLERANCE !!!, mll:%.4e, a:%.4e, b:%.4e, mrr:%.4e,I_7-I_4:%.4e, tol:%.4e,I_13:%.4e\n",
// mll,b,b,mrr,I_7-I_4, toler*I_13,I_13);
terminate_gkq=1;
}
#pragma omp critical (integral_result)
{
integral_result += I_7; //Terminate recursion.
}
// printf("me ok:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f,ubteg;%.4e\n", omp_get_thread_num(), a,b,toler,I_4,I_7,integral_result);
}
else //subdivide interval and push new work on stack
{
#pragma omp critical (stack)
{
// printf("me NOOOO:%d, a:%f,b:%f, tler:%.5e,I_4:%f,I_7:%f\n", omp_get_thread_num(), a,b,toler,I_4,I_7);
/* six subintervals delimited by a, mll, ml, m, mr, mrr, b */
work.iter=iter+1;
work.a=a;
work.b=mll;
work.fa=fa;
work.fb=fmll;
push_stack(stack, &work);
work.a=mll;
work.b=ml;
work.fa=fmll;
work.fb=fml;
push_stack(stack, &work);
work.a=ml;
work.b=m;
work.fa=fml;
work.fb=fm;
push_stack(stack, &work);
work.a=m;
work.b=mr;
work.fa=fm;
work.fb=fmr;
push_stack(stack, &work);
work.a=mr;
work.b=mrr;
work.fa=fmr;
work.fb=fmrr;
push_stack(stack, &work);
work.a=mrr;
work.b=b;
work.fa=fmrr;
work.fb=fb;
push_stack(stack, &work);
} // pragma critical stack
} // else ..non-acceptable error
} // while
} /* end omp parallel */
return integral_result;
}
/**
 * Recursive refinement step of the serial adaptive Gauss-Kronrod quadrature
 * (driver: gkq_serial).  Compares the 4-point Gauss-Lobatto and 7-point
 * Kronrod estimates on [a,b]; accepts I_7 when the difference is within
 * toler*I_13 or the interval can no longer be subdivided in machine numbers,
 * otherwise recurses on the six subintervals delimited by the Kronrod nodes.
 *
 * fa/fb are f(a)/f(b) (reused to avoid re-evaluation); gkq_alpha, gkq_beta,
 * gkq_iter_serial and terminate_serial are file globals.
 *
 * BUGFIX: the out-of-tolerance diagnostic passed 'b' for the value labelled
 * "a:", printing the wrong endpoint; it now passes 'a'.
 */
double gkq_adapt_serial(double (*f)(double, struct my_f_params*), double a, double b, double fa,
                        double fb, double toler,double I_13, struct my_f_params* p)
{
    double m = (a+b)/2;
    double h = (b -a)/2;
    /* interior Kronrod nodes */
    double mll=m-gkq_alpha*h;
    double ml=m-gkq_beta*h;
    double mr=m+gkq_beta*h;
    double mrr=m+gkq_alpha*h;
    double fmll=f(mll,p);
    double fml=f(ml,p);
    double fm=f(m,p);
    double fmr=f(mr,p);
    double fmrr=f(mrr,p);
    double I_4=h/6.0*(fa+fb+5.0*(fml+fmr)); // 4-point Gauss-Lobatto formula.
    double I_7=h/1470.0*(77.0*(fa+fb)+432.0*(fmll+fmrr)+625.0*(fml+fmr)+672.0*fm);
    gkq_iter_serial++;
    /* NOTE: "&& gkq_iter_serial" is always true after the increment above;
       kept to preserve the original control flow exactly. */
    if ( (fabs(I_7-I_4) <= toler*I_13 || mll <= a || b <= mrr) && gkq_iter_serial)
    {
        if ((mll <= a || b <= mrr) && !terminate_serial) //Error
        {
            /* Interval contains no more machine numbers.
               BUGFIX: second argument was 'b' although labelled "a:". */
            printf("OUT OF TOLERANCE !!!, mll:%.4e, a:%.4e, b:%.4e, mrr:%.4e\n", mll,a,b,mrr);
            terminate_serial=1;
        }
        return I_7;   /* accept: terminate recursion on this subinterval */
    }
    else
    {
        /* subdivide into the six subintervals a..mll..ml..m..mr..mrr..b */
        return gkq_adapt_serial(f, a,mll,fa,fmll,toler,I_13,p) +
               gkq_adapt_serial(f, mll,ml,fmll,fml,toler,I_13,p) +
               gkq_adapt_serial(f, ml,m,fml,fm,toler,I_13,p) +
               gkq_adapt_serial(f, m,mr,fm,fmr,toler,I_13,p) +
               gkq_adapt_serial(f, mr,mrr,fmr,fmrr,toler,I_13,p) +
               gkq_adapt_serial(f, mrr,b,fmrr,fb,toler,I_13,p);
    }
}
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void conv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
const float* k0 = kernel + p*inch*9;
const float* k1 = kernel + (p+1)*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr0n = outptr0 + outw;
float* outptr1n = outptr1 + outw;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k03 = vld1q_f32(k0+3);
float32x4_t _k06 = vld1q_f32(k0+6);
float32x4_t _k10 = vld1q_f32(k1);
float32x4_t _k13 = vld1q_f32(k1+3);
float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n"// r0
"add %5, %5, #16 \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v14.4s, v15.4s}, [%8] \n"// r3
"add %8, %8, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v14.16b, v15.16b, #8 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v6.4s}, [%1] \n"// _sum0
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v7.4s}, [%2] \n"// _sum1
"fmla v6.4s, v8.4s, %18.s[0] \n"
"fmla v7.4s, v8.4s, %21.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v12.4s}, [%3] \n"// _sum0n
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v13.4s}, [%4] \n"// _sum1n
"fmla v12.4s, v14.4s, %20.s[0] \n"
"fmla v13.4s, v14.4s, %23.s[0] \n"
"ext v8.16b, v8.16b, v9.16b, #8 \n"
"ext v9.16b, v14.16b, v15.16b, #4 \n"
"fmla v6.4s, v10.4s, %18.s[1] \n"
"fmla v7.4s, v10.4s, %21.s[1] \n"
"fmla v12.4s, v11.4s, %20.s[2] \n"
"fmla v13.4s, v11.4s, %23.s[2] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v14.4s, v15.4s}, [%6] \n"// r1
"add %6, %6, #16 \n"
"fmla v6.4s, v8.4s, %18.s[2] \n"
"fmla v7.4s, v8.4s, %21.s[2] \n"
"fmla v12.4s, v9.4s, %20.s[1] \n"
"fmla v13.4s, v9.4s, %23.s[1] \n"
"ext v10.16b, v14.16b, v15.16b, #4 \n"
"fmla v6.4s, v14.4s, %19.s[0] \n"
"fmla v7.4s, v14.4s, %22.s[0] \n"
"fmla v12.4s, v14.4s, %18.s[0] \n"
"fmla v13.4s, v14.4s, %21.s[0] \n"
"ext v11.16b, v14.16b, v15.16b, #8 \n"
"fmla v6.4s, v10.4s, %19.s[1] \n"
"fmla v7.4s, v10.4s, %22.s[1] \n"
"fmla v12.4s, v10.4s, %18.s[1] \n"
"fmla v13.4s, v10.4s, %21.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"ld1 {v8.4s, v9.4s}, [%7] \n"// r2
"add %7, %7, #16 \n"
"fmla v6.4s, v11.4s, %19.s[2] \n"
"fmla v7.4s, v11.4s, %22.s[2] \n"
"fmla v12.4s, v11.4s, %18.s[2] \n"
"fmla v13.4s, v11.4s, %21.s[2] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"fmla v6.4s, v8.4s, %20.s[0] \n"
"fmla v7.4s, v8.4s, %23.s[0] \n"
"fmla v12.4s, v8.4s, %19.s[0] \n"
"fmla v13.4s, v8.4s, %22.s[0] \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v6.4s, v10.4s, %20.s[1] \n"
"fmla v7.4s, v10.4s, %23.s[1] \n"
"fmla v12.4s, v10.4s, %19.s[1] \n"
"fmla v13.4s, v10.4s, %22.s[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n"// r0
"add %5, %5, #16 \n"
"fmla v6.4s, v11.4s, %20.s[2] \n"
"fmla v7.4s, v11.4s, %23.s[2] \n"
"fmla v12.4s, v11.4s, %19.s[2] \n"
"fmla v13.4s, v11.4s, %22.s[2] \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v14.4s, v15.4s}, [%8] \n"// r3
"add %8, %8, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%2], #16 \n"
"ext v11.16b, v14.16b, v15.16b, #8 \n"
"st1 {v12.4s}, [%3], #16 \n"
"st1 {v13.4s}, [%4], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %5, %5, #16 \n"
"sub %8, %8, #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr0n), // %3
"=r"(outptr1n), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr0n),
"4"(outptr1n),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k00), // %18
"w"(_k03), // %19
"w"(_k06), // %20
"w"(_k10), // %21
"w"(_k13), // %22
"w"(_k16) // %23
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5 :64] \n"// r0
"add %5, #16 \n"
"pld [%8, #192] \n"
"vld1.f32 {d28-d30}, [%8] \n"// r3
"add %8, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q14, q15, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1 :64] \n"// _sum0
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2 :64] \n"// _sum1
"vmla.f32 q6, q8, %e18[0] \n"
"vmla.f32 q7, q8, %e21[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d24-d25}, [%3] \n"// _sum0n
"pld [%4, #128] \n"
"vld1.f32 {d26-d27}, [%4] \n"// _sum1n
"vmla.f32 q12, q14, %e20[0] \n"
"vmla.f32 q13, q14, %e23[0] \n"
"vext.32 q8, q8, q9, #2 \n"
"vext.32 q9, q14, q15, #1 \n"
"vmla.f32 q6, q10, %e18[1] \n"
"vmla.f32 q7, q10, %e21[1] \n"
"vmla.f32 q12, q11, %f20[0] \n"
"vmla.f32 q13, q11, %f23[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d28-d30}, [%6] \n"// r1
"add %6, #16 \n"
"vmla.f32 q6, q8, %f18[0] \n"
"vmla.f32 q7, q8, %f21[0] \n"
"vmla.f32 q12, q9, %e20[1] \n"
"vmla.f32 q13, q9, %e23[1] \n"
"vext.32 q10, q14, q15, #1 \n"
"vmla.f32 q6, q14, %e19[0] \n"
"vmla.f32 q7, q14, %e22[0] \n"
"vmla.f32 q12, q14, %e18[0] \n"
"vmla.f32 q13, q14, %e21[0] \n"
"vext.32 q11, q14, q15, #2 \n"
"vmla.f32 q6, q10, %e19[1] \n"
"vmla.f32 q7, q10, %e22[1] \n"
"vmla.f32 q12, q10, %e18[1] \n"
"vmla.f32 q13, q10, %e21[1] \n"
"pld [%7, #192] \n"
"vld1.f32 {d16-d18}, [%7 :64] \n"// r2
"add %7, #16 \n"
"vmla.f32 q6, q11, %f19[0] \n"
"vmla.f32 q7, q11, %f22[0] \n"
"vmla.f32 q12, q11, %f18[0] \n"
"vmla.f32 q13, q11, %f21[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vmla.f32 q6, q8, %e20[0] \n"
"vmla.f32 q7, q8, %e23[0] \n"
"vmla.f32 q12, q8, %e19[0] \n"
"vmla.f32 q13, q8, %e22[0] \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e20[1] \n"
"vmla.f32 q7, q10, %e23[1] \n"
"vmla.f32 q12, q10, %e19[1] \n"
"vmla.f32 q13, q10, %e22[1] \n"
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5 :64] \n"// r0
"add %5, #16 \n"
"vmla.f32 q6, q11, %f20[0] \n"
"vmla.f32 q7, q11, %f23[0] \n"
"vmla.f32 q12, q11, %f19[0] \n"
"vmla.f32 q13, q11, %f22[0] \n"
"pld [%8, #192] \n"
"vld1.f32 {d28-d30}, [%8] \n"// r3
"add %8, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vst1.f32 {d12-d13}, [%1 : 64]!\n"
"vst1.f32 {d14-d15}, [%2 : 64]!\n"
"vext.32 q11, q14, q15, #2 \n"
"vst1.f32 {d24-d25}, [%3]! \n"
"vst1.f32 {d26-d27}, [%4]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %5, #16 \n"
"sub %8, #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr0n), // %3
"=r"(outptr1n), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr0n),
"4"(outptr1n),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k00), // %18
"w"(_k03), // %19
"w"(_k06), // %20
"w"(_k10), // %21
"w"(_k13), // %22
"w"(_k16) // %23
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
float32x4_t _sum0n = vmulq_f32(_r10, _k00);
float32x4_t _sum1n = vmulq_f32(_r10, _k10);
_sum0n = vmlaq_f32(_sum0n, _r20, _k03);
_sum1n = vmlaq_f32(_sum1n, _r20, _k13);
_sum0n = vmlaq_f32(_sum0n, _r30, _k06);
_sum1n = vmlaq_f32(_sum1n, _r30, _k16);
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
_sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3);
_sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
*outptr0n = vaddvq_f32(_sum0n);
*outptr1n = vaddvq_f32(_sum1n);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n));
float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
*outptr0n = vget_lane_f32(_ss01n, 0);
*outptr1n = vget_lane_f32(_ss01n, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum0n = 0.f;
float sum1 = 0.f;
float sum1n = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
sum0n += r1[0] * k0[0];
sum0n += r1[1] * k0[1];
sum0n += r1[2] * k0[2];
sum0n += r2[0] * k0[3];
sum0n += r2[1] * k0[4];
sum0n += r2[2] * k0[5];
sum0n += r3[0] * k0[6];
sum0n += r3[1] * k0[7];
sum0n += r3[2] * k0[8];
sum1n += r1[0] * k1[0];
sum1n += r1[1] * k1[1];
sum1n += r1[2] * k1[2];
sum1n += r2[0] * k1[3];
sum1n += r2[1] * k1[4];
sum1n += r2[2] * k1[5];
sum1n += r3[0] * k1[6];
sum1n += r3[1] * k1[7];
sum1n += r3[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr0n += sum0n;
*outptr1n += sum1n;
#endif // __ARM_NEON
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr1++;
outptr0n++;
outptr1n++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr1 += outw;
outptr0n += outw;
outptr1n += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n"// r0
"add %3, %3, #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v6.4s}, [%1] \n"// _sum0
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v7.4s}, [%2] \n"// _sum1
"fmul v14.4s, v8.4s, %12.s[0] \n"
"fmul v15.4s, v8.4s, %15.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v6.4s, v10.4s, %12.s[1] \n"
"fmla v7.4s, v10.4s, %15.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4s, v9.4s}, [%4] \n"// r1
"add %4, %4, #16 \n"
"fmla v14.4s, v11.4s, %12.s[2] \n"
"fmla v15.4s, v11.4s, %15.s[2] \n"
"fmla v6.4s, v8.4s, %13.s[0] \n"
"fmla v7.4s, v8.4s, %16.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v14.4s, v10.4s, %13.s[1] \n"
"fmla v15.4s, v10.4s, %16.s[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n"// r2
"add %5, %5, #16 \n"
"fmla v6.4s, v11.4s, %13.s[2] \n"
"fmla v7.4s, v11.4s, %16.s[2] \n"
"fmla v14.4s, v8.4s, %14.s[0] \n"
"fmla v15.4s, v8.4s, %17.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v6.4s, v10.4s, %14.s[1] \n"
"fmla v7.4s, v10.4s, %17.s[1] \n"
"fmla v14.4s, v11.4s, %14.s[2] \n"
"fmla v15.4s, v11.4s, %17.s[2] \n"
"fadd v6.4s, v6.4s, v14.4s \n"
"fadd v7.4s, v7.4s, v15.4s \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%2], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #192] \n"
"vld1.f32 {d16-d18}, [%3] \n"// r0
"add %3, #16 \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1] \n"// _sum0
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2] \n"// _sum1
"vmul.f32 q14, q8, %e12[0] \n"
"vmul.f32 q15, q8, %e15[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e12[1] \n"
"vmla.f32 q7, q10, %e15[1] \n"
"pld [%4, #192] \n"
"vld1.f32 {d16-d18}, [%4] \n"// r1
"add %4, #16 \n"
"vmla.f32 q14, q11, %f12[0] \n"
"vmla.f32 q15, q11, %f15[0] \n"
"vmla.f32 q6, q8, %e13[0] \n"
"vmla.f32 q7, q8, %e16[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q14, q10, %e13[1] \n"
"vmla.f32 q15, q10, %e16[1] \n"
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5] \n"// r2
"add %5, #16 \n"
"vmla.f32 q6, q11, %f13[0] \n"
"vmla.f32 q7, q11, %f16[0] \n"
"vmla.f32 q14, q8, %e14[0] \n"
"vmla.f32 q15, q8, %e17[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e14[1] \n"
"vmla.f32 q7, q10, %e17[1] \n"
"vmla.f32 q14, q11, %f14[0] \n"
"vmla.f32 q15, q11, %f17[0] \n"
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q15 \n"
"vst1.f32 {d12-d13}, [%1]! \n"
"vst1.f32 {d14-d15}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
#endif // __ARM_NEON
r0++;
r1++;
r2++;
outptr0++;
outptr1++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
k0 += 9;
k1 += 9;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* kernel0 = kernel + p*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k3456 = vld1q_f32(kernel0+3);
float32x4_t _k6789 = vld1q_f32(kernel0+6);
#else
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#endif // __ARM_NEON
int i = 0;
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v9.4s, v10.4s}, [%3] \n"// r0
"add %3, %3, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n"// _sum
"fmla v7.4s, v9.4s, %14.s[0] \n"
"fmul v6.4s, v11.4s, %14.s[1] \n"
"fmul v13.4s, v12.4s, %14.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v9.4s, v10.4s}, [%4] \n"// r1
"add %4, %4, #16 \n"
"fmla v7.4s, v9.4s, %15.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"fmla v6.4s, v11.4s, %15.s[1] \n"
"fmla v13.4s, v12.4s, %15.s[2] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v8.4s}, [%2] \n"// _sum2
"fmla v8.4s, v9.4s, %14.s[0] \n"
"fmul v14.4s, v11.4s, %14.s[1] \n"
"fmul v15.4s, v12.4s, %14.s[2] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v9.4s, v10.4s}, [%5] \n"// r2
"add %5, %5, #16 \n"
"fmla v7.4s, v9.4s, %16.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"fmla v6.4s, v11.4s, %16.s[1] \n"
"fmla v13.4s, v12.4s, %16.s[2] \n"
"fmla v8.4s, v9.4s, %15.s[0] \n"
"fmla v14.4s, v11.4s, %15.s[1] \n"
"fmla v15.4s, v12.4s, %15.s[2] \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v9.4s, v10.4s}, [%6] \n"// r3
"add %6, %6, #16 \n"
"fmla v8.4s, v9.4s, %16.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"fmla v14.4s, v11.4s, %16.s[1] \n"
"fmla v15.4s, v12.4s, %16.s[2] \n"
"fadd v7.4s, v7.4s, v6.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v9.4s, v10.4s}, [%3] \n"// r0
"fadd v8.4s, v8.4s, v14.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"fadd v8.4s, v8.4s, v15.4s \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n"
"add %3, %3, #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v8.4s}, [%2], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %3, %3, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k3456), // %15
"w"(_k6789) // %16
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%3, #192] \n"
"vld1.f32 {d18-d20}, [%3 :64] \n"// r0
"add %3, #16 \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1 :64] \n"// _sum
"vmla.f32 q7, q9, %e14[0] \n"
"vmul.f32 q6, q11, %e14[1] \n"
"vmul.f32 q13, q12, %f14[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d18-d20}, [%4] \n"// r1
"add %4, #16 \n"
"vmla.f32 q7, q9, %e15[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q6, q11, %e15[1] \n"
"vmla.f32 q13, q12, %f15[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d16-d17}, [%2] \n"// _sum2
"vmla.f32 q8, q9, %e14[0] \n"
"vmul.f32 q14, q11, %e14[1] \n"
"vmul.f32 q15, q12, %f14[0] \n"
"pld [%5, #192] \n"
"vld1.f32 {d18-d20}, [%5 :64] \n"// r2
"add %5, #16 \n"
"vmla.f32 q7, q9, %e16[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q6, q11, %e16[1] \n"
"vmla.f32 q13, q12, %f16[0] \n"
"vmla.f32 q8, q9, %e15[0] \n"
"vmla.f32 q14, q11, %e15[1] \n"
"vmla.f32 q15, q12, %f15[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d18-d20}, [%6] \n"// r3
"add %6, #16 \n"
"vmla.f32 q8, q9, %e16[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q14, q11, %e16[1] \n"
"vmla.f32 q15, q12, %f16[0] \n"
"vadd.f32 q7, q7, q6 \n"
"pld [%3, #192] \n"
"vld1.f32 {d18-d20}, [%3 :64] \n"// r0
"vadd.f32 q8, q8, q14 \n"
"vadd.f32 q7, q7, q13 \n"
"vadd.f32 q8, q8, q15 \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"add %3, #16 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"vst1.f32 {d16-d17}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %3, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k3456), // %15
"w"(_k6789) // %16
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
float32x4_t _sum2 = vmulq_f32(_r10, _k0123);
_sum2 = vmlaq_f32(_sum2, _r20, _k3456);
_sum2 = vmlaq_f32(_sum2, _r30, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
_sum2 = vsetq_lane_f32(*outptr2, _sum2, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
*outptr2 = vaddvq_f32(_sum2);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _sss2 = vpadd_f32(_ss, _ss2);
*outptr = vget_lane_f32(_sss2, 0);
*outptr2 = vget_lane_f32(_sss2, 1);
#endif // __aarch64__
#else
float sum = 0;
float sum2 = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr += sum;
*outptr2 += sum2;
#endif
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n"// r0
"add %2, %2, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n"// _sum
"fmla v7.4s, v8.4s, %10.s[0] \n"
"fmul v13.4s, v10.4s, %10.s[1] \n"
"fmul v14.4s, v11.4s, %10.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n"// r1
"add %3, %3, #16 \n"
"fmla v7.4s, v8.4s, %11.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v13.4s, v10.4s, %11.s[1] \n"
"fmla v14.4s, v11.4s, %11.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v8.4s, v9.4s}, [%4] \n"// r2
"add %4, %4, #16 \n"
"fmla v7.4s, v8.4s, %12.s[0] \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"fmla v13.4s, v10.4s, %12.s[1] \n"
"fmla v14.4s, v11.4s, %12.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n"// r0
"add %2, %2, #16 \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"fadd v7.4s, v7.4s, v14.4s \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n"
"ext v11.16b, v8.16b, v9.16b, #8 \n"
"st1 {v7.4s}, [%1], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %2, %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #192] \n"
"vld1.f32 {d16-d18}, [%2] \n"// r0
"add %2, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1] \n"// _sum
"vmla.f32 q7, q8, %e10[0] \n"
"vmul.f32 q13, q10, %e10[1] \n"
"vmul.f32 q14, q11, %f10[0] \n"
"pld [%3, #192] \n"
"vld1.f32 {d16-d18}, [%3] \n"// r1
"add %3, #16 \n"
"vmla.f32 q7, q8, %e11[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q13, q10, %e11[1] \n"
"vmla.f32 q14, q11, %f11[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d16-d18}, [%4] \n"// r2
"add %4, #16 \n"
"vmla.f32 q7, q8, %e12[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q13, q10, %e12[1] \n"
"vmla.f32 q14, q11, %f12[0] \n"
"pld [%2, #192] \n"
"vld1.f32 {d16-d18}, [%2] \n"// r0
"add %2, #16 \n"
"vadd.f32 q7, q7, q13 \n"
"vadd.f32 q7, q7, q14 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
#endif
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
}
// Transform 3x3 convolution kernels into the 8x8 winograd F(6,3) domain and
// repack them into an interleaved layout for the winograd64 GEMM stage:
// groups of 4 output channels are interleaved (4/2/1 input channels at a
// time), remaining output channels are stored flat at the tail.
//
// kernel    : raw weights, outch x inch x 9 floats
// kernel_tm : output; on return holds the repacked transformed weights
// inch      : number of input channels
// outch     : number of output channels
static void conv3x3s1_winograd64_transform_kernel_neon(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    // 8x8 = 64 transformed taps per (outch, inch) kernel pair
    kernel_tm.create(8*8, inch, outch);

    // G matrix of F(6x6, 3x3): maps one 3-tap kernel row to 8 transformed taps
    const float ktm[8][3] = {
        { 1.0f, 0.0f, 0.0f},
        {-2.0f/9, -2.0f/9, -2.0f/9},
        {-2.0f/9, 2.0f/9, -2.0f/9},
        {1.0f/90, 1.0f/45, 2.0f/45},
        {1.0f/90, -1.0f/45, 2.0f/45},
        {1.0f/45, 1.0f/90, 1.0f/180},
        {1.0f/45, -1.0f/90, 1.0f/180},
        { 0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed: k0/k1/k2 are the three 3-tap rows
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h pass: tmp = G * g (8x3 intermediate)
            float tmp[8][3];
            for (int i=0; i<8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v pass: kernel_tm0 = (G * g) * Gt (full 8x8 transformed kernel)
            for (int j=0; j<8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i=0; i<8; i++)
                {
                    kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // optimized layout for winograd4
    // interleave weights: 4 output channels per destination channel of
    // kernel_tm2, remainder outputs packed after nn_outch
    int nn_outch = outch >> 2;
    int remain_outch_start = nn_outch << 2;

    Mat kernel_tm2(8*8 * inch * 4, 1, nn_outch + (outch % 4 + 3) / 4);

    #pragma omp parallel for
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        float* ktm2 = kernel_tm2.channel(pp);

        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm = kernel_tm.channel(p+1);
        const Mat kernel2_tm = kernel_tm.channel(p+2);
        const Mat kernel3_tm = kernel_tm.channel(p+3);

        int q=0;

#if __ARM_NEON && __aarch64__
        // 4 input channels at a time; r iterates the 16 groups of 4 floats
        // that make up the 64 transformed taps
        for (; q+3<inch; q+=4)
        {
            const float* k00 = kernel0_tm.row(q);
            const float* k01 = kernel0_tm.row(q+1);
            const float* k02 = kernel0_tm.row(q+2);
            const float* k03 = kernel0_tm.row(q+3);
            const float* k10 = kernel1_tm.row(q);
            const float* k11 = kernel1_tm.row(q+1);
            const float* k12 = kernel1_tm.row(q+2);
            const float* k13 = kernel1_tm.row(q+3);
            const float* k20 = kernel2_tm.row(q);
            const float* k21 = kernel2_tm.row(q+1);
            const float* k22 = kernel2_tm.row(q+2);
            const float* k23 = kernel2_tm.row(q+3);
            const float* k30 = kernel3_tm.row(q);
            const float* k31 = kernel3_tm.row(q+1);
            const float* k32 = kernel3_tm.row(q+2);
            const float* k33 = kernel3_tm.row(q+3);

            for (int r=0; r<16; r++)
            {
                // split into two asm blocks because gcc rejects more than
                // 30 operands in one statement; each block gathers 4 floats
                // from 8 source rows and stores them contiguously via the
                // post-incremented ktm2 ("%0") pointer
                asm volatile(
                    "ld1    {v0.4s}, [%1], #16      \n"
                    "ld1    {v1.4s}, [%2], #16      \n"
                    "ld1    {v2.4s}, [%3], #16      \n"
                    "ld1    {v3.4s}, [%4], #16      \n"
                    "st1    {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    "ld1    {v0.4s}, [%5], #16      \n"
                    "ld1    {v1.4s}, [%6], #16      \n"
                    "ld1    {v2.4s}, [%7], #16      \n"
                    "ld1    {v3.4s}, [%8], #16      \n"
                    "st1    {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    : "=r"(ktm2),   // %0
                    "=r"(k00),      // %1
                    "=r"(k01),      // %2
                    "=r"(k02),      // %3
                    "=r"(k03),      // %4
                    "=r"(k10),      // %5
                    "=r"(k11),      // %6
                    "=r"(k12),      // %7
                    "=r"(k13)       // %8
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k01),
                    "3"(k02),
                    "4"(k03),
                    "5"(k10),
                    "6"(k11),
                    "7"(k12),
                    "8"(k13)
                    : "cc", "memory", "v0", "v1", "v2", "v3"
                );
                asm volatile(
                    "ld1    {v0.4s}, [%1], #16      \n"
                    "ld1    {v1.4s}, [%2], #16      \n"
                    "ld1    {v2.4s}, [%3], #16      \n"
                    "ld1    {v3.4s}, [%4], #16      \n"
                    "st1    {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    "ld1    {v0.4s}, [%5], #16      \n"
                    "ld1    {v1.4s}, [%6], #16      \n"
                    "ld1    {v2.4s}, [%7], #16      \n"
                    "ld1    {v3.4s}, [%8], #16      \n"
                    "st1    {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    : "=r"(ktm2),   // %0
                    "=r"(k20),      // %1
                    "=r"(k21),      // %2
                    "=r"(k22),      // %3
                    "=r"(k23),      // %4
                    "=r"(k30),      // %5
                    "=r"(k31),      // %6
                    "=r"(k32),      // %7
                    "=r"(k33)       // %8
                    : "0"(ktm2),
                    "1"(k20),
                    "2"(k21),
                    "3"(k22),
                    "4"(k23),
                    "5"(k30),
                    "6"(k31),
                    "7"(k32),
                    "8"(k33)
                    : "cc", "memory", "v0", "v1", "v2", "v3"
                );
            }
        }
#endif // __ARM_NEON && __aarch64__

        // 2 input channels at a time
        for (; q+1<inch; q+=2)
        {
            const float* k00 = kernel0_tm.row(q);
            const float* k01 = kernel0_tm.row(q+1);
            const float* k10 = kernel1_tm.row(q);
            const float* k11 = kernel1_tm.row(q+1);
            const float* k20 = kernel2_tm.row(q);
            const float* k21 = kernel2_tm.row(q+1);
            const float* k30 = kernel3_tm.row(q);
            const float* k31 = kernel3_tm.row(q+1);

            for (int r=0; r<16; r++)
            {
#if __ARM_NEON
#if __aarch64__
                asm volatile(
                    "ld1    {v0.4s}, [%1], #16      \n"
                    "ld1    {v1.4s}, [%2], #16      \n"
                    "st1    {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1    {v0.4s}, [%3], #16      \n"
                    "ld1    {v1.4s}, [%4], #16      \n"
                    "st1    {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1    {v0.4s}, [%5], #16      \n"
                    "ld1    {v1.4s}, [%6], #16      \n"
                    "st1    {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1    {v0.4s}, [%7], #16      \n"
                    "ld1    {v1.4s}, [%8], #16      \n"
                    "st1    {v0.4s, v1.4s}, [%0], #32 \n"
                    : "=r"(ktm2),   // %0
                    "=r"(k00),      // %1
                    "=r"(k01),      // %2
                    "=r"(k10),      // %3
                    "=r"(k11),      // %4
                    "=r"(k20),      // %5
                    "=r"(k21),      // %6
                    "=r"(k30),      // %7
                    "=r"(k31)       // %8
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k01),
                    "3"(k10),
                    "4"(k11),
                    "5"(k20),
                    "6"(k21),
                    "7"(k30),
                    "8"(k31)
                    : "cc", "memory", "v0", "v1"
                );
#else
                asm volatile(
                    "vld1.f32   {d0-d1}, [%1 :128]! \n"
                    "vld1.f32   {d2-d3}, [%2 :128]! \n"
                    "vst1.f32   {d0-d3}, [%0 :128]! \n"
                    "vld1.f32   {d0-d1}, [%3 :128]! \n"
                    "vld1.f32   {d2-d3}, [%4 :128]! \n"
                    "vst1.f32   {d0-d3}, [%0 :128]! \n"
                    "vld1.f32   {d0-d1}, [%5 :128]! \n"
                    "vld1.f32   {d2-d3}, [%6 :128]! \n"
                    "vst1.f32   {d0-d3}, [%0 :128]! \n"
                    "vld1.f32   {d0-d1}, [%7 :128]! \n"
                    "vld1.f32   {d2-d3}, [%8 :128]! \n"
                    "vst1.f32   {d0-d3}, [%0 :128]! \n"
                    : "=r"(ktm2),   // %0
                    "=r"(k00),      // %1
                    "=r"(k01),      // %2
                    "=r"(k10),      // %3
                    "=r"(k11),      // %4
                    "=r"(k20),      // %5
                    "=r"(k21),      // %6
                    "=r"(k30),      // %7
                    "=r"(k31)       // %8
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k01),
                    "3"(k10),
                    "4"(k11),
                    "5"(k20),
                    "6"(k21),
                    "7"(k30),
                    "8"(k31)
                    : "cc", "memory", "q0", "q1"
                );
#endif // __aarch64__
#else
                // scalar fallback: same interleave as the asm above
                // (pointers advance via the asm output operands on NEON)
                for (int m=0; m<4; m++)
                {
                    ktm2[0 +m] = k00[m];
                    ktm2[4 +m] = k01[m];
                    ktm2[8 +m] = k10[m];
                    ktm2[12+m] = k11[m];
                    ktm2[16+m] = k20[m];
                    ktm2[20+m] = k21[m];
                    ktm2[24+m] = k30[m];
                    ktm2[28+m] = k31[m];
                }

                k00 += 4;
                k01 += 4;
                k10 += 4;
                k11 += 4;
                k20 += 4;
                k21 += 4;
                k30 += 4;
                k31 += 4;
                ktm2 += 32;
#endif // __ARM_NEON
            }
        }

        // single input channel tail
        for (; q<inch; q++)
        {
            const float* k00 = kernel0_tm.row(q);
            const float* k10 = kernel1_tm.row(q);
            const float* k20 = kernel2_tm.row(q);
            const float* k30 = kernel3_tm.row(q);

            for (int r=0; r<16; r++)
            {
#if __ARM_NEON
#if __aarch64__
                asm volatile(
                    "ld1    {v0.4s}, [%1], #16      \n"
                    "ld1    {v1.4s}, [%2], #16      \n"
                    "st1    {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1    {v0.4s}, [%3], #16      \n"
                    "ld1    {v1.4s}, [%4], #16      \n"
                    "st1    {v0.4s, v1.4s}, [%0], #32 \n"
                    : "=r"(ktm2),   // %0
                    "=r"(k00),      // %1
                    "=r"(k10),      // %2
                    "=r"(k20),      // %3
                    "=r"(k30)       // %4
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k10),
                    "3"(k20),
                    "4"(k30)
                    : "cc", "memory", "v0", "v1"
                );
#else
                asm volatile(
                    "vld1.f32   {d0-d1}, [%1 :128]! \n"
                    "vld1.f32   {d2-d3}, [%2 :128]! \n"
                    "vst1.f32   {d0-d3}, [%0 :128]! \n"
                    "vld1.f32   {d0-d1}, [%3 :128]! \n"
                    "vld1.f32   {d2-d3}, [%4 :128]! \n"
                    "vst1.f32   {d0-d3}, [%0 :128]! \n"
                    : "=r"(ktm2),   // %0
                    "=r"(k00),      // %1
                    "=r"(k10),      // %2
                    "=r"(k20),      // %3
                    "=r"(k30)       // %4
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k10),
                    "3"(k20),
                    "4"(k30)
                    : "cc", "memory", "q0", "q1"
                );
#endif // __aarch64__
#else
                for (int m=0; m<4; m++)
                {
                    ktm2[0 +m] = k00[m];
                    ktm2[4 +m] = k10[m];
                    ktm2[8 +m] = k20[m];
                    ktm2[12+m] = k30[m];
                }

                k00 += 4;
                k10 += 4;
                k20 += 4;
                k30 += 4;
                ktm2 += 16;
#endif // __ARM_NEON
            }
        }
    }

    // remainder output channels (outch % 4): stored flat, 8*8*inch floats
    // each, packed consecutively inside channel nn_outch of kernel_tm2
    #pragma omp parallel for
    for (int p = remain_outch_start; p<outch; p++)
    {
        float* ktm2 = (float*)kernel_tm2.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start);

        const Mat kernel0_tm = kernel_tm.channel(p);

        int q = 0;

        for (; q<inch; q++)
        {
            const float* k00 = kernel0_tm.row(q);

            for (int r=0; r<16; r++)
            {
#if __ARM_NEON
#if __aarch64__
                asm volatile(
                    "ld1    {v0.4s}, [%1], #16      \n"
                    "st1    {v0.4s}, [%0], #16      \n"
                    : "=r"(ktm2),   // %0
                    "=r"(k00)       // %1
                    : "0"(ktm2),
                    "1"(k00)
                    : "cc", "memory", "v0"
                );
#else
                asm volatile(
                    "vld1.f32   {d0-d1}, [%1 :128]! \n"
                    "vst1.f32   {d0-d1}, [%0 :128]! \n"
                    : "=r"(ktm2),   // %0
                    "=r"(k00)       // %1
                    : "0"(ktm2),
                    "1"(k00)
                    : "cc", "memory", "q0"
                );
#endif // __aarch64__
#else
                for (int m=0; m<4; m++)
                {
                    ktm2[m] = k00[m];
                }

                k00 += 4;
                ktm2 += 4;
#endif // __ARM_NEON
            }
        }
    }

    // replace the plain transformed weights with the interleaved layout
    kernel_tm = kernel_tm2;
}
static void conv3x3s1_winograd64_transform_kernel_neon5(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(8*8, inch, outch);
const float ktm[8][3] = {
{ 1.0f, 0.0f, 0.0f},
{-2.0f/9, -2.0f/9, -2.0f/9},
{-2.0f/9, 2.0f/9, -2.0f/9},
{1.0f/90, 1.0f/45, 2.0f/45},
{1.0f/90, -1.0f/45, 2.0f/45},
{1.0f/45, 1.0f/90, 1.0f/180},
{1.0f/45, -1.0f/90, 1.0f/180},
{ 0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
for (int q = 0; q<inch; q++)
{
const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i=0; i<8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j=0; j<8; j++)
{
float* tmpp = &tmp[j][0];
for (int i=0; i<8; i++)
{
kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// optimized layout for winograd5
// interleave weights
// Mat kernel_tm2(8*8, inch, outch);
// Mat kernel_tm2(inch, 64, outch);
#if __ARM_NEON && __aarch64__
Mat kernel_tm2(8*4*(inch/4) + 8*(inch%4), 64, outch/8 + (outch%8)/4 + outch%4);
#else
Mat kernel_tm2(4*4*(inch/4) + 4*(inch%4), 64, outch/4 + outch%4);
#endif
int p=0;
#if __aarch64__
for (; p+7<outch; p+=8)
{
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
const Mat kernel4_tm = kernel_tm.channel(p+4);
const Mat kernel5_tm = kernel_tm.channel(p+5);
const Mat kernel6_tm = kernel_tm.channel(p+6);
const Mat kernel7_tm = kernel_tm.channel(p+7);
Mat ktm2 = kernel_tm2.channel(p/8);
for (int r=0; r<64; r++)
{
float* ktm2p = ktm2.row(r);
for (int q=0; q<inch; q++)
{
const float* ktm0_0 = kernel0_tm.row(q);
const float* ktm1_0 = kernel1_tm.row(q);
const float* ktm2_0 = kernel2_tm.row(q);
const float* ktm3_0 = kernel3_tm.row(q);
const float* ktm4_0 = kernel4_tm.row(q);
const float* ktm5_0 = kernel5_tm.row(q);
const float* ktm6_0 = kernel6_tm.row(q);
const float* ktm7_0 = kernel7_tm.row(q);
ktm2p[0] = ktm0_0[r];
ktm2p[1] = ktm1_0[r];
ktm2p[2] = ktm2_0[r];
ktm2p[3] = ktm3_0[r];
ktm2p[4] = ktm4_0[r];
ktm2p[5] = ktm5_0[r];
ktm2p[6] = ktm6_0[r];
ktm2p[7] = ktm7_0[r];
ktm2p += 8;
}
}
}
#endif // __aarch64__
for (; p+3<outch; p+=4)
{
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
#if __ARM_NEON && __aarch64__
Mat ktm2 = kernel_tm2.channel(p/8+(p%8)/4);
#else
Mat ktm2 = kernel_tm2.channel(p/4);
#endif
for (int r=0; r<64; r++)
{
float* ktm2p = ktm2.row(r);
for (int q=0; q<inch; q++)
{
const float* ktm0_0 = kernel0_tm.row(q);
const float* ktm1_0 = kernel1_tm.row(q);
const float* ktm2_0 = kernel2_tm.row(q);
const float* ktm3_0 = kernel3_tm.row(q);
ktm2p[0] = ktm0_0[r];
ktm2p[1] = ktm1_0[r];
ktm2p[2] = ktm2_0[r];
ktm2p[3] = ktm3_0[r];
ktm2p += 4;
}
}
}
for (; p<outch; p++)
{
const Mat kernel0_tm = kernel_tm.channel(p);
#if __ARM_NEON && __aarch64__
Mat ktm2 = kernel_tm2.channel(p/8+(p%8)/4+p%4);
#else
Mat ktm2 = kernel_tm2.channel(p/4+p%4);
#endif
for (int r=0; r<64; r++)
{
float* ktm2p = ktm2.row(r);
for (int q=0; q<inch; q++)
{
const float* ktm0_0 = kernel0_tm.row(q);
ktm2p[0] = ktm0_0[r];
ktm2p += 1;
}
}
}
kernel_tm = kernel_tm2;
}
#if 0//TODO remove old code sometime later
static void conv3x3s1_winograd64_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
bottom_blob_tm.create(8*8, w_tm/8 * h_tm/8, inch);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
const float* r0 = img0.row(i * 6) + j * 6;
float* r0_tm = img0_tm.row(i * w_tm/8 + j);
// TODO neon optimize
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);
r0_tm[1] = tmp12a + tmp12b;
r0_tm[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
r0_tm[3] = tmp34a + tmp34b;
r0_tm[4] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
r0_tm[5] = tmp56a + tmp56b;
r0_tm[6] = tmp56a - tmp56b;
r0_tm += 8;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(8*8, w_tm/8 * h_tm/8, outch);
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
out0_tm.fill(0.f);
out1_tm.fill(0.f);
out2_tm.fill(0.f);
out3_tm.fill(0.f);
int q = 0;
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* r2 = bottom_blob_tm.channel(q+2);
const float* r3 = bottom_blob_tm.channel(q+3);
const float* k00 = kernel0_tm.row(q);
const float* k10 = kernel1_tm.row(q);
const float* k20 = kernel2_tm.row(q);
const float* k30 = kernel3_tm.row(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
// tile
for (int i=0; i<h_tm/8 * w_tm/8; i++)
{
#if __ARM_NEON
#if __aarch64__
for (int m=0; m+7<64; m+=8)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output2_tm = vld1q_f32(output2_tm);
float32x4_t _output3_tm = vld1q_f32(output3_tm);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k00 = vld1q_f32(k00);
k00 += 64;
float32x4_t _k01 = vld1q_f32(k00);
k00 += 64;
float32x4_t _k02 = vld1q_f32(k00);
k00 += 64;
float32x4_t _k03 = vld1q_f32(k00);
k00 += 64;
k00 -= 64*4;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tm = vmlaq_f32(_output0_tm, _r2, _k02);
_output0_tm = vmlaq_f32(_output0_tm, _r3, _k03);
float32x4_t _k10 = vld1q_f32(k10);
k10 += 64;
float32x4_t _k11 = vld1q_f32(k10);
k10 += 64;
float32x4_t _k12 = vld1q_f32(k10);
k10 += 64;
float32x4_t _k13 = vld1q_f32(k10);
k10 += 64;
k10 -= 64*4;
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tm = vmlaq_f32(_output1_tm, _r2, _k12);
_output1_tm = vmlaq_f32(_output1_tm, _r3, _k13);
float32x4_t _k20 = vld1q_f32(k20);
k20 += 64;
float32x4_t _k21 = vld1q_f32(k20);
k20 += 64;
float32x4_t _k22 = vld1q_f32(k20);
k20 += 64;
float32x4_t _k23 = vld1q_f32(k20);
k20 += 64;
k20 -= 64*4;
_output2_tm = vmlaq_f32(_output2_tm, _r0, _k20);
_output2_tm = vmlaq_f32(_output2_tm, _r1, _k21);
_output2_tm = vmlaq_f32(_output2_tm, _r2, _k22);
_output2_tm = vmlaq_f32(_output2_tm, _r3, _k23);
float32x4_t _k30 = vld1q_f32(k30);
k30 += 64;
float32x4_t _k31 = vld1q_f32(k30);
k30 += 64;
float32x4_t _k32 = vld1q_f32(k30);
k30 += 64;
float32x4_t _k33 = vld1q_f32(k30);
k30 += 64;
k30 -= 64*4;
_output3_tm = vmlaq_f32(_output3_tm, _r0, _k30);
_output3_tm = vmlaq_f32(_output3_tm, _r1, _k31);
_output3_tm = vmlaq_f32(_output3_tm, _r2, _k32);
_output3_tm = vmlaq_f32(_output3_tm, _r3, _k33);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output2_tm, _output2_tm);
vst1q_f32(output3_tm, _output3_tm);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
k00 += 4;
k10 += 4;
k20 += 4;
k30 += 4;
float32x4_t _output0_tmn = vld1q_f32(output0_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm);
float32x4_t _output2_tmn = vld1q_f32(output2_tm);
float32x4_t _output3_tmn = vld1q_f32(output3_tm);
float32x4_t _r0n = vld1q_f32(r0);
float32x4_t _r1n = vld1q_f32(r1);
float32x4_t _r2n = vld1q_f32(r2);
float32x4_t _r3n = vld1q_f32(r3);
float32x4_t _k00n = vld1q_f32(k00);
k00 += 64;
float32x4_t _k01n = vld1q_f32(k00);
k00 += 64;
float32x4_t _k02n = vld1q_f32(k00);
k00 += 64;
float32x4_t _k03n = vld1q_f32(k00);
k00 += 64;
k00 -= 64*4;
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k02n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k03n);
float32x4_t _k10n = vld1q_f32(k10);
k10 += 64;
float32x4_t _k11n = vld1q_f32(k10);
k10 += 64;
float32x4_t _k12n = vld1q_f32(k10);
k10 += 64;
float32x4_t _k13n = vld1q_f32(k10);
k10 += 64;
k10 -= 64*4;
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
_output1_tmn = vmlaq_f32(_output1_tmn, _r2n, _k12n);
_output1_tmn = vmlaq_f32(_output1_tmn, _r3n, _k13n);
float32x4_t _k20n = vld1q_f32(k20);
k20 += 64;
float32x4_t _k21n = vld1q_f32(k20);
k20 += 64;
float32x4_t _k22n = vld1q_f32(k20);
k20 += 64;
float32x4_t _k23n = vld1q_f32(k20);
k20 += 64;
k20 -= 64*4;
_output2_tmn = vmlaq_f32(_output2_tmn, _r0n, _k20n);
_output2_tmn = vmlaq_f32(_output2_tmn, _r1n, _k21n);
_output2_tmn = vmlaq_f32(_output2_tmn, _r2n, _k22n);
_output2_tmn = vmlaq_f32(_output2_tmn, _r3n, _k23n);
float32x4_t _k30n = vld1q_f32(k30);
k30 += 64;
float32x4_t _k31n = vld1q_f32(k30);
k30 += 64;
float32x4_t _k32n = vld1q_f32(k30);
k30 += 64;
float32x4_t _k33n = vld1q_f32(k30);
k30 += 64;
k30 -= 64*4;
_output3_tmn = vmlaq_f32(_output3_tmn, _r0n, _k30n);
_output3_tmn = vmlaq_f32(_output3_tmn, _r1n, _k31n);
_output3_tmn = vmlaq_f32(_output3_tmn, _r2n, _k32n);
_output3_tmn = vmlaq_f32(_output3_tmn, _r3n, _k33n);
vst1q_f32(output0_tm, _output0_tmn);
vst1q_f32(output1_tm, _output1_tmn);
vst1q_f32(output2_tm, _output2_tmn);
vst1q_f32(output3_tm, _output3_tmn);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
k00 += 4;
k10 += 4;
k20 += 4;
k30 += 4;
}
#else // __aarch64__
asm volatile(
"mov r4, #8 \n"
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm
"0: \n"
"pld [%4, #256] \n"
"vld1.f32 {d0-d3}, [%4 :128]! \n"//q0 q1 = _r0
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k00
"add %8, %8, #256 \n"
"vmla.f32 q8, q0, q10 \n"
"vmla.f32 q9, q1, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]\n"//q12 q13 = _output1_tm
"pld [%9, #256] \n"
"vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k10
"add %9, %9, #256 \n"
"vmla.f32 q12, q0, q14 \n"
"vmla.f32 q13, q1, q15 \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"//q2 q3 = _r1
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k01
"add %8, %8, #256 \n"
"vmla.f32 q8, q2, q10 \n"
"vmla.f32 q9, q3, q11 \n"
"pld [%9, #256] \n"
"vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k11
"add %9, %9, #256 \n"
"vmla.f32 q12, q2, q14 \n"
"vmla.f32 q13, q3, q15 \n"
"pld [%6, #256] \n"
"vld1.f32 {d8-d11}, [%6 :128]!\n"//q4 q5 = _r2
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k02
"add %8, %8, #256 \n"
"vmla.f32 q8, q4, q10 \n"
"vmla.f32 q9, q5, q11 \n"
"pld [%9, #256] \n"
"vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k12
"add %9, %9, #256 \n"
"vmla.f32 q12, q4, q14 \n"
"vmla.f32 q13, q5, q15 \n"
"pld [%7, #256] \n"
"vld1.f32 {d12-d15}, [%7 :128]!\n"//q6 q7 = _r3
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k03
"sub %8, %8, #736 \n"
"vmla.f32 q8, q6, q10 \n"
"vmla.f32 q9, q7, q11 \n"
"pld [%9, #256] \n"
"vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k13
"sub %9, %9, #736 \n"
"vmla.f32 q12, q6, q14 \n"
"vmla.f32 q13, q7, q15 \n"
"vst1.f32 {d16-d19}, [%0 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]\n"//q8 q9 = _output2_tm
"pld [%10, #256] \n"
"vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k20
"add %10, %10, #256 \n"
"vmla.f32 q8, q0, q10 \n"
"vmla.f32 q9, q1, q11 \n"
"vst1.f32 {d24-d27}, [%1 :128]!\n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]\n"//q12 q13 = _output3_tm
"pld [%11, #256] \n"
"vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k30
"add %11, %11, #256 \n"
"vmla.f32 q12, q0, q14 \n"
"vmla.f32 q13, q1, q15 \n"
"pld [%10, #256] \n"
"vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k21
"add %10, %10, #256 \n"
"vmla.f32 q8, q2, q10 \n"
"vmla.f32 q9, q3, q11 \n"
"pld [%11, #256] \n"
"vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k31
"add %11, %11, #256 \n"
"vmla.f32 q12, q2, q14 \n"
"vmla.f32 q13, q3, q15 \n"
"pld [%10, #256] \n"
"vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k22
"add %10, %10, #256 \n"
"vmla.f32 q8, q4, q10 \n"
"vmla.f32 q9, q5, q11 \n"
"pld [%11, #256] \n"
"vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k32
"add %11, %11, #256 \n"
"vmla.f32 q12, q4, q14 \n"
"vmla.f32 q13, q5, q15 \n"
"pld [%10, #256] \n"
"vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k23
"sub %10, %10, #736 \n"
"vmla.f32 q8, q6, q10 \n"
"vmla.f32 q9, q7, q11 \n"
"pld [%11, #256] \n"
"vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k33
"sub %11, %11, #736 \n"
"vmla.f32 q12, q6, q14 \n"
"vmla.f32 q13, q7, q15 \n"
"vst1.f32 {d16-d19}, [%2 :128]!\n"
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm
"subs r4, r4, #1 \n"
"vst1.f32 {d24-d27}, [%3 :128]!\n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(r2), // %6
"=r"(r3), // %7
"=r"(k00), // %8
"=r"(k10), // %9
"=r"(k20), // %10
"=r"(k30) // %11
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(r2),
"7"(r3),
"8"(k00),
"9"(k10),
"10"(k20),
"11"(k30)
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
k00 -= 64;
k10 -= 64;
k20 -= 64;
k30 -= 64;
#else
for (int m=0; m<64; m++)
{
output0_tm[m] += r0[m] * k00[m];
k00 += 64;
output0_tm[m] += r1[m] * k00[m];
k00 += 64;
output0_tm[m] += r2[m] * k00[m];
k00 += 64;
output0_tm[m] += r3[m] * k00[m];
k00 += 64;
k00 -= 64 * 4;
output1_tm[m] += r0[m] * k10[m];
k10 += 64;
output1_tm[m] += r1[m] * k10[m];
k10 += 64;
output1_tm[m] += r2[m] * k10[m];
k10 += 64;
output1_tm[m] += r3[m] * k10[m];
k10 += 64;
k10 -= 64 * 4;
output2_tm[m] += r0[m] * k20[m];
k20 += 64;
output2_tm[m] += r1[m] * k20[m];
k20 += 64;
output2_tm[m] += r2[m] * k20[m];
k20 += 64;
output2_tm[m] += r3[m] * k20[m];
k20 += 64;
k20 -= 64 * 4;
output3_tm[m] += r0[m] * k30[m];
k30 += 64;
output3_tm[m] += r1[m] * k30[m];
k30 += 64;
output3_tm[m] += r2[m] * k30[m];
k30 += 64;
output3_tm[m] += r3[m] * k30[m];
k30 += 64;
k30 -= 64 * 4;
}
r0 += 64;
r1 += 64;
r2 += 64;
r3 += 64;
output0_tm += 64;
output1_tm += 64;
output2_tm += 64;
output3_tm += 64;
#endif // __ARM_NEON
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
// tile
for (int i=0; i<h_tm/8 * w_tm/8; i++)
{
// TODO neon optimize
for (int m=0; m<64; m++)
{
output0_tm[m] += r0[m] * k0[m];
output1_tm[m] += r0[m] * k1[m];
output2_tm[m] += r0[m] * k2[m];
output3_tm[m] += r0[m] * k3[m];
}
r0 += 64;
output0_tm += 64;
output1_tm += 64;
output2_tm += 64;
output3_tm += 64;
}
}
}
#pragma omp parallel for
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
out0_tm.fill(0.f);
int q = 0;
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* r2 = bottom_blob_tm.channel(q+2);
const float* r3 = bottom_blob_tm.channel(q+3);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q+1);
const float* k2 = kernel0_tm.row(q+2);
const float* k3 = kernel0_tm.row(q+3);
float* output0_tm = out0_tm;
// tile
for (int i=0; i<h_tm/8 * w_tm/8; i++)
{
#if __ARM_NEON
#if __aarch64__
for (int m=0; m+7<64; m+=8)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k2 = vld1q_f32(k2);
float32x4_t _k3 = vld1q_f32(k3);
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tm = vmlaq_f32(_output0_tm, _r2, _k2);
_output0_tm = vmlaq_f32(_output0_tm, _r3, _k3);
vst1q_f32(output0_tm, _output0_tm);
output0_tm += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
k0 += 4;
k1 += 4;
k2 += 4;
k3 += 4;
float32x4_t _output0_tmn = vld1q_f32(output0_tm);
float32x4_t _r0n = vld1q_f32(r0);
float32x4_t _r1n = vld1q_f32(r1);
float32x4_t _r2n = vld1q_f32(r2);
float32x4_t _r3n = vld1q_f32(r3);
float32x4_t _k0n = vld1q_f32(k0);
float32x4_t _k1n = vld1q_f32(k1);
float32x4_t _k2n = vld1q_f32(k2);
float32x4_t _k3n = vld1q_f32(k3);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k2n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k3n);
vst1q_f32(output0_tm, _output0_tmn);
output0_tm += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
k0 += 4;
k1 += 4;
k2 += 4;
k3 += 4;
}
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"mov r4, %0 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q13, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
"vmla.f32 q13, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"vst1.f32 {d24-d27}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q15, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
"vmla.f32 q15, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"vst1.f32 {d28-d31}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q13, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
"vmla.f32 q13, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"vst1.f32 {d24-d27}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q15, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
"vmla.f32 q15, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"vst1.f32 {d28-d31}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q13, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
"vmla.f32 q13, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"vst1.f32 {d24-d27}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q15, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
"vmla.f32 q15, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"vst1.f32 {d28-d31}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q13, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
"vmla.f32 q13, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"vst1.f32 {d24-d27}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q15, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"vmla.f32 q15, q9, q11 \n"
"vst1.f32 {d28-d31}, [r4 :128]!\n"
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(k0), // %5
"=r"(k1), // %6
"=r"(k2), // %7
"=r"(k3) // %8
: "0"(output0_tm),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(k0),
"6"(k1),
"7"(k2),
"8"(k3)
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
k0 -= 64;
k1 -= 64;
k2 -= 64;
k3 -= 64;
#else
for (int m=0; m<64; m++)
{
output0_tm[m] += r0[m] * k0[m];
output0_tm[m] += r1[m] * k1[m];
output0_tm[m] += r2[m] * k2[m];
output0_tm[m] += r3[m] * k3[m];
}
r0 += 64;
r1 += 64;
r2 += 64;
r3 += 64;
output0_tm += 64;
#endif // __ARM_NEON
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k0 = kernel0_tm.row(q);
float* output0_tm = out0_tm;
// tile
for (int i=0; i<h_tm/8 * w_tm/8; i++)
{
// TODO neon optimize
for (int m=0; m<64; m++)
{
output0_tm[m] += r0[m] * k0[m];
}
r0 += 64;
output0_tm += 64;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
const float* output0_tm = out0_tm.row(i * w_tm/8 + j);
float* output0 = out0.row(i * 6) + j * 6;
// TODO neon optimize
for (int m=0; m<8; m++)
{
float tmp024a = output0_tm[1] + output0_tm[2];
float tmp135a = output0_tm[1] - output0_tm[2];
float tmp024b = output0_tm[3] + output0_tm[4];
float tmp135b = output0_tm[3] - output0_tm[4];
float tmp024c = output0_tm[5] + output0_tm[6];
float tmp135c = output0_tm[5] - output0_tm[6];
tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm += 8;
}
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
// Winograd F(6x6, 3x3) convolution, stride 1, NEON variant 2.
//
// Three phases:
//   1. input transform  : B^T * d * B per 8x8 input tile (overlapping, step 6)
//   2. dot              : per-element multiply-accumulate with the
//                         pre-transformed kernel (kernel_tm), over input channels
//   3. output transform : A^T * m * A, producing a 6x6 output tile, plus bias
//
// Interface: bottom_blob = input feature maps, top_blob = preallocated output,
// kernel_tm = kernel already in Winograd transform domain (64 floats per
// in/out channel pair — assumed laid out as kernel_tm.channel(outch).row(inch);
// TODO(review): confirm against the kernel-transform routine), _bias = optional
// per-output-channel bias (may be empty; then bias pointer is NULL).
static void conv3x3s1_winograd64_neon2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
// Output dims are rounded up to a multiple of 6 (tile size), and the input
// gets a 2-pixel overhang so each 8x8 input tile (6 + kernel-1) is in bounds.
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
// Layout: each tile's 8x8 transform is split across 4 row-groups of 16
// floats (rows 2m and 2m+1 packed side by side); row-groups for all tiles
// of one channel are stacked, so the channel has 4*tiles rows of 16 floats.
bottom_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, inch);
const int tiles = w_tm/8 * h_tm/8;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// tmp holds the column-transformed tile (first pass down columns);
// the second pass transforms tmp's rows and scatters into img0_tm.
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
const float* r0 = img0.row(i * 6) + j * 6;
// Four destinations: rows 0/1, 2/3, 4/5, 6/7 of the transformed tile.
float* r0_tm01 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm23 = img0_tm.row(tiles + i * w_tm/8 + j);
float* r0_tm45 = img0_tm.row(tiles * 2 + i * w_tm/8 + j);
float* r0_tm67 = img0_tm.row(tiles * 3 + i * w_tm/8 + j);
// Pass 1: transform each of the 8 columns (m indexes the column),
// walking r0 down one image row per iteration.
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tms[4] = { r0_tm01, r0_tm23, r0_tm45, r0_tm67 };
// Pass 2: transform each row of tmp; row m lands in row-group m/2,
// offset 8 floats for odd m (two rows packed per 16-float group).
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
float* r0_tm = r0_tms[m/2] + (m%2) * 8;
r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);
r0_tm[1] = tmp12a + tmp12b;
r0_tm[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
r0_tm[3] = tmp34a + tmp34b;
r0_tm[4] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
r0_tm[5] = tmp56a + tmp56b;
r0_tm[6] = tmp56a - tmp56b;
}
}
}
}
}
// Release the padded input early to reduce peak memory.
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
// Element-wise multiply-accumulate in the transform domain:
// top_blob_tm[p] = sum over q of bottom_blob_tm[q] * kernel_tm[p].row(q).
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, outch);
const int tiles = h_tm/8 * w_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
out0_tm.fill(0.f);
int q = 0;
// Main loop: two input channels per iteration.
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q+1);
float* output0_tm = out0_tm;
// r indexes the 4 row-groups of the transform; for each, a 16-float
// kernel slice (4 NEON quads) is broadcast over all tiles.
for (int r=0; r<4; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k0n = vld1q_f32(k0+4);
float32x4_t _k0nn = vld1q_f32(k0+8);
float32x4_t _k0nnn = vld1q_f32(k0+12);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k1n = vld1q_f32(k1+4);
float32x4_t _k1nn = vld1q_f32(k1+8);
float32x4_t _k1nnn = vld1q_f32(k1+12);
#else
// ARMv7: load the 16-float kernel slices via asm; note the `!`
// post-increments advance k0/k1 by 16 floats here (no `k0 += 16`
// is done later on this path).
float32x4_t _k0;
float32x4_t _k0n;
float32x4_t _k0nn;
float32x4_t _k0nnn;
float32x4_t _k1;
float32x4_t _k1n;
float32x4_t _k1nn;
float32x4_t _k1nnn;
asm volatile(
"pld [%0, #512] \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"pld [%1, #512] \n"
"vld1.f32 {%e4-%f4}, [%1 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e5-%f5}, [%1 :128]! \n"
"vld1.f32 {%e6-%f6}, [%0 :128]! \n"
"vld1.f32 {%e8-%f8}, [%1 :128]! \n"
"vld1.f32 {%e7-%f7}, [%0 :128]! \n"
"vld1.f32 {%e9-%f9}, [%1 :128]! \n"
: "=r"(k0), // %0
"=r"(k1), // %1
"=w"(_k0), // %2
"=w"(_k0n), // %3
"=w"(_k1), // %4
"=w"(_k1n), // %5
"=w"(_k0nn), // %6
"=w"(_k0nnn), // %7
"=w"(_k1nn), // %8
"=w"(_k1nnn) // %9
: "0"(k0),
"1"(k1)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
// Process tiles 4 at a time (each tile consumes 16 floats per group).
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// aarch64 intrinsics path: 4 tiles per iteration, unrolled as 8
// half-tile steps of 8 floats, alternating k0/k0n and k0nn/k0nnn.
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
}
#else
// ARMv7 asm path: software-pipelined 4-tile loop; loads run ahead of
// the multiply-accumulates, so the trailing `sub` instructions rewind
// the over-advanced output/r0 pointers after the loop exits.
if (nn > 0)
{
asm volatile(
"mov r4, %1 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"subs %0, #1 \n"
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"bne 0b \n"
"sub %1, #32 \n"
"sub %2, #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(r1) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(r1),
"w"(_k0), // %8
"w"(_k0n), // %9
"w"(_k1), // %10
"w"(_k1n), // %11
"w"(_k0nn), // %12
"w"(_k0nnn), // %13
"w"(_k1nn), // %14
"w"(_k1nnn) // %15
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Tail: leftover tiles (NEON) or all tiles (scalar build).
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"mov r4, %0 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q6 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
"vmla.f32 q9, q13, %q7 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"vmla.f32 q8, q14, %q8 \n"
"pld [%0, #256] \n"
"vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm
"vmla.f32 q9, q15, %q9 \n"
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"vst1.f32 {d16-d19}, [r4 :128] \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
"vmla.f32 q10, q14, %q12 \n"
"vmla.f32 q11, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(r1) // %2
: "0"(output0_tm),
"1"(r0),
"2"(r1),
"w"(_k0), // %6
"w"(_k0n), // %7
"w"(_k1), // %8
"w"(_k1n), // %9
"w"(_k0nn), // %10
"w"(_k0nnn), // %11
"w"(_k1nn), // %12
"w"(_k1nnn) // %13
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<16; m++)
{
output0_tm[m] += r0[m] * k0[m];
output0_tm[m] += r1[m] * k1[m];
}
r0 += 16;
r1 += 16;
output0_tm += 16;
#endif // __ARM_NEON
}
// Advance kernel slice: on ARMv7 the asm load above already moved
// k0/k1 forward by 16 via post-increment, so only the aarch64
// intrinsics path and the scalar path step the pointers here.
#if __ARM_NEON
#if __aarch64__
k0 += 16;
k1 += 16;
#endif // __aarch64__
#else
k0 += 16;
k1 += 16;
#endif // __ARM_NEON
}
}
// Tail loop: remaining single input channel (odd inch).
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k0 = kernel0_tm.row(q);
float* output0_tm = out0_tm;
for (int r=0; r<4; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k0n = vld1q_f32(k0+4);
float32x4_t _k0nn = vld1q_f32(k0+8);
float32x4_t _k0nnn = vld1q_f32(k0+12);
#else
// ARMv7: asm loads also advance k0 by 16 floats (post-increment).
float32x4_t _k0;
float32x4_t _k0n;
float32x4_t _k0nn;
float32x4_t _k0nnn;
asm volatile(
"pld [%0, #512] \n"
"vld1.f32 {%e1-%f1}, [%0 :128]! \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e4-%f4}, [%0 :128]! \n"
: "=r"(k0), // %0
"=w"(_k0), // %1
"=w"(_k0n), // %2
"=w"(_k0nn), // %3
"=w"(_k0nnn) // %4
: "0"(k0)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
for (int i=0; i<tiles; i++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"mov r4, %0 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q4 \n"
"vmla.f32 q9, q13, %q5 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q6 \n"
"vst1.f32 {d16-d19}, [r4 :128] \n"
"vmla.f32 q11, q13, %q7 \n"
"vst1.f32 {d20-d23}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k0), // %4
"w"(_k0n), // %5
"w"(_k0nn), // %6
"w"(_k0nnn) // %7
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<16; m++)
{
output0_tm[m] += r0[m] * k0[m];
}
r0 += 16;
output0_tm += 16;
#endif // __ARM_NEON
}
// Same pointer bookkeeping as the paired-channel loop above:
// ARMv7 asm already advanced k0.
#if __ARM_NEON
#if __aarch64__
k0 += 16;
#endif // __aarch64__
#else
k0 += 16;
#endif // __ARM_NEON
}
}
}
}
// Release the transformed input; only top_blob_tm is needed from here on.
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
// tmp holds the column-transformed 8x8 tile before the row pass.
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
// Gather the 4 row-groups written by the dot phase (same layout
// as the input transform: rows 2m/2m+1 packed per 16-float row).
const float* output0_tm01 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm23 = out0_tm.row(tiles + i * w_tm/8 + j);
const float* output0_tm45 = out0_tm.row(tiles * 2 + i * w_tm/8 + j);
const float* output0_tm67 = out0_tm.row(tiles * 3 + i * w_tm/8 + j);
float* output0 = out0.row(i * 6) + j * 6;
const float* output0_tms[4] = { output0_tm01, output0_tm23, output0_tm45, output0_tm67 };
// Pass 1: A^T applied down the 8 rows, producing 6 rows in tmp.
for (int m=0; m<8; m++)
{
const float* output0_tm = output0_tms[m/2] + (m%2) * 8;
float tmp024a = output0_tm[1] + output0_tm[2];
float tmp135a = output0_tm[1] - output0_tm[2];
float tmp024b = output0_tm[3] + output0_tm[4];
float tmp135b = output0_tm[3] - output0_tm[4];
float tmp024c = output0_tm[5] + output0_tm[6];
float tmp135c = output0_tm[5] - output0_tm[6];
tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
}
// Pass 2: A applied across columns; bias added once per output.
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
// Crop the 6-aligned bordered result back to the caller's true output size.
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
// Winograd F(6x6, 3x3) stride-1 convolution, NEON variant 3.
//
// Computes top_blob = conv3x3s1(bottom_blob, kernel) + bias in three phases:
//   1. input transform   : each 8x8 input tile d -> B^T * d * B
//   2. dot               : per-tile elementwise multiply-accumulate over
//                          input channels against the pre-transformed kernel
//   3. output transform  : each 8x8 product tile m -> A^T * m * A (6x6 output)
//
// kernel_tm must already hold the Winograd-transformed 8x8 kernel tiles
// (one row per input channel, one channel per output channel).
// _bias may be empty; bias then reads as NULL and 0.f is used.
// NOTE(review): top_blob appears to be pre-allocated by the caller with the
// final (un-padded) output size -- confirm against call sites.
static void conv3x3s1_winograd64_neon3(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
// Output is produced in 6x6 tiles; each tile needs an 8x8 input window
// (6 + kernel_size - 1 = 8), so consecutive windows overlap by 2 and the
// padded input is (6n + 2) in each dimension.
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
// bottom_blob_tm layout: channel q holds all transformed tiles of input
// channel q; transformed element m of tile t lives at row (t + tiles*m),
// 8 floats per row (one per column of the 8x8 transform).
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
bottom_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, inch);
const int tiles = w_tm/8 * h_tm/8;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// Two-pass separable transform: first pass over columns into tmp,
// second pass over tmp rows into the 8 destination rows.
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
const float* r0 = img0.row(i * 6) + j * 6;
float* r0_tm0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles * 2);
float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles * 3);
float* r0_tm4 = img0_tm.row(i * w_tm/8 + j + tiles * 4);
float* r0_tm5 = img0_tm.row(i * w_tm/8 + j + tiles * 5);
float* r0_tm6 = img0_tm.row(i * w_tm/8 + j + tiles * 6);
float* r0_tm7 = img0_tm.row(i * w_tm/8 + j + tiles * 7);
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tms[8] = { r0_tm0, r0_tm1, r0_tm2, r0_tm3, r0_tm4, r0_tm5, r0_tm6, r0_tm7 };
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
float* r0_tm = r0_tms[m];
r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);
r0_tm[1] = tmp12a + tmp12b;
r0_tm[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
r0_tm[3] = tmp34a + tmp34b;
r0_tm[4] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
r0_tm[5] = tmp56a + tmp56b;
r0_tm[6] = tmp56a - tmp56b;
}
}
}
}
}
// release the padded input early -- only the transformed copy is needed now
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
// For each (output channel, transform row r) pair, accumulate over input
// channels: out_tm += r_tm (elementwise) * k_tm.  Output channels are
// processed two at a time (nn_outch pairs) with input channels also
// unrolled by two, to reuse each loaded input vector across more MACs.
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, outch);
const int tiles = h_tm/8 * w_tm/8;
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
out0_tm.fill(0.f);
out1_tm.fill(0.f);
int q = 0;
// main loop: two input channels at a time
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q+1);
const float* k10 = kernel1_tm.row(q);
const float* k11 = kernel1_tm.row(q+1);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
// r indexes the 8 transform rows; the 8 kernel coefficients for this
// row are loop-invariant and kept in registers across all tiles
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
float32x4_t _k01 = vld1q_f32(k01);
float32x4_t _k01n = vld1q_f32(k01+4);
float32x4_t _k10 = vld1q_f32(k10);
float32x4_t _k10n = vld1q_f32(k10+4);
float32x4_t _k11 = vld1q_f32(k11);
float32x4_t _k11n = vld1q_f32(k11+4);
#else
// ARMv7: load the four 8-float kernel rows with post-incrementing
// vld1 (advances k00/k01/k10/k11 by 8 floats as a side effect)
float32x4_t _k00;
float32x4_t _k00n;
float32x4_t _k01;
float32x4_t _k01n;
float32x4_t _k10;
float32x4_t _k10n;
float32x4_t _k11;
float32x4_t _k11n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e4-%f4}, [%0 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {%e6-%f6}, [%1 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {%e8-%f8}, [%2 :128]! \n"
"pld [%3, #256] \n"
"vld1.f32 {%e10-%f10}, [%3 :128]! \n"
"vld1.f32 {%e5-%f5}, [%0 :128]! \n"
"vld1.f32 {%e7-%f7}, [%1 :128]! \n"
"vld1.f32 {%e9-%f9}, [%2 :128]! \n"
"vld1.f32 {%e11-%f11}, [%3 :128]! \n"
: "=r"(k00), // %0
"=r"(k01), // %1
"=r"(k10), // %2
"=r"(k11), // %3
"=w"(_k00), // %4
"=w"(_k00n), // %5
"=w"(_k01), // %6
"=w"(_k01n), // %7
"=w"(_k10), // %8
"=w"(_k10n), // %9
"=w"(_k11), // %10
"=w"(_k11n) // %11
: "0"(k00),
"1"(k01),
"2"(k10),
"3"(k11)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// 4x unrolled: each iteration processes four 8-float tiles for both
// output channels (identical stanza repeated four times)
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
}
#else
// ARMv7 software-pipelined equivalent of the 4x-unrolled loop above:
// loads for the next tile are interleaved with the MACs of the current
// one; q8-q15 hold the accumulators and streamed data
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(r1) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(r1),
"w"(_k00), // %10
"w"(_k00n), // %11
"w"(_k01), // %12
"w"(_k01n), // %13
"w"(_k10), // %14
"w"(_k10n), // %15
"w"(_k11), // %16
"w"(_k11n) // %17
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// leftover tiles (tiles % 4), or all tiles in the scalar build
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
#else
asm volatile(
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(r0), // %2
"=r"(r1) // %3
: "0"(output0_tm),
"1"(output1_tm),
"2"(r0),
"3"(r1),
"w"(_k00), // %8
"w"(_k00n), // %9
"w"(_k01), // %10
"w"(_k01n), // %11
"w"(_k10), // %12
"w"(_k10n), // %13
"w"(_k11), // %14
"w"(_k11n) // %15
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
// scalar fallback: 8 coefficients per tile row
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
output0_tm[m] += r1[m] * k01[m];
output1_tm[m] += r0[m] * k10[m];
output1_tm[m] += r1[m] * k11[m];
}
r0 += 8;
r1 += 8;
output0_tm += 8;
output1_tm += 8;
#endif // __ARM_NEON
}
// advance kernel pointers to the next transform row; on ARMv7 the
// inline-asm loads above already post-incremented them
#if __ARM_NEON
#if __aarch64__
k00 += 8;
k01 += 8;
k10 += 8;
k11 += 8;
#endif // __aarch64__
#else
k00 += 8;
k01 += 8;
k10 += 8;
k11 += 8;
#endif // __ARM_NEON
}
}
// tail: odd remaining input channel (single-channel version of the above)
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k00 = kernel0_tm.row(q);
const float* k10 = kernel1_tm.row(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
float32x4_t _k10 = vld1q_f32(k10);
float32x4_t _k10n = vld1q_f32(k10+4);
#else
float32x4_t _k00;
float32x4_t _k00n;
float32x4_t _k10;
float32x4_t _k10n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {%e4-%f4}, [%1 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e5-%f5}, [%1 :128]! \n"
: "=r"(k00), // %0
"=r"(k10), // %1
"=w"(_k00), // %2
"=w"(_k00n), // %3
"=w"(_k10), // %4
"=w"(_k10n) // %5
: "0"(k00),
"1"(k10)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"w"(_k00), // %8
"w"(_k00n), // %9
"w"(_k10), // %10
"w"(_k10n) // %11
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
#else
asm volatile(
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q6 \n"
"vmla.f32 q9, q13, %q7 \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q8 \n"
"vmla.f32 q11, q13, %q9 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(r0) // %2
: "0"(output0_tm),
"1"(output1_tm),
"2"(r0),
"w"(_k00), // %6
"w"(_k00n), // %7
"w"(_k10), // %8
"w"(_k10n) // %9
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13"
);
#endif // __aarch64__
#else
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
output1_tm[m] += r0[m] * k10[m];
}
r0 += 8;
output0_tm += 8;
output1_tm += 8;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k00 += 8;
k10 += 8;
#endif // __aarch64__
#else
k00 += 8;
k10 += 8;
#endif // __ARM_NEON
}
}
}
// leftover output channels (outch odd): one output channel at a time,
// still unrolling input channels by two where possible
#pragma omp parallel for
for (int p = remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
out0_tm.fill(0.f);
int q = 0;
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q+1);
float* output0_tm = out0_tm;
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
float32x4_t _k01 = vld1q_f32(k01);
float32x4_t _k01n = vld1q_f32(k01+4);
#else
float32x4_t _k00;
float32x4_t _k00n;
float32x4_t _k01;
float32x4_t _k01n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {%e4-%f4}, [%1 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e5-%f5}, [%1 :128]! \n"
: "=r"(k00), // %0
"=r"(k01), // %1
"=w"(_k00), // %2
"=w"(_k00n), // %3
"=w"(_k01), // %4
"=w"(_k01n) // %5
: "0"(k00),
"1"(k01)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(r1) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(r1),
"w"(_k00), // %8
"w"(_k00n), // %9
"w"(_k01), // %10
"w"(_k01n) // %11
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q6 \n"
"vmla.f32 q9, q13, %q7 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q8 \n"
"vmla.f32 q9, q15, %q9 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(r1) // %2
: "0"(output0_tm),
"1"(r0),
"2"(r1),
"w"(_k00), // %6
"w"(_k00n), // %7
"w"(_k01), // %8
"w"(_k01n) // %9
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
output0_tm[m] += r1[m] * k01[m];
}
r0 += 8;
r1 += 8;
output0_tm += 8;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k00 += 8;
k01 += 8;
#endif // __aarch64__
#else
k00 += 8;
k01 += 8;
#endif // __ARM_NEON
}
}
// tail: odd remaining input channel for this single output channel
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k00 = kernel0_tm.row(q);
float* output0_tm = out0_tm;
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
#else
float32x4_t _k00;
float32x4_t _k00n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e1-%f1}, [%0 :128]! \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
: "=r"(k00), // %0
"=w"(_k00), // %1
"=w"(_k00n) // %2
: "0"(k00)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
for (int i=0; i<tiles; i++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q4 \n"
"vmla.f32 q9, q13, %q5 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00), // %4
"w"(_k00n) // %5
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
}
r0 += 8;
output0_tm += 8;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k00 += 8;
#endif // __aarch64__
#else
k00 += 8;
#endif // __ARM_NEON
}
}
}
}
// release the transformed input -- only the transformed products remain
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
// Inverse transform A^T * m * A: each 8x8 product tile collapses to a 6x6
// output tile; bias is added once per output element here.
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
// two-pass separable inverse transform, 8 columns -> 6 outputs
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles);
const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles * 2);
const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles * 3);
const float* output0_tm4 = out0_tm.row(i * w_tm/8 + j + tiles * 4);
const float* output0_tm5 = out0_tm.row(i * w_tm/8 + j + tiles * 5);
const float* output0_tm6 = out0_tm.row(i * w_tm/8 + j + tiles * 6);
const float* output0_tm7 = out0_tm.row(i * w_tm/8 + j + tiles * 7);
float* output0 = out0.row(i * 6) + j * 6;
const float* output0_tms[8] = { output0_tm0, output0_tm1, output0_tm2, output0_tm3, output0_tm4, output0_tm5, output0_tm6, output0_tm7 };
for (int m=0; m<8; m++)
{
const float* output0_tm = output0_tms[m];
float tmp024a = output0_tm[1] + output0_tm[2];
float tmp135a = output0_tm[1] - output0_tm[2];
float tmp024b = output0_tm[3] + output0_tm[4];
float tmp135b = output0_tm[3] - output0_tm[4];
float tmp024c = output0_tm[5] + output0_tm[6];
float tmp135c = output0_tm[5] - output0_tm[6];
tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
}
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
// trim the 6-aligned padded output back to the caller's requested size
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
#endif
static void conv3x3s1_winograd64_neon4(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
bottom_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, inch, 4u, opt.workspace_allocator);
const int tiles = w_tm/8 * h_tm/8;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#if __ARM_NEON
const float coeff[8] = {
0.25f, 0.5f, -1.25f, 2.f,
-2.5f, 4.f, 4.25f, 5.25f
};
float32x4_t _coeff0 = vld1q_f32(coeff);
float32x4_t _coeff1 = vld1q_f32(coeff+4);
#endif // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
#if __ARM_NEON
const float* r0 = img0.row(i * 6) + j * 6;
const float* r1 = r0 + w;
const float* r2 = r0 + w*2;
const float* r3 = r0 + w*3;
#if __aarch64__
for (int m=0; m+3<8; m+=4)
{
float32x4_t _r0_0123 = vld1q_f32(r0);
float32x4_t _r0_4567 = vld1q_f32(r0+4);
float32x4_t _r1_0123 = vld1q_f32(r1);
float32x4_t _r1_4567 = vld1q_f32(r1+4);
float32x4_t _r2_0123 = vld1q_f32(r2);
float32x4_t _r2_4567 = vld1q_f32(r2+4);
float32x4_t _r3_0123 = vld1q_f32(r3);
float32x4_t _r3_4567 = vld1q_f32(r3+4);
float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123);
float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567);
float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123);
float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567);
// no vswp intrinsic :(
float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0]));
float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1]));
float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0]));
float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1]));
float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0]));
float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1]));
float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0]));
float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1]));
float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66);
float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11);
float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22);
float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55);
float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[7][m], _tmp7);
float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66);
float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0);
float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[2][m], _tmp2);
float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0);
float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1);
float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[4][m], _tmp4);
// reuse r04 * 1.25
// reuse r03 * 2.5
float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1);
float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(&tmp[5][m], _tmp5);
vst1q_f32(&tmp[6][m], _tmp6);
r0 += w*4;
r1 += w*4;
r2 += w*4;
r3 += w*4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
const float* t2 = tmp[2];
const float* t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2);
float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3);
float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4);
float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5);
float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6);
float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7);
for (int m=0; m+3<8; m+=4)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4_t _t2_0123 = vld1q_f32(t2);
float32x4_t _t2_4567 = vld1q_f32(t2+4);
float32x4_t _t3_0123 = vld1q_f32(t3);
float32x4_t _t3_4567 = vld1q_f32(t3+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123);
float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567);
// no vswp intrinsic :(
float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0]));
float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1]));
float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0]));
float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1]));
float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0]));
float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1]));
float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0]));
float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1]));
float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66);
float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11);
float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22);
float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55);
float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1);
r0_tm0_0[0] = vgetq_lane_f32(_r0_tm_0_0, 0);
r0_tm1_0[0] = vgetq_lane_f32(_r0_tm_0_0, 1);
r0_tm2_0[0] = vgetq_lane_f32(_r0_tm_0_0, 2);
r0_tm3_0[0] = vgetq_lane_f32(_r0_tm_0_0, 3);
r0_tm0_4[3] = vgetq_lane_f32(_r0_tm_4_3, 0);
r0_tm1_4[3] = vgetq_lane_f32(_r0_tm_4_3, 1);
r0_tm2_4[3] = vgetq_lane_f32(_r0_tm_4_3, 2);
r0_tm3_4[3] = vgetq_lane_f32(_r0_tm_4_3, 3);
float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66);
float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0);
float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b);
r0_tm0_0[1] = vgetq_lane_f32(_r0_tm_0_1, 0);
r0_tm1_0[1] = vgetq_lane_f32(_r0_tm_0_1, 1);
r0_tm2_0[1] = vgetq_lane_f32(_r0_tm_0_1, 2);
r0_tm3_0[1] = vgetq_lane_f32(_r0_tm_0_1, 3);
r0_tm0_0[2] = vgetq_lane_f32(_r0_tm_0_2, 0);
r0_tm1_0[2] = vgetq_lane_f32(_r0_tm_0_2, 1);
r0_tm2_0[2] = vgetq_lane_f32(_r0_tm_0_2, 2);
r0_tm3_0[2] = vgetq_lane_f32(_r0_tm_0_2, 3);
float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0);
float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1);
float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b);
r0_tm0_0[3] = vgetq_lane_f32(_r0_tm_0_3, 0);
r0_tm1_0[3] = vgetq_lane_f32(_r0_tm_0_3, 1);
r0_tm2_0[3] = vgetq_lane_f32(_r0_tm_0_3, 2);
r0_tm3_0[3] = vgetq_lane_f32(_r0_tm_0_3, 3);
r0_tm0_4[0] = vgetq_lane_f32(_r0_tm_4_0, 0);
r0_tm1_4[0] = vgetq_lane_f32(_r0_tm_4_0, 1);
r0_tm2_4[0] = vgetq_lane_f32(_r0_tm_4_0, 2);
r0_tm3_4[0] = vgetq_lane_f32(_r0_tm_4_0, 3);
float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1);
float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b);
r0_tm0_4[1] = vgetq_lane_f32(_r0_tm_4_1, 0);
r0_tm1_4[1] = vgetq_lane_f32(_r0_tm_4_1, 1);
r0_tm2_4[1] = vgetq_lane_f32(_r0_tm_4_1, 2);
r0_tm3_4[1] = vgetq_lane_f32(_r0_tm_4_1, 3);
r0_tm0_4[2] = vgetq_lane_f32(_r0_tm_4_2, 0);
r0_tm1_4[2] = vgetq_lane_f32(_r0_tm_4_2, 1);
r0_tm2_4[2] = vgetq_lane_f32(_r0_tm_4_2, 2);
r0_tm3_4[2] = vgetq_lane_f32(_r0_tm_4_2, 3);
t0 += 8*4;
t1 += 8*4;
t2 += 8*4;
t3 += 8*4;
r0_tm0_0 += img0_tm.w*tiles*2*4;
r0_tm0_4 += img0_tm.w*tiles*2*4;
r0_tm1_0 += img0_tm.w*tiles*2*4;
r0_tm1_4 += img0_tm.w*tiles*2*4;
r0_tm2_0 += img0_tm.w*tiles*2*4;
r0_tm2_4 += img0_tm.w*tiles*2*4;
r0_tm3_0 += img0_tm.w*tiles*2*4;
r0_tm3_4 += img0_tm.w*tiles*2*4;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
float* t2 = tmp[2];
float* t3 = tmp[3];
float* t4 = tmp[4];
float* t5 = tmp[5];
float* t6 = tmp[6];
float* t7 = tmp[7];
int stepw = w*4*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8], %26 \n"
"vld1.f32 {d20-d23}, [%9], %26 \n"
"vld1.f32 {d24-d27}, [%10], %26 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11], %26 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(t2), // %2
"=r"(t3), // %3
"=r"(t4), // %4
"=r"(t5), // %5
"=r"(t6), // %6
"=r"(t7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(r3) // %11
: "0"(t0),
"1"(t1),
"2"(t2),
"3"(t3),
"4"(t4),
"5"(t5),
"6"(t6),
"7"(t7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(r3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(stepw) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
t2 = tmp[2];
t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2);
float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3);
float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4);
float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5);
float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6);
float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7);
int step = img0_tm.w*tiles*2*4*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8] \n"
"add %8, %8, #128 \n"
"vld1.f32 {d20-d23}, [%9] \n"
"add %9, %9, #128 \n"
"vld1.f32 {d24-d27}, [%10] \n"
"add %10, %10, #128 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"add %11, %11, #128 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%0]! \n"
"vst1.f32 {d4[1]}, [%2]! \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%4]! \n"
"vst1.f32 {d5[1]}, [%6]! \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%0]! \n"
"vst1.f32 {d16[1]}, [%2]! \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%4]! \n"
"vst1.f32 {d17[1]}, [%6]! \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%0]! \n"
"vst1.f32 {d18[1]}, [%2]! \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%4]! \n"
"vst1.f32 {d19[1]}, [%6]! \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%2], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d17[0]}, [%4], %26 \n"
"vst1.f32 {d17[1]}, [%6], %26 \n"
"vtrn.32 q9, q2 \n"
"vtrn.32 q3, q6 \n"
"sub %0, %0, #12 \n"
"sub %2, %2, #12 \n"
"sub %4, %4, #12 \n"
"sub %6, %6, #12 \n"
"vswp d19, d6 \n"
"vswp d5, d12 \n"
"vst1.f32 {d18-d19}, [%1], %26 \n"
"vst1.f32 {d4-d5}, [%3], %26 \n"
"vst1.f32 {d6-d7}, [%5], %26 \n"
"vst1.f32 {d12-d13}, [%7], %26 \n"
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%0]! \n"
"vst1.f32 {d4[1]}, [%2]! \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%4]! \n"
"vst1.f32 {d5[1]}, [%6]! \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%0]! \n"
"vst1.f32 {d16[1]}, [%2]! \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%4]! \n"
"vst1.f32 {d17[1]}, [%6]! \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%0]! \n"
"vst1.f32 {d18[1]}, [%2]! \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%4]! \n"
"vst1.f32 {d19[1]}, [%6]! \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16[0]}, [%0] \n"
"vst1.f32 {d16[1]}, [%2] \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d17[0]}, [%4] \n"
"vst1.f32 {d17[1]}, [%6] \n"
"vtrn.32 q9, q2 \n"
"vtrn.32 q3, q6 \n"
"vswp d19, d6 \n"
"vswp d5, d12 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"vst1.f32 {d4-d5}, [%3] \n"
"vst1.f32 {d6-d7}, [%5] \n"
"vst1.f32 {d12-d13}, [%7] \n"
: "=r"(r0_tm0_0), // %0
"=r"(r0_tm0_4), // %1
"=r"(r0_tm1_0), // %2
"=r"(r0_tm1_4), // %3
"=r"(r0_tm2_0), // %4
"=r"(r0_tm2_4), // %5
"=r"(r0_tm3_0), // %6
"=r"(r0_tm3_4), // %7
"=r"(t0), // %8
"=r"(t1), // %9
"=r"(t2), // %10
"=r"(t3) // %11
: "0"(r0_tm0_0),
"1"(r0_tm0_4),
"2"(r0_tm1_0),
"3"(r0_tm1_4),
"4"(r0_tm2_0),
"5"(r0_tm2_4),
"6"(r0_tm3_0),
"7"(r0_tm3_4),
"8"(t0),
"9"(t1),
"10"(t2),
"11"(t3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(step) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* r0 = img0.row(i * 6) + j * 6;
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f);
float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f);
float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles);
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f;
r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]);
r0_tm_0[1] = tmp12a + tmp12b;
r0_tm_0[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f);
float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f);
r0_tm_0[3] = tmp34a + tmp34b;
r0_tm_4[0] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f);
float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f);
r0_tm_4[1] = tmp56a + tmp56b;
r0_tm_4[2] = tmp56a - tmp56b;
r0_tm_0 += img0_tm.w * tiles * 2;
r0_tm_4 += img0_tm.w * tiles * 2;
}
#endif // __ARM_NEON
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, outch, 4u, opt.workspace_allocator);
const int tiles = h_tm/8 * w_tm/8;
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
const float* ktm = kernel_tm.channel(pp);
out0_tm.fill(0.f);
out1_tm.fill(0.f);
out2_tm.fill(0.f);
out3_tm.fill(0.f);
int q = 0;
#if __ARM_NEON && __aarch64__
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* r2 = bottom_blob_tm.channel(q+2);
const float* r3 = bottom_blob_tm.channel(q+3);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
asm volatile(
"mov w0, #16 \n"// w0 = r = 16
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%8], #64 \n"// v0 v1 v2 v3 = _k00 _k01 _k02 _k03
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%8], #64 \n"// v4 v5 v6 v7 = _k10 _k11 _k12 _k13
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"// v8 v9 v10 v11 = _k20 _k21 _k22 _k23
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"// v12 v13 v14 v15 = _k30 _k31 _k32 _k33
// tile loop
"lsr w1, %w18, #2 \n"// w1 = nn = tiles >> 2
"cmp w1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"prfm pldl1keep, [%4, #128] \n"//
"ld1 {v16.4s}, [%4], #16 \n"
"1: \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"
"add x4, %0, #16 \n"// x4 = %0 next
"fmla v20.4s, v16.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v21.4s}, [%1] \n"
"add x5, %1, #16 \n"// x5 = %1 next
"fmla v21.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v22.4s}, [%2] \n"
"add x6, %2, #16 \n"// x6 = %2 next
"fmla v22.4s, v16.4s, v8.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v23.4s}, [%3] \n"
"add x7, %3, #16 \n"// x7 = %3 next
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v23.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [x4, #128] \n"
"ld1 {v24.4s}, [x4] \n"
"fmla v20.4s, v17.4s, v1.4s \n"
"fmla v21.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v22.4s, v17.4s, v9.4s \n"
"fmla v23.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v25.4s}, [x5] \n"
"fmla v20.4s, v18.4s, v2.4s \n"
"fmla v21.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v22.4s, v18.4s, v10.4s \n"
"fmla v23.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [x6, #128] \n"
"ld1 {v26.4s}, [x6] \n"
"fmla v20.4s, v19.4s, v3.4s \n"
"fmla v21.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v22.4s, v19.4s, v11.4s \n"
"fmla v23.4s, v19.4s, v15.4s \n"
///////
"prfm pldl1keep, [x7, #128] \n"
"ld1 {v27.4s}, [x7] \n"
"st1 {v20.4s}, [%0] \n"
"add %0, %0, #32 \n"
"fmla v24.4s, v16.4s, v0.4s \n"
"fmla v25.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v26.4s, v16.4s, v8.4s \n"
"fmla v27.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"
"st1 {v21.4s}, [%1] \n"
"add %1, %1, #32 \n"
"fmla v24.4s, v17.4s, v1.4s \n"
"fmla v25.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v26.4s, v17.4s, v9.4s \n"
"fmla v27.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v21.4s}, [%1] \n"
"st1 {v22.4s}, [%2] \n"
"add %2, %2, #32 \n"
"fmla v24.4s, v18.4s, v2.4s \n"
"fmla v25.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v26.4s, v18.4s, v10.4s \n"
"fmla v27.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v22.4s}, [%2] \n"
"st1 {v23.4s}, [%3] \n"
"add %3, %3, #32 \n"
"fmla v24.4s, v19.4s, v3.4s \n"
"fmla v25.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v26.4s, v19.4s, v11.4s \n"
"fmla v27.4s, v19.4s, v15.4s \n"
///////
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v23.4s}, [%3] \n"
"st1 {v24.4s}, [x4] \n"
"add x4, x4, #32 \n"
"fmla v20.4s, v16.4s, v0.4s \n"
"fmla v21.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v22.4s, v16.4s, v8.4s \n"
"fmla v23.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [x4, #128] \n"
"ld1 {v24.4s}, [x4] \n"
"st1 {v25.4s}, [x5] \n"
"add x5, x5, #32 \n"
"fmla v20.4s, v17.4s, v1.4s \n"
"fmla v21.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v22.4s, v17.4s, v9.4s \n"
"fmla v23.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v25.4s}, [x5] \n"
"st1 {v26.4s}, [x6] \n"
"add x6, x6, #32 \n"
"fmla v20.4s, v18.4s, v2.4s \n"
"fmla v21.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v22.4s, v18.4s, v10.4s \n"
"fmla v23.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [x6, #128] \n"
"ld1 {v26.4s}, [x6] \n"
"st1 {v27.4s}, [x7] \n"
"add x7, x7, #32 \n"
"fmla v20.4s, v19.4s, v3.4s \n"
"fmla v21.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v22.4s, v19.4s, v11.4s \n"
"fmla v23.4s, v19.4s, v15.4s \n"
///////
"prfm pldl1keep, [x7, #128] \n"
"ld1 {v27.4s}, [x7] \n"
"st1 {v20.4s}, [%0] \n"
"fmla v24.4s, v16.4s, v0.4s \n"
"fmla v25.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v26.4s, v16.4s, v8.4s \n"
"fmla v27.4s, v16.4s, v12.4s \n"
"st1 {v21.4s}, [%1] \n"
"fmla v24.4s, v17.4s, v1.4s \n"
"fmla v25.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v26.4s, v17.4s, v9.4s \n"
"fmla v27.4s, v17.4s, v13.4s \n"
"st1 {v22.4s}, [%2] \n"
"fmla v24.4s, v18.4s, v2.4s \n"
"fmla v25.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v26.4s, v18.4s, v10.4s \n"
"fmla v27.4s, v18.4s, v14.4s \n"
"st1 {v23.4s}, [%3] \n"
"fmla v24.4s, v19.4s, v3.4s \n"
"fmla v25.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v26.4s, v19.4s, v11.4s \n"
"fmla v27.4s, v19.4s, v15.4s \n"
"st1 {v24.4s}, [x4], #16 \n"
"mov %0, x4 \n"
"st1 {v25.4s}, [x5], #16 \n"
"mov %1, x5 \n"
"subs w1, w1, #1 \n"
"st1 {v26.4s}, [x6], #16 \n"
"mov %2, x6 \n"
"st1 {v27.4s}, [x7], #16 \n"
"mov %3, x7 \n"
"bne 1b \n"
"sub %4, %4, #16 \n"
//END tile loop
"2: \n"
// remain loop
"and w1, %w18, #3 \n"// w1 = remain = tiles & 3;
"cmp w1, #0 \n"
"beq 4f \n"
//BEGIN remain loop
"3: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"
"fmla v20.4s, v16.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v21.4s}, [%1] \n"
"fmla v21.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v22.4s}, [%2] \n"
"fmla v22.4s, v16.4s, v8.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v23.4s}, [%3] \n"
"fmla v23.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v20.4s, v17.4s, v1.4s \n"
"fmla v21.4s, v17.4s, v5.4s \n"
"fmla v22.4s, v17.4s, v9.4s \n"
"fmla v23.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v20.4s, v18.4s, v2.4s \n"
"fmla v21.4s, v18.4s, v6.4s \n"
"fmla v22.4s, v18.4s, v10.4s \n"
"fmla v23.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v20.4s, v19.4s, v3.4s \n"
"fmla v21.4s, v19.4s, v7.4s \n"
"fmla v22.4s, v19.4s, v11.4s \n"
"fmla v23.4s, v19.4s, v15.4s \n"
"st1 {v20.4s}, [%0], #16 \n"
"st1 {v21.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v22.4s}, [%2], #16 \n"
"st1 {v23.4s}, [%3], #16 \n"
"bne 3b \n"
//END remain loop
"4: \n"
"subs w0, w0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(r2), // %6
"=r"(r3), // %7
"=r"(ktm) // %8
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(r2),
"7"(r3),
"8"(ktm),
"r"(tiles) // %18
: "cc", "memory", "x0", "x1", "x4", "x5", "x6", "x7", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#endif // __ARM_NEON && __aarch64__
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
#if __ARM_NEON
#if __aarch64__
asm volatile(
"mov w0, #16 \n"// w0 = r = 16
"0: \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v0.4s, v1.4s}, [%6], #32 \n"// v0 v1 = _k00 _k01
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v2.4s, v3.4s}, [%6], #32 \n"// v2 v3 = _k10 _k11
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4s, v5.4s}, [%6], #32 \n"// v4 v5 = _k20 _k21
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v6.4s, v7.4s}, [%6], #32 \n"// v6 v7 = _k30 _k31
// tile loop
"lsr w1, %w14, #2 \n"// w1 = nn = tiles >> 2
"cmp w1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"1: \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
////
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
////
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
////
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"bne 1b \n"
"sub %4, %4, #16 \n"
//END tile loop
"2: \n"
// remain loop
"and w1, %w14, #3 \n"// w1 = remain = tiles & 3;
"cmp w1, #0 \n"
"beq 4f \n"
//BEGIN remain loop
"3: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"bne 3b \n"
//END remain loop
"4: \n"
"subs w0, w0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(ktm) // %6
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(ktm),
"r"(tiles) // %14
: "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21"
);
#else
asm volatile(
"mov r0, #16 \n"// r0 = r = 16
"0: \n"
"pld [%6, #256] \n"
"vld1.f32 {d0-d3}, [%6 :128]! \n"// q0 q1 = _k00 _k01
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6 :128]! \n"// q2 q3 = _k10 _k11
"pld [%6, #256] \n"
"vld1.f32 {d8-d11}, [%6 :128]! \n"// q4 q5 = _k20 _k21
"pld [%6, #256] \n"
"vld1.f32 {d12-d15}, [%6 :128]! \n"// q6 q7 = _k30 _k31
// tile loop
"lsr r1, %14, #2 \n"// r1 = nn = tiles >> 2
"cmp r1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"1: \n"
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
////
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
////
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
////
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"subs r1, #1 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"bne 1b \n"
"sub %4, %4, #16 \n"
//END tile loop
"2: \n"
// remain loop
"and r1, %14, #3 \n"// r1 = remain = tiles & 3;
"cmp r1, #0 \n"
"beq 4f \n"
//BEGIN remain loop
"3: \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"subs r1, #1 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"bne 3b \n"
//END remain loop
"4: \n"
"subs r0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(ktm) // %6
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(ktm),
"r"(tiles) // %14
: "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13"
);
#endif // __aarch64__
#else
for (int r=0; r<16; r++)
{
for (int t=0; t<tiles; t++)
{
for (int m=0; m<4; m++)
{
output0_tm[m] += r0[m] * ktm[0 +m];
output0_tm[m] += r1[m] * ktm[4 +m];
output1_tm[m] += r0[m] * ktm[8 +m];
output1_tm[m] += r1[m] * ktm[12+m];
output2_tm[m] += r0[m] * ktm[16+m];
output2_tm[m] += r1[m] * ktm[20+m];
output3_tm[m] += r0[m] * ktm[24+m];
output3_tm[m] += r1[m] * ktm[28+m];
}
r0 += 4;
r1 += 4;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
ktm += 32;
}
#endif // __ARM_NEON
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
#if __ARM_NEON
#if __aarch64__
asm volatile(
"mov w0, #16 \n"// w0 = r = 16
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5], #32 \n"// v0 v1 = _k00 _k10
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v2.4s, v3.4s}, [%5], #32 \n"// v2 v3 = _k20 _k30
// tile loop
"mov w1, %w12 \n"// w1 = tiles
"cmp w1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"1: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v17.4s}, [%0] \n"
"fmla v17.4s, v16.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v18.4s}, [%1] \n"
"fmla v18.4s, v16.4s, v1.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v19.4s}, [%2] \n"
"fmla v19.4s, v16.4s, v2.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v20.4s}, [%3] \n"
"fmla v20.4s, v16.4s, v3.4s \n"
"st1 {v17.4s}, [%0], #16 \n"
"st1 {v18.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v19.4s}, [%2], #16 \n"
"st1 {v20.4s}, [%3], #16 \n"
"bne 1b \n"
//END tile loop
"2: \n"
"subs w0, w0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(ktm) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(ktm),
"r"(tiles) // %12
: "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20"
);
#else
asm volatile(
"mov r0, #16 \n"// r0 = r = 16
"0: \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5 :128]! \n"// q0 q1 = _k00 _k10
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"// q2 q3 = _k20 _k30
// tile loop
"mov r1, %12 \n"// r1 = tiles
"cmp r1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"1: \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q1 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q2 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q3 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"subs r1, #1 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"bne 1b \n"
//END tile loop
"2: \n"
"subs r0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(ktm) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(ktm),
"r"(tiles) // %12
: "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13"
);
#endif // __aarch64__
#else
for (int r=0; r<16; r++)
{
for (int t=0; t<tiles; t++)
{
for (int m=0; m<4; m++)
{
output0_tm[m] += r0[m] * ktm[0 +m];
output1_tm[m] += r0[m] * ktm[4 +m];
output2_tm[m] += r0[m] * ktm[8 +m];
output3_tm[m] += r0[m] * ktm[12+m];
}
r0 += 4;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
ktm += 16;
}
#endif // __ARM_NEON
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const float* ktm = (const float*)kernel_tm.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start);
out0_tm.fill(0.f);
int q = 0;
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
float* output0_tm = out0_tm;
for (int r=0; r<16; r++)
{
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(ktm); ktm += 4;
#endif // __ARM_NEON
// tile
for (int i=0; i<tiles; i++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v17.4s, %4.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00) // %4
: "cc", "memory", "v16", "v17"
);
#else
asm volatile(
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128]! \n"// q9 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q9, %q4 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00) // %4
: "cc", "memory", "q8", "q9"
);
#endif // __aarch64__
#else
for (int m=0; m<4; m++)
{
output0_tm[m] += r0[m] * ktm[m];
}
r0 += 4;
output0_tm += 4;
#endif // __ARM_NEON
}
#if !__ARM_NEON
ktm += 4;
#endif // __ARM_NEON
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
#if __ARM_NEON
const float coeff[4] = { 4.f, 8.f, 16.f, 32.f };
float32x4_t _coeff = vld1q_f32(coeff);
#endif // __ARM_NEON
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
#if __ARM_NEON
float32x2_t _bias0 = vdup_n_f32(bias0);
#endif // __ARM_NEON
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
#if __ARM_NEON
const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles);
const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*2);
const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*3);
const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*4);
const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*5);
const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*6);
const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*7);
#if __aarch64__
for (int m=0; m+3<8; m+=4)
{
float32x4_t _output0_tm0_0123 = vld1q_f32(output0_tm0_0);
float32x4_t _output0_tm0_4567 = vld1q_f32(output0_tm0_4);
float32x4_t _output0_tm1_0123 = vld1q_f32(output0_tm1_0);
float32x4_t _output0_tm1_4567 = vld1q_f32(output0_tm1_4);
float32x4_t _output0_tm2_0123 = vld1q_f32(output0_tm2_0);
float32x4_t _output0_tm2_4567 = vld1q_f32(output0_tm2_4);
float32x4_t _output0_tm3_0123 = vld1q_f32(output0_tm3_0);
float32x4_t _output0_tm3_4567 = vld1q_f32(output0_tm3_4);
float32x4x2_t _output0_tm01_00221133 = vtrnq_f32(_output0_tm0_0123, _output0_tm1_0123);
float32x4x2_t _output0_tm01_44665577 = vtrnq_f32(_output0_tm0_4567, _output0_tm1_4567);
float32x4x2_t _output0_tm23_00221133 = vtrnq_f32(_output0_tm2_0123, _output0_tm3_0123);
float32x4x2_t _output0_tm23_44665577 = vtrnq_f32(_output0_tm2_4567, _output0_tm3_4567);
// no vswp intrinsic :(
float32x4_t _output0_tm_00 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[0]), vget_low_f32(_output0_tm23_00221133.val[0]));
float32x4_t _output0_tm_11 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[1]), vget_low_f32(_output0_tm23_00221133.val[1]));
float32x4_t _output0_tm_22 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[0]), vget_high_f32(_output0_tm23_00221133.val[0]));
float32x4_t _output0_tm_33 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[1]), vget_high_f32(_output0_tm23_00221133.val[1]));
float32x4_t _output0_tm_44 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[0]), vget_low_f32(_output0_tm23_44665577.val[0]));
float32x4_t _output0_tm_55 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[1]), vget_low_f32(_output0_tm23_44665577.val[1]));
float32x4_t _output0_tm_66 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[0]), vget_high_f32(_output0_tm23_44665577.val[0]));
float32x4_t _output0_tm_77 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[1]), vget_high_f32(_output0_tm23_44665577.val[1]));
float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a);
_tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1);
_tmp0 = vaddq_f32(_tmp0, _tmp024b);
float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1);
float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[2][m], _tmp2);
vst1q_f32(&tmp[4][m], _tmp4);
float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0);
float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a);
_tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1);
_tmp5 = vaddq_f32(_tmp5, _tmp135c);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[5][m], _tmp5);
output0_tm0_0 += out0_tm.w * tiles * 2*4;
output0_tm0_4 += out0_tm.w * tiles * 2*4;
output0_tm1_0 += out0_tm.w * tiles * 2*4;
output0_tm1_4 += out0_tm.w * tiles * 2*4;
output0_tm2_0 += out0_tm.w * tiles * 2*4;
output0_tm2_4 += out0_tm.w * tiles * 2*4;
output0_tm3_0 += out0_tm.w * tiles * 2*4;
output0_tm3_4 += out0_tm.w * tiles * 2*4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
for (int m=0; m+1<6; m+=2)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]);
float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]);
float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]);
float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]);
float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]);
float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]);
float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]);
float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]);
float32x2_t _tmp024a = vadd_f32(_t_11, _t_22);
float32x2_t _tmp135a = vsub_f32(_t_11, _t_22);
float32x2_t _tmp024b = vadd_f32(_t_33, _t_44);
float32x2_t _tmp135b = vsub_f32(_t_33, _t_44);
float32x2_t _tmp024c = vadd_f32(_t_55, _t_66);
float32x2_t _tmp135c = vsub_f32(_t_55, _t_66);
float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a);
_output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1);
_output_0 = vadd_f32(_output_0, _tmp024b);
_output_0 = vadd_f32(_output_0, _bias0);
float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1);
_output_2 = vadd_f32(_output_2, _bias0);
float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _bias0);
output0[0] = vget_lane_f32(_output_0, 0);
output1[0] = vget_lane_f32(_output_0, 1);
output0[2] = vget_lane_f32(_output_2, 0);
output1[2] = vget_lane_f32(_output_2, 1);
output0[4] = vget_lane_f32(_output_4, 0);
output1[4] = vget_lane_f32(_output_4, 1);
float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _bias0);
float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0);
_output_3 = vadd_f32(_output_3, _bias0);
float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a);
_output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1);
_output_5 = vadd_f32(_output_5, _tmp135c);
_output_5 = vadd_f32(_output_5, _bias0);
output0[1] = vget_lane_f32(_output_1, 0);
output1[1] = vget_lane_f32(_output_1, 1);
output0[3] = vget_lane_f32(_output_3, 0);
output1[3] = vget_lane_f32(_output_3, 1);
output0[5] = vget_lane_f32(_output_5, 0);
output1[5] = vget_lane_f32(_output_5, 1);
t0 += 8*2;
t1 += 8*2;
output0 += outw*2;
output1 += outw*2;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
int step = out0_tm.w * tiles * 2*4 *4;
asm volatile(
// loop0
"vld1.f32 {d16-d17}, [%2], %21 \n"
"vld1.f32 {d18-d19}, [%3], %21 \n"
"vld1.f32 {d20-d21}, [%4], %21 \n"
"vld1.f32 {d22-d23}, [%5], %21 \n"
"vld1.f32 {d24-d25}, [%6], %21 \n"
"vld1.f32 {d26-d27}, [%7], %21 \n"
"vld1.f32 {d28-d29}, [%8], %21 \n"
"vld1.f32 {d30-d31}, [%9], %21 \n"
"vtrn.32 q8, q10 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"sub %0, %0, #112 \n"
"vst1.f32 {d30-d31}, [%1] \n"
"sub %1, %1, #112 \n"
// loop1
"vld1.f32 {d16-d17}, [%2] \n"
"vld1.f32 {d18-d19}, [%3] \n"
"vld1.f32 {d20-d21}, [%4] \n"
"vld1.f32 {d22-d23}, [%5] \n"
"vld1.f32 {d24-d25}, [%6] \n"
"vld1.f32 {d26-d27}, [%7] \n"
"vld1.f32 {d28-d29}, [%8] \n"
"vld1.f32 {d30-d31}, [%9] \n"
"vtrn.32 q8, q10 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"vst1.f32 {d30-d31}, [%1] \n"
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(output0_tm0_0), // %2
"=r"(output0_tm0_4), // %3
"=r"(output0_tm1_0), // %4
"=r"(output0_tm1_4), // %5
"=r"(output0_tm2_0), // %6
"=r"(output0_tm2_4), // %7
"=r"(output0_tm3_0), // %8
"=r"(output0_tm3_4) // %9
: "0"(t0),
"1"(t1),
"2"(output0_tm0_0),
"3"(output0_tm0_4),
"4"(output0_tm1_0),
"5"(output0_tm1_4),
"6"(output0_tm2_0),
"7"(output0_tm2_4),
"8"(output0_tm3_0),
"9"(output0_tm3_4),
"w"(_coeff), // %20
"r"(step) // %21
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
int stepw = outw*2 * 4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop1
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop2
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(t0), // %2
"=r"(t1) // %3
: "0"(output0),
"1"(output1),
"2"(t0),
"3"(t1),
"w"(_coeff), // %8
"w"(_bias0), // %9
"r"(stepw) // %10
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles);
for (int m=0; m<8; m++)
{
float tmp024a = output0_tm_0[1] + output0_tm_0[2];
float tmp135a = output0_tm_0[1] - output0_tm_0[2];
float tmp024b = output0_tm_0[3] + output0_tm_4[0];
float tmp135b = output0_tm_0[3] - output0_tm_4[0];
float tmp024c = output0_tm_4[1] + output0_tm_4[2];
float tmp135c = output0_tm_4[1] - output0_tm_4[2];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_4[3] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += out0_tm.w * tiles * 2;
output0_tm_4 += out0_tm.w * tiles * 2;
}
float* output0 = out0.row(i * 6) + j * 6;
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
#endif // __ARM_NEON
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}
static void conv3x3s1_winograd64_neon5(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
bottom_blob_tm.create(1, 64 * tiles, inch, 4u, opt.workspace_allocator);
// bottom_blob_tm.create(inch, tiles, 64);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#if __ARM_NEON
const float coeff[8] = {
0.25f, 0.5f, -1.25f, 2.f,
-2.5f, 4.f, 4.25f, 5.25f
};
float32x4_t _coeff0 = vld1q_f32(coeff);
float32x4_t _coeff1 = vld1q_f32(coeff+4);
#endif // __ARM_NEON
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
#if __ARM_NEON
const float* r0 = img0.row(i * 6) + j * 6;
const float* r1 = r0 + w;
const float* r2 = r0 + w*2;
const float* r3 = r0 + w*3;
#if __aarch64__
for (int m=0; m+3<8; m+=4)
{
float32x4_t _r0_0123 = vld1q_f32(r0);
float32x4_t _r0_4567 = vld1q_f32(r0+4);
float32x4_t _r1_0123 = vld1q_f32(r1);
float32x4_t _r1_4567 = vld1q_f32(r1+4);
float32x4_t _r2_0123 = vld1q_f32(r2);
float32x4_t _r2_4567 = vld1q_f32(r2+4);
float32x4_t _r3_0123 = vld1q_f32(r3);
float32x4_t _r3_4567 = vld1q_f32(r3+4);
float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123);
float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567);
float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123);
float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567);
// no vswp intrinsic :(
float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0]));
float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1]));
float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0]));
float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1]));
float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0]));
float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1]));
float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0]));
float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1]));
float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66);
float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11);
float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22);
float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55);
float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[7][m], _tmp7);
float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66);
float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0);
float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[2][m], _tmp2);
float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0);
float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1);
float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[4][m], _tmp4);
// reuse r04 * 1.25
// reuse r03 * 2.5
float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1);
float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(&tmp[5][m], _tmp5);
vst1q_f32(&tmp[6][m], _tmp6);
r0 += w*4;
r1 += w*4;
r2 += w*4;
r3 += w*4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
const float* t2 = tmp[2];
const float* t3 = tmp[3];
float* r0_tm0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles*8);
float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles*16);
float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles*24);
for (int m=0; m+3<8; m+=4)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4_t _t2_0123 = vld1q_f32(t2);
float32x4_t _t2_4567 = vld1q_f32(t2+4);
float32x4_t _t3_0123 = vld1q_f32(t3);
float32x4_t _t3_4567 = vld1q_f32(t3+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123);
float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567);
// no vswp intrinsic :(
float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0]));
float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1]));
float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0]));
float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1]));
float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0]));
float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1]));
float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0]));
float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1]));
float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66);
float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11);
float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22);
float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55);
float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_0, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_0, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_0, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_0, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66);
float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0);
float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_1, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_1, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_1, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_1, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_2, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_2, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_2, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_2, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0);
float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1);
float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_3, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_3, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_3, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_3, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_0, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_0, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_0, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_0, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1);
float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_1, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_1, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_1, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_1, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_2, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_2, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_2, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_2, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_3, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_3, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_3, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_3, 3);
t0 += 8*4;
t1 += 8*4;
t2 += 8*4;
t3 += 8*4;
r0_tm0 += img0_tm.w*tiles*25;
r0_tm1 += img0_tm.w*tiles*25;
r0_tm2 += img0_tm.w*tiles*25;
r0_tm3 += img0_tm.w*tiles*25;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
float* t2 = tmp[2];
float* t3 = tmp[3];
float* t4 = tmp[4];
float* t5 = tmp[5];
float* t6 = tmp[6];
float* t7 = tmp[7];
int stepw = w*4*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8], %26 \n"
"vld1.f32 {d20-d23}, [%9], %26 \n"
"vld1.f32 {d24-d27}, [%10], %26 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11], %26 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(t2), // %2
"=r"(t3), // %3
"=r"(t4), // %4
"=r"(t5), // %5
"=r"(t6), // %6
"=r"(t7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(r3) // %11
: "0"(t0),
"1"(t1),
"2"(t2),
"3"(t3),
"4"(t4),
"5"(t5),
"6"(t6),
"7"(t7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(r3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(stepw) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
t2 = tmp[2];
t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*8);
float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*16);
float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*24);
float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles*32);
float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*40);
float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*48);
float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*56);
int step = img0_tm.w*tiles*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8] \n"
"add %8, %8, #128 \n"
"vld1.f32 {d20-d23}, [%9] \n"
"add %9, %9, #128 \n"
"vld1.f32 {d24-d27}, [%10] \n"
"add %10, %10, #128 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"add %11, %11, #128 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%0], %26 \n"
"vst1.f32 {d4[1]}, [%1], %26 \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%2], %26 \n"
"vst1.f32 {d5[1]}, [%3], %26 \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%1], %26 \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%2], %26 \n"
"vst1.f32 {d17[1]}, [%3], %26 \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%0], %26 \n"
"vst1.f32 {d18[1]}, [%1], %26 \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%2], %26 \n"
"vst1.f32 {d19[1]}, [%3], %26 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%1], %26 \n"
"vst1.f32 {d17[0]}, [%2], %26 \n"
"vst1.f32 {d17[1]}, [%3], %26 \n"
"vadd.f32 q2, q4, q5 \n"
"vst1.f32 {d18[0]}, [%0], %26 \n"
"vst1.f32 {d18[1]}, [%1], %26 \n"
"vst1.f32 {d19[0]}, [%2], %26 \n"
"vst1.f32 {d19[1]}, [%3], %26 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d4[0]}, [%0], %26 \n"
"vst1.f32 {d4[1]}, [%1], %26 \n"
"vst1.f32 {d5[0]}, [%2], %26 \n"
"vst1.f32 {d5[1]}, [%3], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d6[0]}, [%0], %26 \n"
"vst1.f32 {d6[1]}, [%1], %26 \n"
"vst1.f32 {d7[0]}, [%2], %26 \n"
"vst1.f32 {d7[1]}, [%3], %26 \n"
"vst1.f32 {d12[0]}, [%0] \n"
"vst1.f32 {d12[1]}, [%1] \n"
"vst1.f32 {d13[0]}, [%2] \n"
"vst1.f32 {d13[1]}, [%3] \n"
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%4], %26 \n"
"vst1.f32 {d4[1]}, [%5], %26 \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%6], %26 \n"
"vst1.f32 {d5[1]}, [%7], %26 \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%4], %26 \n"
"vst1.f32 {d16[1]}, [%5], %26 \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%6], %26 \n"
"vst1.f32 {d17[1]}, [%7], %26 \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%4], %26 \n"
"vst1.f32 {d18[1]}, [%5], %26 \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%6], %26 \n"
"vst1.f32 {d19[1]}, [%7], %26 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vst1.f32 {d16[0]}, [%4], %26 \n"
"vst1.f32 {d16[1]}, [%5], %26 \n"
"vst1.f32 {d17[0]}, [%6], %26 \n"
"vst1.f32 {d17[1]}, [%7], %26 \n"
"vadd.f32 q2, q4, q5 \n"
"vst1.f32 {d18[0]}, [%4], %26 \n"
"vst1.f32 {d18[1]}, [%5], %26 \n"
"vst1.f32 {d19[0]}, [%6], %26 \n"
"vst1.f32 {d19[1]}, [%7], %26 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d4[0]}, [%4], %26 \n"
"vst1.f32 {d4[1]}, [%5], %26 \n"
"vst1.f32 {d5[0]}, [%6], %26 \n"
"vst1.f32 {d5[1]}, [%7], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d6[0]}, [%4], %26 \n"
"vst1.f32 {d6[1]}, [%5], %26 \n"
"vst1.f32 {d7[0]}, [%6], %26 \n"
"vst1.f32 {d7[1]}, [%7], %26 \n"
"vst1.f32 {d12[0]}, [%4] \n"
"vst1.f32 {d12[1]}, [%5] \n"
"vst1.f32 {d13[0]}, [%6] \n"
"vst1.f32 {d13[1]}, [%7] \n"
: "=r"(r0_tm0_0), // %0
"=r"(r0_tm1_0), // %1
"=r"(r0_tm2_0), // %2
"=r"(r0_tm3_0), // %3
"=r"(r0_tm0_4), // %4
"=r"(r0_tm1_4), // %5
"=r"(r0_tm2_4), // %6
"=r"(r0_tm3_4), // %7
"=r"(t0), // %8
"=r"(t1), // %9
"=r"(t2), // %10
"=r"(t3) // %11
: "0"(r0_tm0_0),
"1"(r0_tm1_0),
"2"(r0_tm2_0),
"3"(r0_tm3_0),
"4"(r0_tm0_4),
"5"(r0_tm1_4),
"6"(r0_tm2_4),
"7"(r0_tm3_4),
"8"(t0),
"9"(t1),
"10"(t2),
"11"(t3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(step) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* r0 = img0.row(i * 6) + j * 6;
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f);
float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f);
float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm_1 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm_2 = img0_tm.row(i * w_tm/8 + j + tiles*2);
float* r0_tm_3 = img0_tm.row(i * w_tm/8 + j + tiles*3);
float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles*4);
float* r0_tm_5 = img0_tm.row(i * w_tm/8 + j + tiles*5);
float* r0_tm_6 = img0_tm.row(i * w_tm/8 + j + tiles*6);
float* r0_tm_7 = img0_tm.row(i * w_tm/8 + j + tiles*7);
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f;
r0_tm_7[0] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]);
r0_tm_1[0] = tmp12a + tmp12b;
r0_tm_2[0] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f);
float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f);
r0_tm_3[0] = tmp34a + tmp34b;
r0_tm_4[0] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f);
float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f);
r0_tm_5[0] = tmp56a + tmp56b;
r0_tm_6[0] = tmp56a - tmp56b;
r0_tm_0 += img0_tm.w * tiles * 8;
r0_tm_1 += img0_tm.w * tiles * 8;
r0_tm_2 += img0_tm.w * tiles * 8;
r0_tm_3 += img0_tm.w * tiles * 8;
r0_tm_4 += img0_tm.w * tiles * 8;
r0_tm_5 += img0_tm.w * tiles * 8;
r0_tm_6 += img0_tm.w * tiles * 8;
r0_tm_7 += img0_tm.w * tiles * 8;
}
#endif // __ARM_NEON
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
// permute
// bottom_blob_tm.create(1, 64 * tiles, inch);
// Mat bottom_blob_tm2(inch, tiles, 64);
Mat bottom_blob_tm2(8*inch, tiles/8 + (tiles%8)/4 + tiles%4, 64, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r=0; r<64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
float* tm2p = tm2.row(i/8);
const float* r0 = bottom_blob_tm;
r0 += r*tiles + i;
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
vst1q_f32(tm2p, _r0);
vst1q_f32(tm2p+4, _r0n);
#else
tm2p[0] = r0[0];
tm2p[1] = r0[1];
tm2p[2] = r0[2];
tm2p[3] = r0[3];
tm2p[4] = r0[4];
tm2p[5] = r0[5];
tm2p[6] = r0[6];
tm2p[7] = r0[7];
#endif // __ARM_NEON
r0 += bottom_blob_tm.cstep;
tm2p += 8;
}
}
for (; i+3<tiles; i+=4)
{
float* tm2p = tm2.row(i/8+(i%8)/4);
const float* r0 = bottom_blob_tm;
r0 += r*tiles + i;
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
float32x4_t _r0 = vld1q_f32(r0);
vst1q_f32(tm2p, _r0);
#else
tm2p[0] = r0[0];
tm2p[1] = r0[1];
tm2p[2] = r0[2];
tm2p[3] = r0[3];
#endif // __ARM_NEON
r0 += bottom_blob_tm.cstep;
tm2p += 4;
}
}
for (; i<tiles; i++)
{
float* tm2p = tm2.row(i/8+(i%8)/4+i%4);
const float* r0 = bottom_blob_tm;
r0 += r*tiles + i;
for (int q=0; q<inch; q++)
{
tm2p[0] = r0[0];
r0 += bottom_blob_tm.cstep;
tm2p += 1;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(1, 64 * tiles, outch);
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
const Mat kernel_tm0 = kernel_tm.channel(p/8);
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
Mat out4_tm = top_blob_tm.channel(p+4);
Mat out5_tm = top_blob_tm.channel(p+5);
Mat out6_tm = top_blob_tm.channel(p+6);
Mat out7_tm = top_blob_tm.channel(p+7);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
float* output4_tm = out4_tm;
float* output5_tm = out5_tm;
float* output6_tm = out6_tm;
float* output7_tm = out7_tm;
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
const float* bb2p0 = bb2.row(i/8);
const float* ktm0 = kernel_tm0.row(r);
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
// inch loop
"lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"fmla v16.4s, v10.4s, v2.s[0] \n"
"fmla v17.4s, v11.4s, v2.s[0] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v11.4s, v2.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[2] \n"
"fmla v21.4s, v11.4s, v2.s[2] \n"
"fmla v22.4s, v10.4s, v2.s[3] \n"
"fmla v23.4s, v11.4s, v2.s[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"
"fmla v24.4s, v10.4s, v3.s[0] \n"
"fmla v25.4s, v11.4s, v3.s[0] \n"
"fmla v26.4s, v10.4s, v3.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v10.4s, v3.s[2] \n"
"fmla v29.4s, v11.4s, v3.s[2] \n"
"fmla v30.4s, v10.4s, v3.s[3] \n"
"fmla v31.4s, v11.4s, v3.s[3] \n"
"fmla v16.4s, v12.4s, v4.s[0] \n"
"fmla v17.4s, v13.4s, v4.s[0] \n"
"fmla v18.4s, v12.4s, v4.s[1] \n"
"fmla v19.4s, v13.4s, v4.s[1] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v13.4s, v4.s[2] \n"
"fmla v22.4s, v12.4s, v4.s[3] \n"
"fmla v23.4s, v13.4s, v4.s[3] \n"
"fmla v24.4s, v12.4s, v5.s[0] \n"
"fmla v25.4s, v13.4s, v5.s[0] \n"
"fmla v26.4s, v12.4s, v5.s[1] \n"
"fmla v27.4s, v13.4s, v5.s[1] \n"
"fmla v28.4s, v12.4s, v5.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v12.4s, v5.s[3] \n"
"fmla v31.4s, v13.4s, v5.s[3] \n"
"fmla v16.4s, v14.4s, v6.s[0] \n"
"fmla v17.4s, v15.4s, v6.s[0] \n"
"fmla v18.4s, v14.4s, v6.s[1] \n"
"fmla v19.4s, v15.4s, v6.s[1] \n"
"fmla v20.4s, v14.4s, v6.s[2] \n"
"fmla v21.4s, v15.4s, v6.s[2] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v15.4s, v6.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v14.4s, v7.s[0] \n"
"fmla v25.4s, v15.4s, v7.s[0] \n"
"fmla v26.4s, v14.4s, v7.s[1] \n"
"fmla v27.4s, v15.4s, v7.s[1] \n"
"fmla v28.4s, v14.4s, v7.s[2] \n"
"fmla v29.4s, v15.4s, v7.s[2] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4s, v9.4s}, [%8], #32 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
"st1 {v18.4s, v19.4s}, [%1], #32 \n"
"st1 {v20.4s, v21.4s}, [%2], #32 \n"
"st1 {v22.4s, v23.4s}, [%3], #32 \n"
"st1 {v24.4s, v25.4s}, [%4], #32 \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
"st1 {v30.4s, v31.4s}, [%7], #32 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(bb2p0), // %8
"=r"(ktm0) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(bb2p0),
"9"(ktm0),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+3<tiles; i+=4)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4);
const float* ktm0 = kernel_tm0.row(r);
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
// inch loop
"lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v9.4s, v2.s[0] \n"
"fmla v17.4s, v9.4s, v2.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[2] \n"
"fmla v19.4s, v9.4s, v2.s[3] \n"
"fmla v20.4s, v9.4s, v3.s[0] \n"
"fmla v21.4s, v9.4s, v3.s[1] \n"
"fmla v22.4s, v9.4s, v3.s[2] \n"
"fmla v23.4s, v9.4s, v3.s[3] \n"
"fmla v16.4s, v10.4s, v4.s[0] \n"
"fmla v17.4s, v10.4s, v4.s[1] \n"
"fmla v18.4s, v10.4s, v4.s[2] \n"
"fmla v19.4s, v10.4s, v4.s[3] \n"
"fmla v20.4s, v10.4s, v5.s[0] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v5.s[2] \n"
"fmla v23.4s, v10.4s, v5.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v11.4s, v6.s[0] \n"
"fmla v17.4s, v11.4s, v6.s[1] \n"
"fmla v18.4s, v11.4s, v6.s[2] \n"
"fmla v19.4s, v11.4s, v6.s[3] \n"
"fmla v20.4s, v11.4s, v7.s[0] \n"
"fmla v21.4s, v11.4s, v7.s[1] \n"
"fmla v22.4s, v11.4s, v7.s[2] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"st1 {v20.4s}, [%4], #16 \n"
"st1 {v21.4s}, [%5], #16 \n"
"st1 {v22.4s}, [%6], #16 \n"
"st1 {v23.4s}, [%7], #16 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(bb2p0), // %8
"=r"(ktm0) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(bb2p0),
"9"(ktm0),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i<tiles; i++)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4);
const float* ktm0 = kernel_tm0.row(r);
float32x4_t _sum0123 = vdupq_n_f32(0.f);
float32x4_t _sum4567 = vdupq_n_f32(0.f);
int q=0;
for (; q+3<inch; q+=4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm0 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm1 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm2 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm3 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm1, _bb2p0, 0);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 1);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm3, _bb2p0, 1);
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm4 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm5 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm6 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm7 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm4, _bb2p0, 2);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm5, _bb2p0, 2);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm6, _bb2p0, 3);
_sum4567 = vmlaq_laneq_f32(_sum4567, _ktm7, _bb2p0, 3);
}
for (; q<inch; q++)
{
float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0);
float32x4_t _ktm0123 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm4567 = vld1q_f32(ktm0 + 4);
_sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0123);
_sum4567 = vmlaq_f32(_sum4567, _bb2p0, _ktm4567);
bb2p0 += 1;
ktm0 += 8;
}
float sum0 = vgetq_lane_f32(_sum0123, 0);
float sum1 = vgetq_lane_f32(_sum0123, 1);
float sum2 = vgetq_lane_f32(_sum0123, 2);
float sum3 = vgetq_lane_f32(_sum0123, 3);
float sum4 = vgetq_lane_f32(_sum4567, 0);
float sum5 = vgetq_lane_f32(_sum4567, 1);
float sum6 = vgetq_lane_f32(_sum4567, 2);
float sum7 = vgetq_lane_f32(_sum4567, 3);
output0_tm[0] = sum0;
output1_tm[0] = sum1;
output2_tm[0] = sum2;
output3_tm[0] = sum3;
output4_tm[0] = sum4;
output5_tm[0] = sum5;
output6_tm[0] = sum6;
output7_tm[0] = sum7;
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
output4_tm += 1;
output5_tm += 1;
output6_tm += 1;
output7_tm += 1;
}
}
}
#endif // __aarch64__
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
#if __ARM_NEON && __aarch64__
const Mat kernel_tm0 = kernel_tm.channel(p/8+(p%8)/4);
#else
const Mat kernel_tm0 = kernel_tm.channel(p/4);
#endif
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
const float* bb2p0 = bb2.row(i/8);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
// inch loop
"lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v7.4s, v1.s[0] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v1.s[2] \n"
"fmla v13.4s, v7.4s, v1.s[2] \n"
"fmla v14.4s, v6.4s, v1.s[3] \n"
"fmla v15.4s, v7.4s, v1.s[3] \n"
"fmla v8.4s, v16.4s, v2.s[0] \n"
"fmla v9.4s, v17.4s, v2.s[0] \n"
"fmla v10.4s, v16.4s, v2.s[1] \n"
"fmla v11.4s, v17.4s, v2.s[1] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v2.s[3] \n"
"fmla v15.4s, v17.4s, v2.s[3] \n"
"fmla v8.4s, v18.4s, v3.s[0] \n"
"fmla v9.4s, v19.4s, v3.s[0] \n"
"fmla v10.4s, v18.4s, v3.s[1] \n"
"fmla v11.4s, v19.4s, v3.s[1] \n"
"fmla v12.4s, v18.4s, v3.s[2] \n"
"fmla v13.4s, v19.4s, v3.s[2] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"st1 {v12.4s, v13.4s}, [%2], #32 \n"
"st1 {v14.4s, v15.4s}, [%3], #32 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"veor q12, q12, q12 \n"
"veor q13, q13, q13 \n"
"veor q14, q14, q14 \n"
"veor q15, q15, q15 \n"
// inch loop
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q15, q5, d1[1] \n"
"vmla.f32 q8, q6, d2[0] \n"
"vmla.f32 q9, q7, d2[0] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q12, q6, d3[0] \n"
"vmla.f32 q13, q7, d3[0] \n"
"vmla.f32 q14, q6, d3[1] \n"
"vmla.f32 q15, q7, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q9, q5, d4[0] \n"
"vmla.f32 q10, q4, d4[1] \n"
"vmla.f32 q11, q5, d4[1] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q14, q4, d5[1] \n"
"vmla.f32 q15, q5, d5[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d6[0] \n"
"vmla.f32 q9, q7, d6[0] \n"
"vmla.f32 q10, q6, d6[1] \n"
"vmla.f32 q11, q7, d6[1] \n"
"vmla.f32 q12, q6, d7[0] \n"
"vmla.f32 q13, q7, d7[0] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q11, q5, d0[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q15, q5, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0]! \n"
"vst1.f32 {d20-d23}, [%1]! \n"
"vst1.f32 {d24-d27}, [%2]! \n"
"vst1.f32 {d28-d31}, [%3]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum0_0 = 0.f;
float sum0_1 = 0.f;
float sum0_2 = 0.f;
float sum0_3 = 0.f;
float sum0_4 = 0.f;
float sum0_5 = 0.f;
float sum0_6 = 0.f;
float sum0_7 = 0.f;
float sum1_0 = 0.f;
float sum1_1 = 0.f;
float sum1_2 = 0.f;
float sum1_3 = 0.f;
float sum1_4 = 0.f;
float sum1_5 = 0.f;
float sum1_6 = 0.f;
float sum1_7 = 0.f;
float sum2_0 = 0.f;
float sum2_1 = 0.f;
float sum2_2 = 0.f;
float sum2_3 = 0.f;
float sum2_4 = 0.f;
float sum2_5 = 0.f;
float sum2_6 = 0.f;
float sum2_7 = 0.f;
float sum3_0 = 0.f;
float sum3_1 = 0.f;
float sum3_2 = 0.f;
float sum3_3 = 0.f;
float sum3_4 = 0.f;
float sum3_5 = 0.f;
float sum3_6 = 0.f;
float sum3_7 = 0.f;
for (int q=0; q<inch; q++)
{
sum0_0 += bb2p0[0] * ktm0[0];
sum0_1 += bb2p0[1] * ktm0[0];
sum0_2 += bb2p0[2] * ktm0[0];
sum0_3 += bb2p0[3] * ktm0[0];
sum0_4 += bb2p0[4] * ktm0[0];
sum0_5 += bb2p0[5] * ktm0[0];
sum0_6 += bb2p0[6] * ktm0[0];
sum0_7 += bb2p0[7] * ktm0[0];
sum1_0 += bb2p0[0] * ktm0[1];
sum1_1 += bb2p0[1] * ktm0[1];
sum1_2 += bb2p0[2] * ktm0[1];
sum1_3 += bb2p0[3] * ktm0[1];
sum1_4 += bb2p0[4] * ktm0[1];
sum1_5 += bb2p0[5] * ktm0[1];
sum1_6 += bb2p0[6] * ktm0[1];
sum1_7 += bb2p0[7] * ktm0[1];
sum2_0 += bb2p0[0] * ktm0[2];
sum2_1 += bb2p0[1] * ktm0[2];
sum2_2 += bb2p0[2] * ktm0[2];
sum2_3 += bb2p0[3] * ktm0[2];
sum2_4 += bb2p0[4] * ktm0[2];
sum2_5 += bb2p0[5] * ktm0[2];
sum2_6 += bb2p0[6] * ktm0[2];
sum2_7 += bb2p0[7] * ktm0[2];
sum3_0 += bb2p0[0] * ktm0[3];
sum3_1 += bb2p0[1] * ktm0[3];
sum3_2 += bb2p0[2] * ktm0[3];
sum3_3 += bb2p0[3] * ktm0[3];
sum3_4 += bb2p0[4] * ktm0[3];
sum3_5 += bb2p0[5] * ktm0[3];
sum3_6 += bb2p0[6] * ktm0[3];
sum3_7 += bb2p0[7] * ktm0[3];
bb2p0 += 8;
ktm0 += 4;
}
output0_tm[0] = sum0_0;
output0_tm[1] = sum0_1;
output0_tm[2] = sum0_2;
output0_tm[3] = sum0_3;
output0_tm[4] = sum0_4;
output0_tm[5] = sum0_5;
output0_tm[6] = sum0_6;
output0_tm[7] = sum0_7;
output1_tm[0] = sum1_0;
output1_tm[1] = sum1_1;
output1_tm[2] = sum1_2;
output1_tm[3] = sum1_3;
output1_tm[4] = sum1_4;
output1_tm[5] = sum1_5;
output1_tm[6] = sum1_6;
output1_tm[7] = sum1_7;
output2_tm[0] = sum2_0;
output2_tm[1] = sum2_1;
output2_tm[2] = sum2_2;
output2_tm[3] = sum2_3;
output2_tm[4] = sum2_4;
output2_tm[5] = sum2_5;
output2_tm[6] = sum2_6;
output2_tm[7] = sum2_7;
output3_tm[0] = sum3_0;
output3_tm[1] = sum3_1;
output3_tm[2] = sum3_2;
output3_tm[3] = sum3_3;
output3_tm[4] = sum3_4;
output3_tm[5] = sum3_5;
output3_tm[6] = sum3_6;
output3_tm[7] = sum3_7;
output0_tm += 8;
output1_tm += 8;
output2_tm += 8;
output3_tm += 8;
#endif // __ARM_NEON
}
for (; i+3<tiles; i+=4)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
// inch loop
"lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v8.4s, v5.4s, v1.s[0] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v1.s[2] \n"
"fmla v11.4s, v5.4s, v1.s[3] \n"
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"fmla v8.4s, v7.4s, v3.s[0] \n"
"fmla v9.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v7.4s, v3.s[2] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%2], #16 \n"
"st1 {v11.4s}, [%3], #16 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
// inch loop
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q8, q5, d2[0] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d3[0] \n"
"vmla.f32 q11, q5, d3[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vmla.f32 q8, q7, d6[0] \n"
"vmla.f32 q9, q7, d6[1] \n"
"vmla.f32 q10, q7, d7[0] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0]! \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"vst1.f32 {d20-d21}, [%2]! \n"
"vst1.f32 {d22-d23}, [%3]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
#else
float sum0_0 = 0.f;
float sum0_1 = 0.f;
float sum0_2 = 0.f;
float sum0_3 = 0.f;
float sum1_0 = 0.f;
float sum1_1 = 0.f;
float sum1_2 = 0.f;
float sum1_3 = 0.f;
float sum2_0 = 0.f;
float sum2_1 = 0.f;
float sum2_2 = 0.f;
float sum2_3 = 0.f;
float sum3_0 = 0.f;
float sum3_1 = 0.f;
float sum3_2 = 0.f;
float sum3_3 = 0.f;
for (int q=0; q<inch; q++)
{
sum0_0 += bb2p0[0] * ktm0[0];
sum0_1 += bb2p0[1] * ktm0[0];
sum0_2 += bb2p0[2] * ktm0[0];
sum0_3 += bb2p0[3] * ktm0[0];
sum1_0 += bb2p0[0] * ktm0[1];
sum1_1 += bb2p0[1] * ktm0[1];
sum1_2 += bb2p0[2] * ktm0[1];
sum1_3 += bb2p0[3] * ktm0[1];
sum2_0 += bb2p0[0] * ktm0[2];
sum2_1 += bb2p0[1] * ktm0[2];
sum2_2 += bb2p0[2] * ktm0[2];
sum2_3 += bb2p0[3] * ktm0[2];
sum3_0 += bb2p0[0] * ktm0[3];
sum3_1 += bb2p0[1] * ktm0[3];
sum3_2 += bb2p0[2] * ktm0[3];
sum3_3 += bb2p0[3] * ktm0[3];
bb2p0 += 4;
ktm0 += 4;
}
output0_tm[0] = sum0_0;
output0_tm[1] = sum0_1;
output0_tm[2] = sum0_2;
output0_tm[3] = sum0_3;
output1_tm[0] = sum1_0;
output1_tm[1] = sum1_1;
output1_tm[2] = sum1_2;
output1_tm[3] = sum1_3;
output2_tm[0] = sum2_0;
output2_tm[1] = sum2_1;
output2_tm[2] = sum2_2;
output2_tm[3] = sum2_3;
output3_tm[0] = sum3_0;
output3_tm[1] = sum3_1;
output3_tm[2] = sum3_2;
output3_tm[3] = sum3_3;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
#endif // __ARM_NEON
}
for (; i<tiles; i++)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
float32x4_t _sum0123 = vdupq_n_f32(0.f);
int q=0;
for (; q+3<inch; q+=4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm0 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm1 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm2 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm3 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
#if __aarch64__
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm1, _bb2p0, 1);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 2);
_sum0123 = vmlaq_laneq_f32(_sum0123, _ktm3, _bb2p0, 3);
#else
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm0, vget_low_f32(_bb2p0), 0);
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm1, vget_low_f32(_bb2p0), 1);
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm2, vget_high_f32(_bb2p0), 0);
_sum0123 = vmlaq_lane_f32(_sum0123, _ktm3, vget_high_f32(_bb2p0), 1);
#endif // __aarch64__
}
for (; q<inch; q++)
{
float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0);
float32x4_t _ktm0 = vld1q_f32(ktm0);
_sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0);
bb2p0 += 1;
ktm0 += 4;
}
float sum0 = vgetq_lane_f32(_sum0123, 0);
float sum1 = vgetq_lane_f32(_sum0123, 1);
float sum2 = vgetq_lane_f32(_sum0123, 2);
float sum3 = vgetq_lane_f32(_sum0123, 3);
#else
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
for (int q=0; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[0] * ktm0[1];
sum2 += bb2p0[0] * ktm0[2];
sum3 += bb2p0[0] * ktm0[3];
bb2p0 += 1;
ktm0 += 4;
}
#endif // __ARM_NEON
output0_tm[0] = sum0;
output1_tm[0] = sum1;
output2_tm[0] = sum2;
output3_tm[0] = sum3;
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
}
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
#if __ARM_NEON && __aarch64__
const Mat kernel_tm0 = kernel_tm.channel(p/8+(p%8)/4+p%4);
#else
const Mat kernel_tm0 = kernel_tm.channel(p/4+p%4);
#endif
Mat out0_tm = top_blob_tm.channel(p);
float* output0_tm = out0_tm;
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
const float* bb2p0 = bb2.row(i/8);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
// inch loop
"lsr w4, %w6, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w6, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1], #32 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
// inch loop
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
// "vld1.f32 {d24-d27}, [%1 :128]! \n"
// "vld1.f32 {d28-d31}, [%1 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #256] \n"
"vld1.f32 {d8-d11}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0]! \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
float sum4 = 0.f;
float sum5 = 0.f;
float sum6 = 0.f;
float sum7 = 0.f;
for (int q=0; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[1] * ktm0[0];
sum2 += bb2p0[2] * ktm0[0];
sum3 += bb2p0[3] * ktm0[0];
sum4 += bb2p0[4] * ktm0[0];
sum5 += bb2p0[5] * ktm0[0];
sum6 += bb2p0[6] * ktm0[0];
sum7 += bb2p0[7] * ktm0[0];
bb2p0 += 8;
ktm0 += 1;
}
output0_tm[0] = sum0;
output0_tm[1] = sum1;
output0_tm[2] = sum2;
output0_tm[3] = sum3;
output0_tm[4] = sum4;
output0_tm[5] = sum5;
output0_tm[6] = sum6;
output0_tm[7] = sum7;
output0_tm += 8;
#endif // __ARM_NEON
}
for (; i+3<tiles; i+=4)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4);
const float* ktm0 = kernel_tm0.row(r);
#if __ARM_NEON
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
// inch loop
"lsr w4, %w6, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w6, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #32] \n"
"ld1r {v0.4s}, [%5], #4 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
// inch loop
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4]! \n"
"pld [%5, #32] \n"
"vld1.f32 {d0[],d1[]}, [%5]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0]! \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
for (int q=0; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[1] * ktm0[0];
sum2 += bb2p0[2] * ktm0[0];
sum3 += bb2p0[3] * ktm0[0];
bb2p0 += 4;
ktm0 += 1;
}
output0_tm[0] = sum0;
output0_tm[1] = sum1;
output0_tm[2] = sum2;
output0_tm[3] = sum3;
output0_tm += 4;
#endif // __ARM_NEON
}
for (; i<tiles; i++)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4);
const float* ktm0 = kernel_tm0.row(r);
int q=0;
#if __ARM_NEON
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (; q+3<inch; q+=4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
float32x4_t _ktm0 = vld1q_f32(ktm0);
ktm0 += 4;
_sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0);
}
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float sum0 = vget_lane_f32(vpadd_f32(_ss0, _ss0), 0);
#endif // __aarch64__
#else
float sum0 = 0.f;
#endif
for (; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
bb2p0 += 1;
ktm0 += 1;
}
output0_tm[0] = sum0;
output0_tm += 1;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
#if __ARM_NEON
const float coeff[4] = { 4.f, 8.f, 16.f, 32.f };
float32x4_t _coeff = vld1q_f32(coeff);
#endif // __ARM_NEON
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
#if __ARM_NEON
float32x2_t _bias0 = vdup_n_f32(bias0);
#endif // __ARM_NEON
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
#if __ARM_NEON
#if __aarch64__
const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles*8);
const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles*16);
const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles*24);
for (int m=0; m+3<8; m+=4)
{
float32x4_t _output0_tm_00;
float32x4_t _output0_tm_11;
float32x4_t _output0_tm_22;
float32x4_t _output0_tm_33;
float32x4_t _output0_tm_44;
float32x4_t _output0_tm_55;
float32x4_t _output0_tm_66;
float32x4_t _output0_tm_77;
_output0_tm_00 = vsetq_lane_f32(output0_tm0[0], _output0_tm_00, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm1[0], _output0_tm_00, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm2[0], _output0_tm_00, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm3[0], _output0_tm_00, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm0[0], _output0_tm_11, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm1[0], _output0_tm_11, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm2[0], _output0_tm_11, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm3[0], _output0_tm_11, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm0[0], _output0_tm_22, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm1[0], _output0_tm_22, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm2[0], _output0_tm_22, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm3[0], _output0_tm_22, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm0[0], _output0_tm_33, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm1[0], _output0_tm_33, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm2[0], _output0_tm_33, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm3[0], _output0_tm_33, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm0[0], _output0_tm_44, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm1[0], _output0_tm_44, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm2[0], _output0_tm_44, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm3[0], _output0_tm_44, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm0[0], _output0_tm_55, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm1[0], _output0_tm_55, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm2[0], _output0_tm_55, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm3[0], _output0_tm_55, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm0[0], _output0_tm_66, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm1[0], _output0_tm_66, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm2[0], _output0_tm_66, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm3[0], _output0_tm_66, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_77 = vsetq_lane_f32(output0_tm0[0], _output0_tm_77, 0);
_output0_tm_77 = vsetq_lane_f32(output0_tm1[0], _output0_tm_77, 1);
_output0_tm_77 = vsetq_lane_f32(output0_tm2[0], _output0_tm_77, 2);
_output0_tm_77 = vsetq_lane_f32(output0_tm3[0], _output0_tm_77, 3);
float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a);
_tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1);
_tmp0 = vaddq_f32(_tmp0, _tmp024b);
float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1);
float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[2][m], _tmp2);
vst1q_f32(&tmp[4][m], _tmp4);
float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0);
float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a);
_tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1);
_tmp5 = vaddq_f32(_tmp5, _tmp135c);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[5][m], _tmp5);
output0_tm0 += out0_tm.w*tiles*25;
output0_tm1 += out0_tm.w*tiles*25;
output0_tm2 += out0_tm.w*tiles*25;
output0_tm3 += out0_tm.w*tiles*25;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
for (int m=0; m+1<6; m+=2)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]);
float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]);
float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]);
float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]);
float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]);
float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]);
float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]);
float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]);
float32x2_t _tmp024a = vadd_f32(_t_11, _t_22);
float32x2_t _tmp135a = vsub_f32(_t_11, _t_22);
float32x2_t _tmp024b = vadd_f32(_t_33, _t_44);
float32x2_t _tmp135b = vsub_f32(_t_33, _t_44);
float32x2_t _tmp024c = vadd_f32(_t_55, _t_66);
float32x2_t _tmp135c = vsub_f32(_t_55, _t_66);
float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a);
_output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1);
_output_0 = vadd_f32(_output_0, _tmp024b);
_output_0 = vadd_f32(_output_0, _bias0);
float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1);
_output_2 = vadd_f32(_output_2, _bias0);
float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _bias0);
output0[0] = vget_lane_f32(_output_0, 0);
output1[0] = vget_lane_f32(_output_0, 1);
output0[2] = vget_lane_f32(_output_2, 0);
output1[2] = vget_lane_f32(_output_2, 1);
output0[4] = vget_lane_f32(_output_4, 0);
output1[4] = vget_lane_f32(_output_4, 1);
float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _bias0);
float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0);
_output_3 = vadd_f32(_output_3, _bias0);
float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a);
_output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1);
_output_5 = vadd_f32(_output_5, _tmp135c);
_output_5 = vadd_f32(_output_5, _bias0);
output0[1] = vget_lane_f32(_output_1, 0);
output1[1] = vget_lane_f32(_output_1, 1);
output0[3] = vget_lane_f32(_output_3, 0);
output1[3] = vget_lane_f32(_output_3, 1);
output0[5] = vget_lane_f32(_output_5, 0);
output1[5] = vget_lane_f32(_output_5, 1);
t0 += 8*2;
t1 += 8*2;
output0 += outw*2;
output1 += outw*2;
}
#else // __aarch64__
const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*8);
const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*16);
const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*24);
const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles*32);
const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*40);
const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*48);
const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*56);
float* t0 = tmp[0];
float* t1 = tmp[1];
// int step = out0_tm.w * tiles * 2*4 *4;
int step = out0_tm.w * tiles *4;
asm volatile(
// loop0
// "vld1.f32 {d16-d17}, [%2], %21 \n"
// "vld1.f32 {d18-d19}, [%3], %21 \n"
// "vld1.f32 {d20-d21}, [%4], %21 \n"
// "vld1.f32 {d22-d23}, [%5], %21 \n"
// "vld1.f32 {d24-d25}, [%6], %21 \n"
// "vld1.f32 {d26-d27}, [%7], %21 \n"
// "vld1.f32 {d28-d29}, [%8], %21 \n"
// "vld1.f32 {d30-d31}, [%9], %21 \n"
// "vtrn.32 q8, q10 \n"
// "vtrn.32 q9, q11 \n"
// "vtrn.32 q12, q14 \n"
// "vtrn.32 q13, q15 \n"
// "vswp d17, d24 \n"
// "vswp d19, d26 \n"
// "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
// "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vld1.f32 {d16[0]}, [%2], %21 \n"
"vld1.f32 {d16[1]}, [%3], %21 \n"
"vld1.f32 {d17[0]}, [%4], %21 \n"
"vld1.f32 {d17[1]}, [%5], %21 \n"
"vld1.f32 {d20[0]}, [%2], %21 \n"
"vld1.f32 {d20[1]}, [%3], %21 \n"
"vld1.f32 {d21[0]}, [%4], %21 \n"
"vld1.f32 {d21[1]}, [%5], %21 \n"
"vld1.f32 {d24[0]}, [%2], %21 \n"
"vld1.f32 {d24[1]}, [%3], %21 \n"
"vld1.f32 {d25[0]}, [%4], %21 \n"
"vld1.f32 {d25[1]}, [%5], %21 \n"
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vld1.f32 {d28[0]}, [%2], %21 \n"
"vld1.f32 {d28[1]}, [%3], %21 \n"
"vld1.f32 {d29[0]}, [%4], %21 \n"
"vld1.f32 {d29[1]}, [%5], %21 \n"
"vld1.f32 {d18[0]}, [%2], %21 \n"
"vld1.f32 {d18[1]}, [%3], %21 \n"
"vld1.f32 {d19[0]}, [%4], %21 \n"
"vld1.f32 {d19[1]}, [%5], %21 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vld1.f32 {d22[0]}, [%2], %21 \n"
"vld1.f32 {d22[1]}, [%3], %21 \n"
"vld1.f32 {d23[0]}, [%4], %21 \n"
"vld1.f32 {d23[1]}, [%5], %21 \n"
"vld1.f32 {d26[0]}, [%2], %21 \n"
"vld1.f32 {d26[1]}, [%3], %21 \n"
"vld1.f32 {d27[0]}, [%4], %21 \n"
"vld1.f32 {d27[1]}, [%5], %21 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vld1.f32 {d30[0]}, [%2] \n"
"vld1.f32 {d30[1]}, [%3] \n"
"vld1.f32 {d31[0]}, [%4] \n"
"vld1.f32 {d31[1]}, [%5] \n"
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"sub %0, %0, #112 \n"
"vst1.f32 {d30-d31}, [%1] \n"
"sub %1, %1, #112 \n"
// loop1
// "vld1.f32 {d16-d17}, [%2] \n"
// "vld1.f32 {d18-d19}, [%3] \n"
// "vld1.f32 {d20-d21}, [%4] \n"
// "vld1.f32 {d22-d23}, [%5] \n"
// "vld1.f32 {d24-d25}, [%6] \n"
// "vld1.f32 {d26-d27}, [%7] \n"
// "vld1.f32 {d28-d29}, [%8] \n"
// "vld1.f32 {d30-d31}, [%9] \n"
// "vtrn.32 q8, q10 \n"
// "vtrn.32 q9, q11 \n"
// "vtrn.32 q12, q14 \n"
// "vtrn.32 q13, q15 \n"
// "vswp d17, d24 \n"
// "vswp d19, d26 \n"
// "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
// "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vld1.f32 {d16[0]}, [%6], %21 \n"
"vld1.f32 {d16[1]}, [%7], %21 \n"
"vld1.f32 {d17[0]}, [%8], %21 \n"
"vld1.f32 {d17[1]}, [%9], %21 \n"
"vld1.f32 {d20[0]}, [%6], %21 \n"
"vld1.f32 {d20[1]}, [%7], %21 \n"
"vld1.f32 {d21[0]}, [%8], %21 \n"
"vld1.f32 {d21[1]}, [%9], %21 \n"
"vld1.f32 {d24[0]}, [%6], %21 \n"
"vld1.f32 {d24[1]}, [%7], %21 \n"
"vld1.f32 {d25[0]}, [%8], %21 \n"
"vld1.f32 {d25[1]}, [%9], %21 \n"
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vld1.f32 {d28[0]}, [%6], %21 \n"
"vld1.f32 {d28[1]}, [%7], %21 \n"
"vld1.f32 {d29[0]}, [%8], %21 \n"
"vld1.f32 {d29[1]}, [%9], %21 \n"
"vld1.f32 {d18[0]}, [%6], %21 \n"
"vld1.f32 {d18[1]}, [%7], %21 \n"
"vld1.f32 {d19[0]}, [%8], %21 \n"
"vld1.f32 {d19[1]}, [%9], %21 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vld1.f32 {d22[0]}, [%6], %21 \n"
"vld1.f32 {d22[1]}, [%7], %21 \n"
"vld1.f32 {d23[0]}, [%8], %21 \n"
"vld1.f32 {d23[1]}, [%9], %21 \n"
"vld1.f32 {d26[0]}, [%6], %21 \n"
"vld1.f32 {d26[1]}, [%7], %21 \n"
"vld1.f32 {d27[0]}, [%8], %21 \n"
"vld1.f32 {d27[1]}, [%9], %21 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vld1.f32 {d30[0]}, [%6] \n"
"vld1.f32 {d30[1]}, [%7] \n"
"vld1.f32 {d31[0]}, [%8] \n"
"vld1.f32 {d31[1]}, [%9] \n"
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"vst1.f32 {d30-d31}, [%1] \n"
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(output0_tm0_0), // %2
"=r"(output0_tm1_0), // %3
"=r"(output0_tm2_0), // %4
"=r"(output0_tm3_0), // %5
"=r"(output0_tm0_4), // %6
"=r"(output0_tm1_4), // %7
"=r"(output0_tm2_4), // %8
"=r"(output0_tm3_4) // %9
: "0"(t0),
"1"(t1),
"2"(output0_tm0_0),
"3"(output0_tm1_0),
"4"(output0_tm2_0),
"5"(output0_tm3_0),
"6"(output0_tm0_4),
"7"(output0_tm1_4),
"8"(output0_tm2_4),
"9"(output0_tm3_4),
"w"(_coeff), // %20
"r"(step) // %21
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
int stepw = outw*2 * 4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop1
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop2
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(t0), // %2
"=r"(t1) // %3
: "0"(output0),
"1"(output1),
"2"(t0),
"3"(t1),
"w"(_coeff), // %8
"w"(_bias0), // %9
"r"(stepw) // %10
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm_1 = out0_tm.row(i * w_tm/8 + j + tiles);
const float* output0_tm_2 = out0_tm.row(i * w_tm/8 + j + tiles*2);
const float* output0_tm_3 = out0_tm.row(i * w_tm/8 + j + tiles*3);
const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles*4);
const float* output0_tm_5 = out0_tm.row(i * w_tm/8 + j + tiles*5);
const float* output0_tm_6 = out0_tm.row(i * w_tm/8 + j + tiles*6);
const float* output0_tm_7 = out0_tm.row(i * w_tm/8 + j + tiles*7);
for (int m=0; m<8; m++)
{
float tmp024a = output0_tm_1[0] + output0_tm_2[0];
float tmp135a = output0_tm_1[0] - output0_tm_2[0];
float tmp024b = output0_tm_3[0] + output0_tm_4[0];
float tmp135b = output0_tm_3[0] - output0_tm_4[0];
float tmp024c = output0_tm_5[0] + output0_tm_6[0];
float tmp135c = output0_tm_5[0] - output0_tm_6[0];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += out0_tm.w * tiles * 8;
output0_tm_1 += out0_tm.w * tiles * 8;
output0_tm_2 += out0_tm.w * tiles * 8;
output0_tm_3 += out0_tm.w * tiles * 8;
output0_tm_4 += out0_tm.w * tiles * 8;
output0_tm_5 += out0_tm.w * tiles * 8;
output0_tm_6 += out0_tm.w * tiles * 8;
output0_tm_7 += out0_tm.w * tiles * 8;
}
float* output0 = out0.row(i * 6) + j * 6;
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
#endif // __ARM_NEON
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}
// conv3x3s2_neon: direct 3x3 convolution with stride 2, float32, ARM NEON.
// Accumulates over input channels into top_blob; the caller is assumed to have
// already padded bottom_blob so every 3x3 window read below stays in bounds.
// Output channels are processed two at a time so each input-row load is shared
// by two kernels; any leftover odd channel is handled in a second pass.
static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After producing one output row the read pointers sit at x = 2*outw.
// tailstep skips the remainder of that input row plus one full row
// (stride 2 consumes two input rows per output row).
const int tailstep = w - 2*outw + w;
const float* kernel = _kernel;
// bias may be an empty Mat, in which case this pointer is NULL (checked below).
const float* bias = _bias;
// Split output channels into pairs; remain_outch_start is the first unpaired one.
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
// Seed both output maps with their bias; the per-input-channel loop
// below accumulates partial sums on top of this.
out0.fill(bias0);
out1.fill(bias1);
// 9 kernel taps per (output channel, input channel) pair.
const float* k0 = kernel + p*inch*9;
const float* k1 = kernel + (p+1)*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
const float* img0 = bottom_blob.channel(q);
// Three consecutive input rows feeding one output row.
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
#if __ARM_NEON
// Kernel rows for both output channels. Each vld1q loads 4 floats but
// only lanes 0..2 (the 3 taps of a row) are referenced by the
// lane-indexed multiplies; lane 3 overlaps the next row's first tap.
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k03 = vld1q_f32(k0+3);
float32x4_t _k06 = vld1q_f32(k0+6);
float32x4_t _k10 = vld1q_f32(k1);
float32x4_t _k13 = vld1q_f32(k1+3);
float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
// 4 output pixels per vector iteration, scalar tail for the rest.
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
// Vector loop: ld2 de-interleaves the stride-2 input into even
// (v8) and odd (v9) lanes; the third tap of each row comes from
// ext-ing the next even vector in by one lane. Both output
// channels are accumulated in the same pass (v6/v12 for out0,
// v7/v13 for out1).
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v6.4s}, [%1] \n"// v6 = _sum0
"fmul v12.4s, v8.4s, %12.s[0] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v7.4s}, [%2] \n"// v7 = _sum1
"fmul v13.4s, v8.4s, %15.s[0] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld2 {v10.4s, v11.4s}, [%3] \n"// v10
"fmla v6.4s, v9.4s, %12.s[1] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v7.4s, v9.4s, %15.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v8.4s, v9.4s}, [%4], #32 \n"// r1
"fmla v12.4s, v14.4s, %12.s[2] \n"
"fmla v13.4s, v14.4s, %15.s[2] \n"
"prfm pldl1keep, [%4, #128] \n"
"ld2 {v10.4s, v11.4s}, [%4] \n"
"fmla v6.4s, v8.4s, %13.s[0] \n"
"fmla v7.4s, v8.4s, %16.s[0] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v12.4s, v9.4s, %13.s[1] \n"
"fmla v13.4s, v9.4s, %16.s[1] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v8.4s, v9.4s}, [%5], #32 \n"// r2
"fmla v6.4s, v14.4s, %13.s[2] \n"
"fmla v7.4s, v14.4s, %16.s[2] \n"
"prfm pldl1keep, [%5, #128] \n"
"ld2 {v10.4s, v11.4s}, [%5] \n"
"fmla v12.4s, v8.4s, %14.s[0] \n"
"fmla v13.4s, v8.4s, %17.s[0] \n"
"ext v14.16b, v8.16b, v10.16b, #4\n"
"fmla v6.4s, v9.4s, %14.s[1] \n"
"fmla v7.4s, v9.4s, %17.s[1] \n"
"fmla v12.4s, v14.4s, %14.s[2] \n"
"fmla v13.4s, v14.4s, %17.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
"fadd v6.4s, v6.4s, v12.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%2], #16 \n"
"bne 0b \n"
"sub %3, %3, #32 \n"// undo the last pre-load of r0
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
// ARMv7 variant of the loop above: vld2 de-interleaves the
// stride-2 input (q8 even / q9 odd), vext supplies the third tap,
// q6/q12 accumulate out0 and q7/q13 accumulate out1.
asm volatile(
"pld [%3, #256] \n"
"vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1] \n"// q6 = _sum0
"vmul.f32 q12, q8, %e12[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2] \n"// q7 = _sum1
"vmul.f32 q13, q8, %e15[0] \n"
"pld [%3, #128] \n"
"vld2.f32 {d20-d21}, [%3] \n"// q10
"vmla.f32 q6, q9, %e12[1] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q7, q9, %e15[1] \n"
"pld [%4, #256] \n"
"vld2.f32 {d16-d19}, [%4]! \n"// r1
"vmla.f32 q12, q11, %f12[0] \n"
"vmla.f32 q13, q11, %f15[0] \n"
"pld [%4, #128] \n"
"vld2.f32 {d20-d21}, [%4] \n"
"vmla.f32 q6, q8, %e13[0] \n"
"vmla.f32 q7, q8, %e16[0] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q12, q9, %e13[1] \n"
"vmla.f32 q13, q9, %e16[1] \n"
"pld [%5, #256] \n"
"vld2.f32 {d16-d19}, [%5]! \n"// r2
"vmla.f32 q6, q11, %f13[0] \n"
"vmla.f32 q7, q11, %f16[0] \n"
"pld [%5, #128] \n"
"vld2.f32 {d20-d21}, [%5] \n"
"vmla.f32 q12, q8, %e14[0] \n"
"vmla.f32 q13, q8, %e17[0] \n"
"vext.32 q11, q8, q10, #1 \n"
"vmla.f32 q6, q9, %e14[1] \n"
"vmla.f32 q7, q9, %e17[1] \n"
"vmla.f32 q12, q11, %f14[0] \n"
"vmla.f32 q13, q11, %f17[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
"vadd.f32 q6, q6, q12 \n"
"vadd.f32 q7, q7, q13 \n"
"subs %0, #1 \n"
"vst1.f32 {d12-d13}, [%1]! \n"
"vst1.f32 {d14-d15}, [%2]! \n"
"bne 0b \n"
"sub %3, #32 \n"// undo the last pre-load of r0
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail: one output pixel per iteration for both channels.
for (; remain>0; remain--)
{
#if __ARM_NEON
// Lanes 0..2 hold the 3 taps of each row; the lane-3 product
// (r[3]*k[3]) is discarded by overwriting lane 3 with the current
// accumulator, so the horizontal sum yields acc + full 3x3 dot.
// NOTE(review): this reads 4 floats per row (one past the 3 taps);
// assumed safe because of caller-side padding — confirm.
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
// Plain C fallback: 3x3 dot product for each of the two channels.
float sum0 = 0.f;
float sum1 = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
#endif // __ARM_NEON
// Stride 2: advance two input columns per output column.
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
outptr1++;
}
// Skip to the start of the next input-row pair (see tailstep above).
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
// Next input channel's 3x3 taps.
k0 += 9;
k1 += 9;
}
}
// Second pass: leftover output channels, one at a time.
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* kernel0 = kernel + p*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
// Per-row kernel tap pointers for the scalar path.
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#if __ARM_NEON
// As above: 4-float loads whose lane 3 is unused by the lane-indexed asm.
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k3456 = vld1q_f32(k1);
float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
// Single-channel vector loop; same ld2/ext de-interleave scheme
// as the paired loop, with v0/v10/v11 as partial accumulators.
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"0: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.4s}, [%1] \n"
"fmla v0.4s, v2.4s, %10.s[0] \n"
"fmul v10.4s, v3.4s, %10.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmul v11.4s, v1.4s, %10.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v2.4s, v3.4s}, [%3], #32 \n"
"fmla v0.4s, v2.4s, %11.s[0] \n"
"fmla v10.4s, v3.4s, %11.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v8.4s, v9.4s}, [%3] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %11.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v2.4s, v3.4s}, [%4], #32 \n"
"fmla v0.4s, v2.4s, %12.s[0] \n"
"fmla v10.4s, v3.4s, %12.s[1] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v8.4s, v9.4s}, [%4] \n"
"ext v1.16b, v2.16b, v8.16b, #4 \n"
"fmla v11.4s, v1.4s, %12.s[2] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v2.4s, v3.4s}, [%2], #32 \n"
"fadd v0.4s, v0.4s, v10.4s \n"
"fadd v0.4s, v0.4s, v11.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v0.4s}, [%1], #16 \n"
"bne 0b \n"
"sub %2, %2, #32 \n"// undo the last pre-load of r0
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
// ARMv7 variant of the single-channel vector loop.
asm volatile(
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d0-d1}, [%1] \n"
"vmla.f32 q0, q2, %e10[0] \n"
"vmul.f32 q10, q3, %e10[1] \n"
"pld [%2, #128] \n"
"vld2.f32 {d16-d17}, [%2] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmul.f32 q11, q1, %f10[0] \n"
"pld [%3, #256] \n"
"vld2.f32 {d4-d7}, [%3]! \n"
"vmla.f32 q0, q2, %e11[0] \n"
"vmla.f32 q10, q3, %e11[1] \n"
"pld [%3, #128] \n"
"vld2.f32 {d16-d17}, [%3] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f11[0] \n"
"pld [%4, #256] \n"
"vld2.f32 {d4-d7}, [%4]! \n"
"vmla.f32 q0, q2, %e12[0] \n"
"vmla.f32 q10, q3, %e12[1] \n"
"pld [%4, #128] \n"
"vld2.f32 {d16-d17}, [%4] \n"
"vext.32 q1, q2, q8, #1 \n"
"vmla.f32 q11, q1, %f12[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d4-d7}, [%2]! \n"
"vadd.f32 q0, q0, q10 \n"
"vadd.f32 q0, q0, q11 \n"
"subs %0, #1 \n"
"vst1.f32 {d0-d1}, [%1]! \n"
"bne 0b \n"
"sub %2, #32 \n"// undo the last pre-load of r0
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail for the single channel; same lane-3 accumulator trick.
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
}
static void conv3x3s2_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    // Repack 3x3 kernels for conv3x3s2_packed_neon.
    // Output channels are handled in groups of 8: within a group the 9*inch
    // taps are interleaved so that 8 consecutive floats hold the same tap of
    // 8 different output channels. Leftover channels are stored plainly,
    // 9 floats per input channel, one destination channel each.
    kernel_tm.create(8 * 9, inch, outch / 8 + outch % 8);

    const float* kernel = _kernel;

    int p = 0;
    for (; p + 7 < outch; p += 8)
    {
        // Eight source kernel planes walked in lockstep.
        const float* src[8];
        for (int j = 0; j < 8; j++)
            src[j] = kernel + (p + j) * inch * 9;

        float* ktmp = kernel_tm.channel(p / 8);

        for (int q = 0; q < inch; q++)
        {
            for (int k = 0; k < 9; k++)
            {
                // Interleave: tap k of each of the 8 output channels.
                for (int j = 0; j < 8; j++)
                    ktmp[j] = src[j][k];
                ktmp += 8;
            }
            for (int j = 0; j < 8; j++)
                src[j] += 9;
        }
    }
    for (; p < outch; p++)
    {
        // Remainder channels: straight copy, 9 floats per input channel.
        const float* k0 = kernel + p * inch * 9;
        float* ktmp = kernel_tm.channel(p / 8 + p % 8);

        for (int q = 0; q < inch; q++)
        {
            for (int k = 0; k < 9; k++)
                ktmp[k] = k0[k];
            ktmp += 9;
            k0 += 9;
        }
    }
}
// 3x3 stride-2 convolution using the kernel layout produced by
// conv3x3s2_transform_kernel_neon: output channels are processed 8 at a
// time with interleaved kernel taps, then one at a time for the remainder.
// Inner rows are vectorized with hand-scheduled NEON inline asm (separate
// aarch64 and 32-bit ARM bodies) with a scalar tail loop per row.
static void conv3x3s2_packed_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// Input pointer advance from the end of one output row to the start of
// the next: skip the rest of the current input row plus one full row
// (stride 2 vertically).
const int tailstep = w - 2*outw + w;
// const float* kernel = _kernel;
const float* bias = _bias;
// Main pass handles outch in blocks of 8; the second pass below handles
// the remaining outch % 8 channels.
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
Mat out0 = top_blob.channel(p+0);
Mat out1 = top_blob.channel(p+1);
Mat out2 = top_blob.channel(p+2);
Mat out3 = top_blob.channel(p+3);
Mat out4 = top_blob.channel(p+4);
Mat out5 = top_blob.channel(p+5);
Mat out6 = top_blob.channel(p+6);
Mat out7 = top_blob.channel(p+7);
const float bias0 = bias ? bias[p+0] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
const float bias2 = bias ? bias[p+2] : 0.f;
const float bias3 = bias ? bias[p+3] : 0.f;
const float bias4 = bias ? bias[p+4] : 0.f;
const float bias5 = bias ? bias[p+5] : 0.f;
const float bias6 = bias ? bias[p+6] : 0.f;
const float bias7 = bias ? bias[p+7] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
out2.fill(bias2);
out3.fill(bias3);
out4.fill(bias4);
out5.fill(bias5);
out6.fill(bias6);
out7.fill(bias7);
const float* ktmp = _kernel.channel(p/8);
// Accumulate the contribution of every input channel into the 8 outputs.
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr2 = out2;
float* outptr3 = out3;
float* outptr4 = out4;
float* outptr5 = out5;
float* outptr6 = out6;
float* outptr7 = out7;
const float* img0 = bottom_blob.channel(q);
// Three consecutive input rows feeding one output row.
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// 4 output columns x 8 output channels per iteration; ld2 splits the
// stride-2 input into even/odd lanes, ext builds the shifted column.
if (nn > 0)
{
asm volatile(
"0:                                   \n"
"prfm   pldl1keep, [%1, #128]         \n"
"ld1    {v8.4s}, [%1]                 \n"
"prfm   pldl1keep, [%2, #128]         \n"
"ld1    {v9.4s}, [%2]                 \n"
"prfm   pldl1keep, [%3, #128]         \n"
"ld1    {v10.4s}, [%3]                \n"
"prfm   pldl1keep, [%4, #128]         \n"
"ld1    {v11.4s}, [%4]                \n"
///
"prfm   pldl1keep, [%9, #256]         \n"
"ld2    {v4.4s, v5.4s}, [%9], #32     \n"// v4=00 v5=01
"ld1    {v0.4s, v1.4s}, [%12], #32    \n"
"fmla   v8.4s, v4.4s, v0.s[0]         \n"
"fmla   v9.4s, v4.4s, v0.s[1]         \n"
"prfm   pldl1keep, [%5, #128]         \n"
"ld1    {v12.4s}, [%5]                \n"
"prfm   pldl1keep, [%6, #128]         \n"
"ld1    {v13.4s}, [%6]                \n"
"fmla   v10.4s, v4.4s, v0.s[2]        \n"
"fmla   v11.4s, v4.4s, v0.s[3]        \n"
"prfm   pldl1keep, [%7, #128]         \n"
"ld1    {v14.4s}, [%7]                \n"
"prfm   pldl1keep, [%8, #128]         \n"
"ld1    {v15.4s}, [%8]                \n"
"ld1    {v2.4s, v3.4s}, [%12], #32    \n"
"fmla   v12.4s, v4.4s, v1.s[0]        \n"
"fmla   v13.4s, v4.4s, v1.s[1]        \n"
"fmla   v14.4s, v4.4s, v1.s[2]        \n"
"fmla   v15.4s, v4.4s, v1.s[3]        \n"
"prfm   pldl1keep, [%9, #256]         \n"
"ld2    {v6.4s, v7.4s}, [%9]          \n"// v6
"fmla   v8.4s, v5.4s, v2.s[0]         \n"
"fmla   v9.4s, v5.4s, v2.s[1]         \n"
"fmla   v10.4s, v5.4s, v2.s[2]        \n"
"fmla   v11.4s, v5.4s, v2.s[3]        \n"
"ext    v6.16b, v4.16b, v6.16b, #4    \n"// v6=02
"ld1    {v0.4s, v1.4s}, [%12], #32    \n"
"fmla   v12.4s, v5.4s, v3.s[0]        \n"
"fmla   v13.4s, v5.4s, v3.s[1]        \n"
"fmla   v14.4s, v5.4s, v3.s[2]        \n"
"fmla   v15.4s, v5.4s, v3.s[3]        \n"
///
"prfm   pldl1keep, [%10, #256]        \n"
"ld2    {v4.4s, v5.4s}, [%10], #32    \n"// v4=10 v5=11
"fmla   v8.4s, v6.4s, v0.s[0]         \n"
"fmla   v9.4s, v6.4s, v0.s[1]         \n"
"fmla   v10.4s, v6.4s, v0.s[2]        \n"
"fmla   v11.4s, v6.4s, v0.s[3]        \n"
"ld1    {v2.4s, v3.4s}, [%12], #32    \n"
"fmla   v12.4s, v6.4s, v1.s[0]        \n"
"fmla   v13.4s, v6.4s, v1.s[1]        \n"
"fmla   v14.4s, v6.4s, v1.s[2]        \n"
"fmla   v15.4s, v6.4s, v1.s[3]        \n"
"fmla   v8.4s, v4.4s, v2.s[0]         \n"
"fmla   v9.4s, v4.4s, v2.s[1]         \n"
"fmla   v10.4s, v4.4s, v2.s[2]        \n"
"fmla   v11.4s, v4.4s, v2.s[3]        \n"
"ld1    {v0.4s, v1.4s}, [%12], #32    \n"
"fmla   v12.4s, v4.4s, v3.s[0]        \n"
"fmla   v13.4s, v4.4s, v3.s[1]        \n"
"fmla   v14.4s, v4.4s, v3.s[2]        \n"
"fmla   v15.4s, v4.4s, v3.s[3]        \n"
"prfm   pldl1keep, [%10, #256]        \n"
"ld2    {v6.4s, v7.4s}, [%10]         \n"// v6
"fmla   v8.4s, v5.4s, v0.s[0]         \n"
"fmla   v9.4s, v5.4s, v0.s[1]         \n"
"fmla   v10.4s, v5.4s, v0.s[2]        \n"
"fmla   v11.4s, v5.4s, v0.s[3]        \n"
"ld1    {v2.4s, v3.4s}, [%12], #32    \n"
"ext    v6.16b, v4.16b, v6.16b, #4    \n"// v6=12
"fmla   v12.4s, v5.4s, v1.s[0]        \n"
"fmla   v13.4s, v5.4s, v1.s[1]        \n"
"fmla   v14.4s, v5.4s, v1.s[2]        \n"
"fmla   v15.4s, v5.4s, v1.s[3]        \n"
///
"prfm   pldl1keep, [%11, #256]        \n"
"ld2    {v4.4s, v5.4s}, [%11], #32    \n"// v4=20 v5=21
"fmla   v8.4s, v6.4s, v2.s[0]         \n"
"fmla   v9.4s, v6.4s, v2.s[1]         \n"
"fmla   v10.4s, v6.4s, v2.s[2]        \n"
"fmla   v11.4s, v6.4s, v2.s[3]        \n"
"ld1    {v0.4s, v1.4s}, [%12], #32    \n"
"fmla   v12.4s, v6.4s, v3.s[0]        \n"
"fmla   v13.4s, v6.4s, v3.s[1]        \n"
"fmla   v14.4s, v6.4s, v3.s[2]        \n"
"fmla   v15.4s, v6.4s, v3.s[3]        \n"
"fmla   v8.4s, v4.4s, v0.s[0]         \n"
"fmla   v9.4s, v4.4s, v0.s[1]         \n"
"fmla   v10.4s, v4.4s, v0.s[2]        \n"
"fmla   v11.4s, v4.4s, v0.s[3]        \n"
"ld1    {v2.4s, v3.4s}, [%12], #32    \n"
"fmla   v12.4s, v4.4s, v1.s[0]        \n"
"fmla   v13.4s, v4.4s, v1.s[1]        \n"
"fmla   v14.4s, v4.4s, v1.s[2]        \n"
"fmla   v15.4s, v4.4s, v1.s[3]        \n"
"prfm   pldl1keep, [%11, #256]        \n"
"ld2    {v6.4s, v7.4s}, [%11]         \n"// v6
"fmla   v8.4s, v5.4s, v2.s[0]         \n"
"fmla   v9.4s, v5.4s, v2.s[1]         \n"
"fmla   v10.4s, v5.4s, v2.s[2]        \n"
"fmla   v11.4s, v5.4s, v2.s[3]        \n"
"ext    v6.16b, v4.16b, v6.16b, #4    \n"// v6=22
"ld1    {v0.4s, v1.4s}, [%12], #32    \n"
"fmla   v12.4s, v5.4s, v3.s[0]        \n"
"fmla   v13.4s, v5.4s, v3.s[1]        \n"
"fmla   v14.4s, v5.4s, v3.s[2]        \n"
"fmla   v15.4s, v5.4s, v3.s[3]        \n"
"fmla   v8.4s, v6.4s, v0.s[0]         \n"
"fmla   v9.4s, v6.4s, v0.s[1]         \n"
"fmla   v10.4s, v6.4s, v0.s[2]        \n"
"fmla   v11.4s, v6.4s, v0.s[3]        \n"
"fmla   v12.4s, v6.4s, v1.s[0]        \n"
"fmla   v13.4s, v6.4s, v1.s[1]        \n"
"st1    {v8.4s}, [%1], #16            \n"
"st1    {v9.4s}, [%2], #16            \n"
"fmla   v14.4s, v6.4s, v1.s[2]        \n"
"fmla   v15.4s, v6.4s, v1.s[3]        \n"
"st1    {v10.4s}, [%3], #16           \n"
"st1    {v11.4s}, [%4], #16           \n"
/* rewind ktmp to the start of the 8-channel kernel block (9 taps * 8 ch * 4 B) */
"sub    %12, %12, #288                \n"
"st1    {v12.4s}, [%5], #16           \n"
"st1    {v13.4s}, [%6], #16           \n"
"subs   %w0, %w0, #1                  \n"
"st1    {v14.4s}, [%7], #16           \n"
"st1    {v15.4s}, [%8], #16           \n"
"bne    0b                            \n"
: "=r"(nn),      // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0),      // %9
"=r"(r1),      // %10
"=r"(r2),      // %11
"=r"(ktmp)     // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else // __aarch64__
// 32-bit ARM version of the same 4x8 inner loop, using q-registers.
if (nn > 0)
{
asm volatile(
"0:                             \n"
"pld        [%1, #128]          \n"
"vld1.f32   {d16-d17}, [%1]     \n"
"pld        [%2, #128]          \n"
"vld1.f32   {d18-d19}, [%2]     \n"
"pld        [%3, #128]          \n"
"vld1.f32   {d20-d21}, [%3]     \n"
"pld        [%4, #128]          \n"
"vld1.f32   {d22-d23}, [%4]     \n"
///
"pld        [%9, #256]          \n"
"vld2.f32   {d8-d11}, [%9]!     \n"// q4=00 q5=01
"vld1.f32   {d0-d3}, [%12 :128]! \n"
"vmla.f32   q8, q4, d0[0]       \n"
"vmla.f32   q9, q4, d0[1]       \n"
"pld        [%5, #128]          \n"
"vld1.f32   {d24-d25}, [%5]     \n"
"pld        [%6, #128]          \n"
"vld1.f32   {d26-d27}, [%6]     \n"
"vmla.f32   q10, q4, d1[0]      \n"
"vmla.f32   q11, q4, d1[1]      \n"
"pld        [%7, #128]          \n"
"vld1.f32   {d28-d29}, [%7]     \n"
"pld        [%8, #128]          \n"
"vld1.f32   {d30-d31}, [%8]     \n"
"vld1.f32   {d4-d7}, [%12 :128]! \n"
"vmla.f32   q12, q4, d2[0]      \n"
"vmla.f32   q13, q4, d2[1]      \n"
"vmla.f32   q14, q4, d3[0]      \n"
"vmla.f32   q15, q4, d3[1]      \n"
"pld        [%9, #128]          \n"
"vld2.f32   {d12-d13}, [%9]     \n"// q6
"vmla.f32   q8, q5, d4[0]       \n"
"vmla.f32   q9, q5, d4[1]       \n"
"vmla.f32   q10, q5, d5[0]      \n"
"vmla.f32   q11, q5, d5[1]      \n"
"vext.f32   q6, q4, q6, #1      \n"// q6=02
"vld1.f32   {d0-d3}, [%12 :128]! \n"
"vmla.f32   q12, q5, d6[0]      \n"
"vmla.f32   q13, q5, d6[1]      \n"
"vmla.f32   q14, q5, d7[0]      \n"
"vmla.f32   q15, q5, d7[1]      \n"
///
"pld        [%10, #256]         \n"
"vld2.f32   {d8-d11}, [%10]!    \n"// q4=10 q5=11
"vmla.f32   q8, q6, d0[0]       \n"
"vmla.f32   q9, q6, d0[1]       \n"
"vmla.f32   q10, q6, d1[0]      \n"
"vmla.f32   q11, q6, d1[1]      \n"
"vld1.f32   {d4-d7}, [%12 :128]! \n"
"vmla.f32   q12, q6, d2[0]      \n"
"vmla.f32   q13, q6, d2[1]      \n"
"vmla.f32   q14, q6, d3[0]      \n"
"vmla.f32   q15, q6, d3[1]      \n"
"vmla.f32   q8, q4, d4[0]       \n"
"vmla.f32   q9, q4, d4[1]       \n"
"vmla.f32   q10, q4, d5[0]      \n"
"vmla.f32   q11, q4, d5[1]      \n"
"vld1.f32   {d0-d3}, [%12 :128]! \n"
"vmla.f32   q12, q4, d6[0]      \n"
"vmla.f32   q13, q4, d6[1]      \n"
"vmla.f32   q14, q4, d7[0]      \n"
"vmla.f32   q15, q4, d7[1]      \n"
"pld        [%10, #128]         \n"
"vld2.f32   {d12-d13}, [%10]    \n"// q6
"vmla.f32   q8, q5, d0[0]       \n"
"vmla.f32   q9, q5, d0[1]       \n"
"vmla.f32   q10, q5, d1[0]      \n"
"vmla.f32   q11, q5, d1[1]      \n"
"vld1.f32   {d4-d7}, [%12 :128]! \n"
"vext.f32   q6, q4, q6, #1      \n"// q6=12
"vmla.f32   q12, q5, d2[0]      \n"
"vmla.f32   q13, q5, d2[1]      \n"
"vmla.f32   q14, q5, d3[0]      \n"
"vmla.f32   q15, q5, d3[1]      \n"
///
"pld        [%11, #256]         \n"
"vld2.f32   {d8-d11}, [%11]!    \n"// q4=20 q5=21
"vmla.f32   q8, q6, d4[0]       \n"
"vmla.f32   q9, q6, d4[1]       \n"
"vmla.f32   q10, q6, d5[0]      \n"
"vmla.f32   q11, q6, d5[1]      \n"
"vld1.f32   {d0-d3}, [%12 :128]! \n"
"vmla.f32   q12, q6, d6[0]      \n"
"vmla.f32   q13, q6, d6[1]      \n"
"vmla.f32   q14, q6, d7[0]      \n"
"vmla.f32   q15, q6, d7[1]      \n"
"vmla.f32   q8, q4, d0[0]       \n"
"vmla.f32   q9, q4, d0[1]       \n"
"vmla.f32   q10, q4, d1[0]      \n"
"vmla.f32   q11, q4, d1[1]      \n"
"vld1.f32   {d4-d7}, [%12 :128]! \n"
"vmla.f32   q12, q4, d2[0]      \n"
"vmla.f32   q13, q4, d2[1]      \n"
"vmla.f32   q14, q4, d3[0]      \n"
"vmla.f32   q15, q4, d3[1]      \n"
"pld        [%11, #128]         \n"
"vld2.f32   {d12-d13}, [%11]    \n"// q6
"vmla.f32   q8, q5, d4[0]       \n"
"vmla.f32   q9, q5, d4[1]       \n"
"vmla.f32   q10, q5, d5[0]      \n"
"vmla.f32   q11, q5, d5[1]      \n"
"vext.f32   q6, q4, q6, #1      \n"// q6=22
"vld1.f32   {d0-d3}, [%12 :128]! \n"
"vmla.f32   q12, q5, d6[0]      \n"
"vmla.f32   q13, q5, d6[1]      \n"
"vmla.f32   q14, q5, d7[0]      \n"
"vmla.f32   q15, q5, d7[1]      \n"
"vmla.f32   q8, q6, d0[0]       \n"
"vmla.f32   q9, q6, d0[1]       \n"
"vmla.f32   q10, q6, d1[0]      \n"
"vmla.f32   q11, q6, d1[1]      \n"
"vmla.f32   q12, q6, d2[0]      \n"
"vmla.f32   q13, q6, d2[1]      \n"
"vst1.f32   {d16-d17}, [%1]!    \n"
"vst1.f32   {d18-d19}, [%2]!    \n"
"vmla.f32   q14, q6, d3[0]      \n"
"vmla.f32   q15, q6, d3[1]      \n"
"vst1.f32   {d20-d21}, [%3]!    \n"
"vst1.f32   {d22-d23}, [%4]!    \n"
/* rewind ktmp to the start of the 8-channel kernel block */
"sub        %12, %12, #288      \n"
"vst1.f32   {d24-d25}, [%5]!    \n"
"vst1.f32   {d26-d27}, [%6]!    \n"
"subs       %0, #1              \n"
"vst1.f32   {d28-d29}, [%7]!    \n"
"vst1.f32   {d30-d31}, [%8]!    \n"
"bne        0b                  \n"
: "=r"(nn),      // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(r0),      // %9
"=r"(r1),      // %10
"=r"(r2),      // %11
"=r"(ktmp)     // %12
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(r0),
"10"(r1),
"11"(r2),
"12"(ktmp)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar/NEON tail: one output column (8 output channels) per iteration.
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1    {v10.4s, v11.4s}, [%11], #32   \n"
"prfm   pldl1keep, [%8, #128]          \n"
"ld1    {v0.4s}, [%8]                  \n"
"ld1    {v12.4s, v13.4s}, [%11], #32   \n"
"ld1    {v8.s}[0], [%0]                \n"
"ld1    {v8.s}[1], [%1]                \n"
"ld1    {v8.s}[2], [%2]                \n"
"ld1    {v8.s}[3], [%3]                \n"
"fmul   v14.4s, v10.4s, v0.s[0]        \n"
"fmul   v15.4s, v11.4s, v0.s[0]        \n"
"ld1    {v9.s}[0], [%4]                \n"
"ld1    {v9.s}[1], [%5]                \n"
"ld1    {v9.s}[2], [%6]                \n"
"ld1    {v9.s}[3], [%7]                \n"
"ld1    {v10.4s, v11.4s}, [%11], #32   \n"
"fmla   v8.4s, v12.4s, v0.s[1]         \n"
"fmla   v9.4s, v13.4s, v0.s[1]         \n"
"ld1    {v12.4s, v13.4s}, [%11], #32   \n"
"fmla   v14.4s, v10.4s, v0.s[2]        \n"
"fmla   v15.4s, v11.4s, v0.s[2]        \n"
"prfm   pldl1keep, [%9, #128]          \n"
"ld1    {v1.4s}, [%9]                  \n"
"ld1    {v10.4s, v11.4s}, [%11], #32   \n"
"fmla   v8.4s, v12.4s, v1.s[0]         \n"
"fmla   v9.4s, v13.4s, v1.s[0]         \n"
"ld1    {v12.4s, v13.4s}, [%11], #32   \n"
"fmla   v14.4s, v10.4s, v1.s[1]        \n"
"fmla   v15.4s, v11.4s, v1.s[1]        \n"
"ld1    {v10.4s, v11.4s}, [%11], #32   \n"
"fmla   v8.4s, v12.4s, v1.s[2]         \n"
"fmla   v9.4s, v13.4s, v1.s[2]         \n"
"prfm   pldl1keep, [%10, #128]         \n"
"ld1    {v0.4s}, [%10]                 \n"
"ld1    {v12.4s, v13.4s}, [%11], #32   \n"
"fmla   v14.4s, v10.4s, v0.s[0]        \n"
"fmla   v15.4s, v11.4s, v0.s[0]        \n"
"ld1    {v10.4s, v11.4s}, [%11], #32   \n"
"fmla   v8.4s, v12.4s, v0.s[1]         \n"
"fmla   v9.4s, v13.4s, v0.s[1]         \n"
"fmla   v14.4s, v10.4s, v0.s[2]        \n"
"fmla   v15.4s, v11.4s, v0.s[2]        \n"
"fadd   v8.4s, v8.4s, v14.4s           \n"
"fadd   v9.4s, v9.4s, v15.4s           \n"
"sub    %11, %11, #288                 \n"
"st1    {v8.s}[0], [%0], #4            \n"
"st1    {v8.s}[1], [%1], #4            \n"
"st1    {v8.s}[2], [%2], #4            \n"
"st1    {v8.s}[3], [%3], #4            \n"
"st1    {v9.s}[0], [%4], #4            \n"
"st1    {v9.s}[1], [%5], #4            \n"
"st1    {v9.s}[2], [%6], #4            \n"
"st1    {v9.s}[3], [%7], #4            \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(r0),      // %8
"=r"(r1),      // %9
"=r"(r2),      // %10
"=r"(ktmp)     // %11
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(ktmp)
: "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
#else // __aarch64__
asm volatile(
"vld1.f32   {d20-d23}, [%11 :128]! \n"
"pld        [%8, #128]          \n"
"vld1.f32   {d0-d1}, [%8]       \n"
"vld1.f32   {d24-d27}, [%11 :128]! \n"
"vld1.f32   {d16[0]}, [%0]      \n"
"vld1.f32   {d16[1]}, [%1]      \n"
"vld1.f32   {d17[0]}, [%2]      \n"
"vld1.f32   {d17[1]}, [%3]      \n"
"vmul.f32   q14, q10, d0[0]     \n"
"vmul.f32   q15, q11, d0[0]     \n"
"vld1.f32   {d18[0]}, [%4]      \n"
"vld1.f32   {d18[1]}, [%5]      \n"
"vld1.f32   {d19[0]}, [%6]      \n"
"vld1.f32   {d19[1]}, [%7]      \n"
"vld1.f32   {d20-d23}, [%11 :128]! \n"
"vmla.f32   q8, q12, d0[1]      \n"
"vmla.f32   q9, q13, d0[1]      \n"
"vld1.f32   {d24-d27}, [%11 :128]! \n"
"vmla.f32   q14, q10, d1[0]     \n"
"vmla.f32   q15, q11, d1[0]     \n"
"pld        [%9, #128]          \n"
"vld1.f32   {d2-d3}, [%9]       \n"
"vld1.f32   {d20-d23}, [%11 :128]! \n"
"vmla.f32   q8, q12, d2[0]      \n"
"vmla.f32   q9, q13, d2[0]      \n"
"vld1.f32   {d24-d27}, [%11 :128]! \n"
"vmla.f32   q14, q10, d2[1]     \n"
"vmla.f32   q15, q11, d2[1]     \n"
"vld1.f32   {d20-d23}, [%11 :128]! \n"
"vmla.f32   q8, q12, d3[0]      \n"
"vmla.f32   q9, q13, d3[0]      \n"
"pld        [%10, #128]         \n"
"vld1.f32   {d0-d1}, [%10]      \n"
"vld1.f32   {d24-d27}, [%11 :128]! \n"
"vmla.f32   q14, q10, d0[0]     \n"
"vmla.f32   q15, q11, d0[0]     \n"
"vld1.f32   {d20-d23}, [%11 :128]! \n"
"vmla.f32   q8, q12, d0[1]      \n"
"vmla.f32   q9, q13, d0[1]      \n"
"vmla.f32   q14, q10, d1[0]     \n"
"vmla.f32   q15, q11, d1[0]     \n"
"vadd.f32   q8, q8, q14         \n"
"vadd.f32   q9, q9, q15         \n"
"sub        %11, %11, #288      \n"
"vst1.f32   {d16[0]}, [%0]!     \n"
"vst1.f32   {d16[1]}, [%1]!     \n"
"vst1.f32   {d17[0]}, [%2]!     \n"
"vst1.f32   {d17[1]}, [%3]!     \n"
"vst1.f32   {d18[0]}, [%4]!     \n"
"vst1.f32   {d18[1]}, [%5]!     \n"
"vst1.f32   {d19[0]}, [%6]!     \n"
"vst1.f32   {d19[1]}, [%7]!     \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(r0),      // %8
"=r"(r1),      // %9
"=r"(r2),      // %10
"=r"(ktmp)     // %11
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(ktmp)
: "memory", "q0", "q1", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else // __ARM_NEON
// Plain-C fallback: 9 taps x 8 interleaved output channels.
float sum0 = 0.f;
float sum1 = 0.f;
float sum2 = 0.f;
float sum3 = 0.f;
float sum4 = 0.f;
float sum5 = 0.f;
float sum6 = 0.f;
float sum7 = 0.f;
sum0 += r0[0] * ktmp[0];
sum1 += r0[0] * ktmp[1];
sum2 += r0[0] * ktmp[2];
sum3 += r0[0] * ktmp[3];
sum4 += r0[0] * ktmp[4];
sum5 += r0[0] * ktmp[5];
sum6 += r0[0] * ktmp[6];
sum7 += r0[0] * ktmp[7];
ktmp += 8;
sum0 += r0[1] * ktmp[0];
sum1 += r0[1] * ktmp[1];
sum2 += r0[1] * ktmp[2];
sum3 += r0[1] * ktmp[3];
sum4 += r0[1] * ktmp[4];
sum5 += r0[1] * ktmp[5];
sum6 += r0[1] * ktmp[6];
sum7 += r0[1] * ktmp[7];
ktmp += 8;
sum0 += r0[2] * ktmp[0];
sum1 += r0[2] * ktmp[1];
sum2 += r0[2] * ktmp[2];
sum3 += r0[2] * ktmp[3];
sum4 += r0[2] * ktmp[4];
sum5 += r0[2] * ktmp[5];
sum6 += r0[2] * ktmp[6];
sum7 += r0[2] * ktmp[7];
ktmp += 8;
sum0 += r1[0] * ktmp[0];
sum1 += r1[0] * ktmp[1];
sum2 += r1[0] * ktmp[2];
sum3 += r1[0] * ktmp[3];
sum4 += r1[0] * ktmp[4];
sum5 += r1[0] * ktmp[5];
sum6 += r1[0] * ktmp[6];
sum7 += r1[0] * ktmp[7];
ktmp += 8;
sum0 += r1[1] * ktmp[0];
sum1 += r1[1] * ktmp[1];
sum2 += r1[1] * ktmp[2];
sum3 += r1[1] * ktmp[3];
sum4 += r1[1] * ktmp[4];
sum5 += r1[1] * ktmp[5];
sum6 += r1[1] * ktmp[6];
sum7 += r1[1] * ktmp[7];
ktmp += 8;
sum0 += r1[2] * ktmp[0];
sum1 += r1[2] * ktmp[1];
sum2 += r1[2] * ktmp[2];
sum3 += r1[2] * ktmp[3];
sum4 += r1[2] * ktmp[4];
sum5 += r1[2] * ktmp[5];
sum6 += r1[2] * ktmp[6];
sum7 += r1[2] * ktmp[7];
ktmp += 8;
sum0 += r2[0] * ktmp[0];
sum1 += r2[0] * ktmp[1];
sum2 += r2[0] * ktmp[2];
sum3 += r2[0] * ktmp[3];
sum4 += r2[0] * ktmp[4];
sum5 += r2[0] * ktmp[5];
sum6 += r2[0] * ktmp[6];
sum7 += r2[0] * ktmp[7];
ktmp += 8;
sum0 += r2[1] * ktmp[0];
sum1 += r2[1] * ktmp[1];
sum2 += r2[1] * ktmp[2];
sum3 += r2[1] * ktmp[3];
sum4 += r2[1] * ktmp[4];
sum5 += r2[1] * ktmp[5];
sum6 += r2[1] * ktmp[6];
sum7 += r2[1] * ktmp[7];
ktmp += 8;
sum0 += r2[2] * ktmp[0];
sum1 += r2[2] * ktmp[1];
sum2 += r2[2] * ktmp[2];
sum3 += r2[2] * ktmp[3];
sum4 += r2[2] * ktmp[4];
sum5 += r2[2] * ktmp[5];
sum6 += r2[2] * ktmp[6];
sum7 += r2[2] * ktmp[7];
ktmp += 8;
*outptr0 += sum0;
*outptr1 += sum1;
*outptr2 += sum2;
*outptr3 += sum3;
*outptr4 += sum4;
*outptr5 += sum5;
*outptr6 += sum6;
*outptr7 += sum7;
// Rewind to the start of this input channel's kernel block.
ktmp -= 8*9;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
outptr4++;
outptr5++;
outptr6++;
outptr7++;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
// Advance to the next input channel's 8-wide kernel block.
ktmp += 8*9;
}
}
// Second pass: leftover output channels, one at a time (plain 9-float
// kernel layout per input channel).
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* ktmp = _kernel.channel(p/8 + p%8);
for (int q=0; q<inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* k0 = ktmp;
const float* k1 = ktmp + 3;
const float* k2 = ktmp + 6;
#if __ARM_NEON
// Note: each vld1q reads 4 floats but only the first 3 taps of each
// row are meaningful; the scalar tail below relies on the 4th lane
// being overwritten via vsetq_lane before the horizontal add.
float32x4_t _k0123 = vld1q_f32(k0);
float32x4_t _k3456 = vld1q_f32(k1);
float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON
int i = 0;
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm       pldl1keep, [%2, #256]          \n"
"ld2        {v2.4s, v3.4s}, [%2], #32      \n"
"0:                                        \n"
"prfm       pldl1keep, [%1, #128]          \n"
"ld1        {v0.4s}, [%1]                  \n"
"fmla       v0.4s,  v2.4s, %10.s[0]        \n"
"fmul       v10.4s, v3.4s, %10.s[1]        \n"
"prfm       pldl1keep, [%2, #256]          \n"
"ld2        {v8.4s, v9.4s}, [%2]           \n"
"ext        v1.16b, v2.16b, v8.16b, #4     \n"
"fmul       v11.4s, v1.4s, %10.s[2]        \n"
"prfm       pldl1keep, [%3, #256]          \n"
"ld2        {v2.4s, v3.4s}, [%3], #32      \n"
"fmla       v0.4s,  v2.4s, %11.s[0]        \n"
"fmla       v10.4s, v3.4s, %11.s[1]        \n"
"prfm       pldl1keep, [%3, #256]          \n"
"ld2        {v8.4s, v9.4s}, [%3]           \n"
"ext        v1.16b, v2.16b, v8.16b, #4     \n"
"fmla       v11.4s, v1.4s, %11.s[2]        \n"
"prfm       pldl1keep, [%4, #256]          \n"
"ld2        {v2.4s, v3.4s}, [%4], #32      \n"
"fmla       v0.4s,  v2.4s, %12.s[0]        \n"
"fmla       v10.4s, v3.4s, %12.s[1]        \n"
"prfm       pldl1keep, [%4, #256]          \n"
"ld2        {v8.4s, v9.4s}, [%4]           \n"
"ext        v1.16b, v2.16b, v8.16b, #4     \n"
"fmla       v11.4s, v1.4s, %12.s[2]        \n"
"prfm       pldl1keep, [%2, #256]          \n"
"ld2        {v2.4s, v3.4s}, [%2], #32      \n"
"fadd       v0.4s, v0.4s, v10.4s           \n"
"fadd       v0.4s, v0.4s, v11.4s           \n"
"subs       %w0, %w0, #1                   \n"
"st1        {v0.4s}, [%1], #16             \n"
"bne        0b                             \n"
/* r0 was pre-advanced by the pipelined load; step back one chunk */
"sub        %2, %2, #32                    \n"
: "=r"(nn),     // %0
"=r"(outptr), // %1
"=r"(r0),     // %2
"=r"(r1),     // %3
"=r"(r2)      // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789)  // %12
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
);
}
#else
if (nn > 0)
{
asm volatile(
"pld        [%2, #256]          \n"
"vld2.f32   {d4-d7}, [%2]!      \n"
"0:                             \n"
"pld        [%1, #128]          \n"
"vld1.f32   {d0-d1}, [%1]       \n"
"vmla.f32   q0, q2, %e10[0]     \n"
"vmul.f32   q10, q3, %e10[1]    \n"
"pld        [%2, #128]          \n"
"vld2.f32   {d16-d17}, [%2]     \n"
"vext.32    q1, q2, q8, #1      \n"
"vmul.f32   q11, q1, %f10[0]    \n"
"pld        [%3, #256]          \n"
"vld2.f32   {d4-d7}, [%3]!      \n"
"vmla.f32   q0, q2, %e11[0]     \n"
"vmla.f32   q10, q3, %e11[1]    \n"
"pld        [%3, #128]          \n"
"vld2.f32   {d16-d17}, [%3]     \n"
"vext.32    q1, q2, q8, #1      \n"
"vmla.f32   q11, q1, %f11[0]    \n"
"pld        [%4, #256]          \n"
"vld2.f32   {d4-d7}, [%4]!      \n"
"vmla.f32   q0, q2, %e12[0]     \n"
"vmla.f32   q10, q3, %e12[1]    \n"
"pld        [%4, #128]          \n"
"vld2.f32   {d16-d17}, [%4]     \n"
"vext.32    q1, q2, q8, #1      \n"
"vmla.f32   q11, q1, %f12[0]    \n"
"pld        [%2, #256]          \n"
"vld2.f32   {d4-d7}, [%2]!      \n"
"vadd.f32   q0, q0, q10         \n"
"vadd.f32   q0, q0, q11         \n"
"subs       %0, #1              \n"
"vst1.f32   {d0-d1}, [%1]!      \n"
"bne        0b                  \n"
"sub        %2, #32             \n"
: "=r"(nn),     // %0
"=r"(outptr), // %1
"=r"(r0),     // %2
"=r"(r1),     // %3
"=r"(r2)      // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789)  // %12
: "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
// Lane 3 carries the running output value so the horizontal add
// performs the accumulation in one go.
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * ktmp[0];
sum += r0[1] * ktmp[1];
sum += r0[2] * ktmp[2];
sum += r1[0] * ktmp[3];
sum += r1[1] * ktmp[4];
sum += r1[2] * ktmp[5];
sum += r2[0] * ktmp[6];
sum += r2[1] * ktmp[7];
sum += r2[2] * ktmp[8];
*outptr += sum;
#endif // __ARM_NEON
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
// Next input channel's 9-float kernel.
ktmp += 9;
}
}
}
|
deconv_2d.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_OPS_DECONV_2D_H_
#define MACE_OPS_DECONV_2D_H_
#include <algorithm>
#include <string>
#include <vector>
#include "mace/core/operator.h"
#include "mace/core/types.h"
#include "mace/ops/activation.h"
#include "mace/ops/conv_pool_2d_util.h"
namespace mace {
namespace ops {
// Source framework of the deconvolution model. The two conventions encode
// output geometry differently: the TensorFlow path takes an explicit output
// shape (CalcDeconvShape_TF) while the Caffe path derives it from output
// paddings (CalcDeconvShape_Caffe).
enum FrameworkType {
TENSORFLOW = 0,
CAFFE = 1,
};
// Common base for 2-D deconvolution (transposed convolution) ops: parses
// shared op arguments and provides static output-shape/padding calculators
// for the Caffe and TensorFlow conventions.
class Deconv2dOpBase : public Operation {
public:
// Reads strides, padding type/values, group count, framework type and
// activation settings from the op definition.
explicit Deconv2dOpBase(OpConstructContext *context)
: Operation(context),
strides_(Operation::GetRepeatedArgs<int>("strides")),
padding_type_(static_cast<Padding>(Operation::GetOptionalArg<int>(
"padding", static_cast<int>(SAME)))),
paddings_(Operation::GetRepeatedArgs<int>("padding_values")),
group_(Operation::GetOptionalArg<int>("group", 1)),
model_type_(static_cast<ops::FrameworkType>(
Operation::GetOptionalArg<int>("framework_type", 0))),
activation_(ops::StringToActivationType(
Operation::GetOptionalArg<std::string>("activation",
"NOOP"))),
relux_max_limit_(
Operation::GetOptionalArg<float>("max_limit", 0.0f)) {}
// Caffe-style shape calculation: the padded output is fully determined by
// input size, stride and kernel; out_paddings is then cropped off.
// Any of in_paddings / out_shape / padded_out_shape may be null to skip
// that result. isNCHW selects the layout of input_shape and the outputs.
static void CalcDeconvShape_Caffe(
const index_t *input_shape,   // NHWC
const index_t *filter_shape,  // OIHW
const int *strides,
const int *out_paddings,
const int group,
int *in_paddings,
index_t *out_shape,
index_t *padded_out_shape,
const bool isNCHW = false) {
MACE_CHECK_NOTNULL(out_paddings);
MACE_CHECK_NOTNULL(input_shape);
MACE_CHECK_NOTNULL(filter_shape);
MACE_CHECK_NOTNULL(strides);
const index_t in_height = isNCHW ? input_shape[2] : input_shape[1];
const index_t in_width = isNCHW ? input_shape[3] : input_shape[2];
// Filter is OIHW; with grouping, total output channels = O * group.
const index_t output_channel = filter_shape[0] * group;
const index_t kernel_h = filter_shape[2];
const index_t kernel_w = filter_shape[3];
index_t padded_out_height =
(in_height - 1) * strides[0] + kernel_h;
index_t padded_out_width =
(in_width - 1) * strides[1] + kernel_w;
if (in_paddings != nullptr) {
// Equivalent input-side padding, clamped at zero.
in_paddings[0] = static_cast<int>((kernel_h - 1) * 2 - out_paddings[0]);
in_paddings[1] = static_cast<int>((kernel_w - 1) * 2 - out_paddings[1]);
in_paddings[0] = std::max<int>(0, in_paddings[0]);
in_paddings[1] = std::max<int>(0, in_paddings[1]);
}
if (padded_out_shape != nullptr) {
padded_out_shape[0] = input_shape[0];
padded_out_shape[1] = isNCHW ? output_channel : padded_out_height;
padded_out_shape[2] = isNCHW ? padded_out_height : padded_out_width;
padded_out_shape[3] = isNCHW ? padded_out_width : output_channel;
}
if (out_shape != nullptr) {
// Final size after cropping the requested output paddings.
index_t out_height = padded_out_height - out_paddings[0];
index_t out_width = padded_out_width - out_paddings[1];
out_shape[0] = input_shape[0];
out_shape[1] = isNCHW ? output_channel : out_height;
out_shape[2] = isNCHW ? out_height : out_width;
out_shape[3] = isNCHW ? out_width : output_channel;
}
}
// TensorFlow-style shape calculation: the desired output_shape is given;
// validates it against input size/stride/padding_type, then derives the
// input paddings, output croppings and padded output shape (each optional
// via null pointer). isNCHW selects the layout of the shape arrays.
static void CalcDeconvShape_TF(
const index_t *input_shape,   // NHWC
const index_t *filter_shape,  // OIHW
const index_t *output_shape,
const int *strides,
const int group,
Padding padding_type,
int *in_paddings,
int *out_paddings,
index_t *padded_out_shape,
const bool isNCHW = false) {
MACE_CHECK_NOTNULL(output_shape);
MACE_CHECK_NOTNULL(input_shape);
MACE_CHECK_NOTNULL(filter_shape);
MACE_CHECK_NOTNULL(strides);
const index_t in_height = isNCHW ? input_shape[2] : input_shape[1];
const index_t in_width = isNCHW ? input_shape[3] : input_shape[2];
const index_t out_height = isNCHW ? output_shape[2] : output_shape[1];
const index_t out_width = isNCHW ? output_shape[3] : output_shape[2];
// Input dilated by stride (zeros inserted between elements).
const index_t extended_in_height = (in_height - 1) * strides[0] + 1;
const index_t extended_in_width = (in_width - 1) * strides[1] + 1;
const index_t kernel_h = filter_shape[2];
const index_t kernel_w = filter_shape[3];
index_t expected_input_height = 0, expected_input_width = 0;
switch (padding_type) {
case VALID:
expected_input_height =
(out_height - kernel_h + strides[0]) / strides[0];
expected_input_width =
(out_width - kernel_w + strides[1]) / strides[1];
break;
case SAME:
expected_input_height =
(out_height + strides[0] - 1) / strides[0];
expected_input_width =
(out_width + strides[1] - 1) / strides[1];
break;
default:
MACE_CHECK(false, "Unsupported padding type: ", padding_type);
}
MACE_CHECK(expected_input_height == in_height,
expected_input_height, "!=", in_height);
MACE_CHECK(expected_input_width == in_width,
expected_input_width, "!=", in_width);
const index_t padded_out_height =
(in_height - 1) * strides[0] + kernel_h;
const index_t padded_out_width =
(in_width - 1) * strides[1] + kernel_w;
if (in_paddings != nullptr) {
const int p_h =
static_cast<int>(out_height + kernel_h - 1 - extended_in_height);
const int p_w =
static_cast<int>(out_width + kernel_w - 1 - extended_in_width);
in_paddings[0] = std::max<int>(0, p_h);
in_paddings[1] = std::max<int>(0, p_w);
}
if (out_paddings != nullptr) {
// Amount to crop from the padded output to reach output_shape.
const int o_p_h = static_cast<int>(padded_out_height - out_height);
const int o_p_w = static_cast<int>(padded_out_width - out_width);
out_paddings[0] = std::max<int>(0, o_p_h);
out_paddings[1] = std::max<int>(0, o_p_w);
}
if (padded_out_shape != nullptr) {
index_t output_channel = filter_shape[0] * group;
padded_out_shape[0] = output_shape[0];
padded_out_shape[1] = isNCHW ? output_channel : padded_out_height;
padded_out_shape[2] = isNCHW ? padded_out_height : padded_out_width;
padded_out_shape[3] = isNCHW ? padded_out_width : output_channel;
}
}
protected:
std::vector<int> strides_;  // [stride_h, stride_w]
const Padding padding_type_;
std::vector<int> paddings_;
const int group_;
const FrameworkType model_type_;
const ActivationType activation_;
const float relux_max_limit_;
};
template <typename T>
void CropPadOut(const T *input,
const index_t *in_shape,
const index_t *out_shape,
const index_t pad_h,
const index_t pad_w,
T *output) {
const index_t batch = in_shape[0];
const index_t channel = in_shape[1];
const index_t in_height = in_shape[2];
const index_t in_width = in_shape[3];
const index_t out_height = out_shape[2];
const index_t out_width = out_shape[3];
#pragma omp parallel for collapse(3)
for (int i = 0; i < batch; ++i) {
for (int j = 0; j < channel; ++j) {
for (int k = 0; k < out_height; ++k) {
const T *input_base =
input + ((i * channel + j) * in_height + (k + pad_h)) * in_width;
T *output_base =
output + ((i * channel + j) * out_height + k)* out_width;
memcpy(output_base, input_base + pad_w, out_width * sizeof(T));
}
}
}
}
} // namespace ops
} // namespace mace
#endif // MACE_OPS_DECONV_2D_H_
|
mdc2_fmt_plug.c | /*
* Cracker for MDC-2 (MDC-2DES) hashes.
*
* This software is Copyright (c) 2014 Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mdc2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mdc2);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // XXX
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#include "mdc2-JtR.h"
#define FORMAT_LABEL "mdc2"
#define FORMAT_NAME "MDC-2"
#define FORMAT_TAG "$mdc2$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MDC-2DES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors. The entries without the "$mdc2$" prefix exercise the
 * split() canonicalization path, which prepends the tag. */
static struct fmt_tests tests[] = {
{"$mdc2$000ed54e093d61679aefbeae05bfe33a", "The quick brown fox jumps over the lazy dog"},
{"775f59f8e51aec29c57ac6ab850d58e8", "The quick brown fox jumps over the lazy cog"},
{"52525252525252522525252525252525", ""},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static void init(struct fmt_main *self)
{
	/* In OpenMP builds, scale the per-crypt key counts by the thread
	 * count (and OMP_SCALE for the maximum), then allocate the
	 * candidate-key, length and digest buffers. */
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
static void done(void)
{
	/* Release the buffers allocated in init(). */
	MEM_FREE(saved_key);
	MEM_FREE(saved_len);
	MEM_FREE(crypt_out);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;
	int extra;

	/* An optional "$mdc2$" tag followed by exactly 32 hex digits. */
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	return hexlenl(p, &extra) == BINARY_SIZE * 2 && !extra;
}
/*
 * Canonicalize a hash: return it unchanged when already tagged, otherwise
 * prepend the "$mdc2$" tag into a static buffer.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + 2 * BINARY_SIZE + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return ciphertext;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	/* valid() guarantees the bare hash is exactly 2*BINARY_SIZE chars. */
	strcpy(out + TAG_LENGTH, ciphertext);
	return out;
}
/*
 * Decode the (optionally tagged) hex ciphertext into BINARY_SIZE raw bytes.
 * Returns a pointer to a static, suitably aligned buffer.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy; /* forces alignment for word-sized access */
	} buf;
	unsigned char *out = buf.c;
	char *hex = ciphertext;
	int i;

	if (!strncmp(hex, FORMAT_TAG, TAG_LENGTH))
		hex += TAG_LENGTH;

	for (i = 0; i < BINARY_SIZE; i++, hex += 2)
		out[i] = (atoi16[ARCH_INDEX(hex[0])] << 4) |
		         atoi16[ARCH_INDEX(hex[1])];

	return out;
}
#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"
/*
 * Hash all queued candidate keys with MDC-2.
 *
 * Bug fix: the for-loop was previously inside the #ifdef _OPENMP block, so a
 * build without OpenMP executed the body exactly once (index 0) while still
 * returning count — all other candidates were never hashed.  The loop now
 * runs unconditionally; only the pragma is conditional.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		JtR_MDC2_CTX ctx;

		JtR_MDC2_Init(&ctx);
		JtR_MDC2_Update(&ctx, (unsigned char*)saved_key[index], saved_len[index]);
		JtR_MDC2_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}
/*
 * Quick check: does any computed digest match the target's first 32 bits?
 *
 * Bug fix: the loop header was wrapped in #ifdef _OPENMP, so without OpenMP
 * only crypt_out[0] was ever compared and matches at other indices were
 * silently missed.  The loop now runs in all builds.
 */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (((uint32_t*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}
/* Full-width comparison of one computed digest against the target binary. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* cmp_one() already compares the full digest, so nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store one candidate plaintext, recording its (truncated) length. */
static void mdc2_set_key(char *key, int index)
{
	saved_len[index] =
	    strnzcpyn(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored plaintext for reporting a cracked hash. */
static char *get_key(int index)
{
	char *key = saved_key[index];

	return key;
}
/* John the Ripper format descriptor for MDC-2, registered as fmt_mdc2. */
struct fmt_main fmt_mdc2 = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0, /* min plaintext length */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL }, /* tunable cost names */
		{ FORMAT_TAG },
		tests
	}, { /* fmt_methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt, /* saltless format */
		{ NULL }, /* tunable cost value functions */
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL, /* salt_compare */
		fmt_default_set_salt,
		mdc2_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			/* get_hash_0..6 generated from crypt_out */
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif
|
DRB088-dynamic-storage-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
For the case of a variable which is not referenced within a construct:
objects with dynamic storage duration should be shared.
Putting it within a threadprivate directive may cause seg fault since
threadprivate copies are not allocated!
Dependence pair: *counter@63:6 vs. *counter@63:6
*/
#include "omprace.h"
#include <omp.h>
#include<stdio.h>
#include<stdlib.h>
/* Heap-allocated counter shared by all threads; allocated in main(). */
int* counter;
/*
 * Unsynchronized increment of the shared heap counter.  The data race on
 * *counter is intentional — this is DataRaceBench case DRB088 ("yes" race),
 * whose header declares the dependence pair on *counter.  Do not add
 * synchronization here.
 */
void foo()
{
	(*counter)++;
}
/*
 * Driver: allocate the shared counter, then have every thread of a parallel
 * region call foo(), which races on (*counter)++.  The race is the point of
 * this benchmark and is deliberately left in place.
 *
 * Fix: corrected the typo in the malloc failure message ("failes" -> "failed").
 */
int main()
{
	omprace_init();
	counter = (int*) malloc(sizeof(int));
	if (counter == NULL)
	{
		fprintf(stderr, "malloc() failed\n");
		exit(1);
	}
	*counter = 0;
	/* Intentional data race: unsynchronized increments from all threads. */
#pragma omp parallel
	{
		foo();
	}
	printf("%d \n", *counter);
	free (counter);
	omprace_fini();
	return 0;
}
|
task-taskwait.c | /*
Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze
(joachim.protze@tu-dresden.de), Jonas Hahnfeld
(hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir
Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin
Schulz.
LLNL-CODE-773957
All rights reserved.
This file is part of Archer. For details, see
https://pruners.github.io/archer. Please also read
https://github.com/PRUNERS/archer/blob/master/LICENSE.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
/*
 * Archer regression test: an explicit task increments var, the master thread
 * waits at a taskwait, then increments var again.  The taskwait orders the
 * two increments, so this is race-free and var must equal 2 on exit.
 */
int main(int argc, char* argv[])
{
	int var = 0;

#pragma omp parallel num_threads(2) shared(var)
#pragma omp master
	{
		/* First increment runs inside the task, possibly on the other thread. */
#pragma omp task shared(var)
		{
			var++;
		}
		// Give other thread time to steal the task.
		sleep(1);
		/* taskwait guarantees the task above has completed before we continue. */
#pragma omp taskwait
		var++;
	}
	fprintf(stderr, "DONE\n");
	int error = (var != 2);
	return error;
}
// CHECK: DONE
|
zpbtrs.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_pbtrs
*
* Solves a system of linear equations A * X = B with a Hermitian positive definite
* matrix A using the Cholesky factorization of A (i.e., A = L*L^T or A = U^T*U)
* computed by plasma_zpbtrf.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in] kd
* The number of superdiagonals within the band of A if uplo is upper,
* or the number of subdiagonals if uplo is lower. kd >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of
* columns of the matrix B. nrhs >= 0.
*
* @param[in,out] AB
* The triangular factor U or L from the Cholesky
* factorization A = U^H*U or A = L*L^H, computed by
* plasma_zpotrf.
* Remark: If out-of-place layout translation is used, the
* matrix A can be considered as input, however if inplace
* layout translation is enabled, the content of A will be
* reordered for computation and restored before exiting the
* function.
*
* @param[in] ldab
* The leading dimension of the array AB.
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zpbtrs
* @sa plasma_cpbtrs
* @sa plasma_dpbtrs
* @sa plasma_spbtrs
* @sa plasma_zpbtrf
*
******************************************************************************/
/*
 * Solve A*X = B for a Hermitian positive definite band matrix using the
 * Cholesky factor computed by plasma_zpbtrf.  LAPACK-layout entry point:
 * translates AB and B to tile layout, runs the async solve, and translates
 * B back.
 *
 * Fixes relative to the previous revision:
 *  - Error codes now honor the documented "-i means i-th argument" contract
 *    for this signature: kd is argument 3 (was -4), nrhs argument 4 (was -5),
 *    ldab argument 6 (was -7), ldb argument 8 (was -10).
 *  - The AB and B tile descriptors are destroyed when
 *    plasma_sequence_create() fails (previously leaked).
 */
int plasma_zpbtrs(plasma_enum_t uplo,
                  int n, int kd, int nrhs,
                  plasma_complex64_t *pAB, int ldab,
                  plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kd < 0) {
        plasma_error("illegal value of kd");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    if (ldab < imax(1, 1+kd)) {
        plasma_error("illegal value of ldab");
        return -6;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -8;
    }
    // quick return
    if (imax(n, nrhs) == 0)
        return PlasmaSuccess;
    // Set tiling parameters.
    int nb = plasma->nb;
    // Initialize tile matrix descriptors.
    // lm: band storage height rounded up to whole tiles.
    int lm = nb*(1+(kd+nb-1)/nb);
    plasma_desc_t AB;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexDouble, uplo, nb, nb,
                                             lm, n, 0, 0, n, n, kd, kd,
                                             &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }
    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        // Release the descriptors created above (previously leaked here).
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }
    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;
    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zpb2desc(pAB, ldab, AB, sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, sequence, &request);
        // Call the tile async function.
        plasma_omp_zpbtrs(uplo, AB, B, sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request);
    }
    // implicit synchronization
    // Free matrix A in tile layout.
    plasma_desc_destroy(&AB);
    plasma_desc_destroy(&B);
    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_pbtrs
*
* Solves a system of linear equations using previously
* computed Cholesky factorization.
* Non-blocking tile version of plasma_zpbtrs().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] AB
* The triangular factor U or L from the Cholesky factorization
* A = U^H*U or A = L*L^H, computed by plasma_zpotrf.
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zpbtrs
* @sa plasma_omp_zpbtrs
* @sa plasma_omp_cpbtrs
* @sa plasma_omp_dpbtrs
* @sa plasma_omp_spbtrs
* @sa plasma_omp_zpbtrf
*
******************************************************************************/
/*
 * Tile-async band Cholesky solve: two triangular band solves (forward and
 * backward) against the factor in AB.  Errors are reported through
 * sequence->status / request->status.
 *
 * Fix: the NULL checks for sequence and request are now performed first.
 * Previously the context/uplo/descriptor failure paths called
 * plasma_request_fail(sequence, request, ...) before those handles had been
 * validated, dereferencing NULL when a caller passed a NULL sequence or
 * request.  With a NULL handle we can only log a fatal error and return,
 * since there is no valid object to store the failure status in.
 */
void plasma_omp_zpbtrs(plasma_enum_t uplo, plasma_desc_t AB, plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate the error-reporting handles before anything that might use them.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if (AB.n == 0 || B.n == 0)
        return;
    // Call the parallel functions.
    // Forward solve: L*y = b (or U^H*y = b for the upper factor).
    plasma_pztbsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans,
                  PlasmaNonUnit,
                  1.0, AB,
                       B,
                  NULL,
                  sequence, request);
    // Backward solve: L^H*x = y (or U*x = y for the upper factor).
    plasma_pztbsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans,
                  PlasmaNonUnit,
                  1.0, AB,
                       B,
                  NULL,
                  sequence, request);
}
|
convolution_1x1_pack8to1_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Repack 1x1 convolution weights (float, inch-outch layout) into the
// interleaved __fp16 layout consumed by the pack8-to-pack1 sgemm kernels.
// Output channels are grouped by 8 (one Mat channel per group), with the
// leftover channels stored one per Mat channel after the groups.
// NOTE(review): the q loop only covers full groups of 8 input channels;
// inch is presumably a multiple of 8 (pack8 input) — confirm at call sites.
static void conv1x1s1_sgemm_transform_kernel_pack8to1_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 8a-inch/8a-outch
    kernel_tm_pack8to1.create(8, inch / 8, outch / 8 + outch % 8, (size_t)2u * 8, 8);
    int p = 0;
    // Full blocks of 8 output channels: write the i-th input weight of all
    // 8 kernels side by side, converting float -> __fp16 on the fly.
    for (; p + 7 < outch; p += 8)
    {
        const float* k0 = (const float*)kernel + (p + 0) * inch;
        const float* k1 = (const float*)kernel + (p + 1) * inch;
        const float* k2 = (const float*)kernel + (p + 2) * inch;
        const float* k3 = (const float*)kernel + (p + 3) * inch;
        const float* k4 = (const float*)kernel + (p + 4) * inch;
        const float* k5 = (const float*)kernel + (p + 5) * inch;
        const float* k6 = (const float*)kernel + (p + 6) * inch;
        const float* k7 = (const float*)kernel + (p + 7) * inch;
        __fp16* g0 = kernel_tm_pack8to1.channel(p / 8);
        for (int q = 0; q + 7 < inch; q += 8)
        {
            for (int i = 0; i < 8; i++)
            {
                g0[0] = (__fp16)k0[i];
                g0[1] = (__fp16)k1[i];
                g0[2] = (__fp16)k2[i];
                g0[3] = (__fp16)k3[i];
                g0[4] = (__fp16)k4[i];
                g0[5] = (__fp16)k5[i];
                g0[6] = (__fp16)k6[i];
                g0[7] = (__fp16)k7[i];
                g0 += 8;
            }
            k0 += 8;
            k1 += 8;
            k2 += 8;
            k3 += 8;
            k4 += 8;
            k5 += 8;
            k6 += 8;
            k7 += 8;
        }
    }
    // Remaining output channels: one kernel per destination channel,
    // stored sequentially (still walked in groups of 8 input channels).
    for (; p < outch; p++)
    {
        const float* k0 = (const float*)kernel + p * inch;
        __fp16* g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8);
        for (int q = 0; q + 7 < inch; q += 8)
        {
            for (int i = 0; i < 8; i++)
            {
                g0[0] = (__fp16)k0[i];
                g0 += 1;
            }
            k0 += 8;
        }
    }
}
// 1x1 stride-1 convolution as an fp16 sgemm: input is pack8 (__fp16, 8
// channels interleaved per element), output is pack1.  The spatial pixels
// are first re-tiled into groups of 8 / 4 / 1 ("interleave" stage), then
// multiplied against the repacked kernel with hand-written AArch64 NEON
// fp16 assembly.  The asm bodies are left byte-for-byte untouched; only
// comments were added around them.
static void conv1x1s1_sgemm_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    const int size = w * h;
    const __fp16* bias = _bias;
    // interleave
    // tmp holds the pixel-major re-tiling: one channel per 8-pixel block,
    // then per 4-pixel block, then per single leftover pixel.
    Mat tmp;
    if (size >= 8)
        tmp.create(8, inch, size / 8 + (size % 8) / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
    else if (size >= 4)
        tmp.create(4, inch, size / 4 + size % 4, elemsize, elempack, opt.workspace_allocator);
    else // if (size >= 1)
        tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
    {
        int nn_size;
        int remain_size_start = 0;
        // Blocks of 8 pixels: transpose each 8-pixel x 8-channel tile.
        nn_size = (size - remain_size_start) >> 3;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;
            const __fp16* img0 = bottom_blob.channel(0);
            img0 += i * 8;
            __fp16* tmpptr = tmp.channel(i / 8);
            for (int q = 0; q < inch; q++)
            {
                // transpose 8x8
                asm volatile(
                    "prfm pldl1keep, [%0, #512] \n"
                    "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
                    "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
                    "sub %0, %0, #64 \n"
                    "uzp1 v16.8h, v0.8h, v4.8h \n"
                    "uzp2 v20.8h, v0.8h, v4.8h \n"
                    "uzp1 v17.8h, v1.8h, v5.8h \n"
                    "uzp2 v21.8h, v1.8h, v5.8h \n"
                    "uzp1 v18.8h, v2.8h, v6.8h \n"
                    "uzp2 v22.8h, v2.8h, v6.8h \n"
                    "uzp1 v19.8h, v3.8h, v7.8h \n"
                    "uzp2 v23.8h, v3.8h, v7.8h \n"
                    "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                    "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
                img0 += bottom_blob.cstep * 8;
            }
        }
        // Blocks of 4 pixels: transpose each 8-channel x 4-pixel tile.
        remain_size_start += nn_size << 3;
        nn_size = (size - remain_size_start) >> 2;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;
            const __fp16* img0 = bottom_blob.channel(0);
            img0 += i * 8;
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            for (int q = 0; q < inch; q++)
            {
                // transpose 8x4
                asm volatile(
                    "prfm pldl1keep, [%0, #256] \n"
                    "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
                    "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0", "v1", "v2", "v3");
                img0 += bottom_blob.cstep * 8;
            }
        }
        // Leftover single pixels: copy the 8-channel vector as-is.
        remain_size_start += nn_size << 2;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < size; i++)
        {
            const __fp16* img0 = bottom_blob.channel(0);
            img0 += i * 8;
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            for (int q = 0; q < inch; q++)
            {
                asm volatile(
                    "prfm pldl1keep, [%0, #128] \n"
                    "ld1 {v0.8h}, [%0] \n"
                    "st1 {v0.8h}, [%1], #16 \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0");
                img0 += bottom_blob.cstep * 8;
            }
        }
    }
    // GEMM stage: blocks of 8 output channels first, then the remainder.
    int nn_outch = 0;
    int remain_outch_start = 0;
    nn_outch = outch >> 3;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;
        __fp16* outptr0 = top_blob.channel(p);
        __fp16* outptr1 = top_blob.channel(p + 1);
        __fp16* outptr2 = top_blob.channel(p + 2);
        __fp16* outptr3 = top_blob.channel(p + 3);
        __fp16* outptr4 = top_blob.channel(p + 4);
        __fp16* outptr5 = top_blob.channel(p + 5);
        __fp16* outptr6 = top_blob.channel(p + 6);
        __fp16* outptr7 = top_blob.channel(p + 7);
        const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
        const __fp16* biasptr = bias ? bias + p : zeros;
        float16x8_t _bias0 = vld1q_f16(biasptr);
        int i = 0;
        // 8 pixels x 8 output channels per asm iteration.
        for (; i + 7 < size; i += 8)
        {
            __fp16* tmpptr = tmp.channel(i / 8);
            const __fp16* kptr = kernel.channel(p / 8);
            int nn = inch; // inch always > 0
            asm volatile(
                "dup v24.8h, %22.h[0] \n"
                "dup v25.8h, %22.h[1] \n"
                "dup v26.8h, %22.h[2] \n"
                "dup v27.8h, %22.h[3] \n"
                "dup v28.8h, %22.h[4] \n"
                "dup v29.8h, %22.h[5] \n"
                "dup v30.8h, %22.h[6] \n"
                "dup v31.8h, %22.h[7] \n"
                "0: \n"
                "prfm pldl1keep, [%9, #512] \n"
                "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%9], #64 \n"
                "prfm pldl1keep, [%10, #512] \n"
                "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"
                "fmla v24.8h, v16.8h, v0.h[0] \n"
                "fmla v25.8h, v16.8h, v0.h[1] \n"
                "fmla v26.8h, v16.8h, v0.h[2] \n"
                "fmla v27.8h, v16.8h, v0.h[3] \n"
                "fmla v28.8h, v16.8h, v0.h[4] \n"
                "fmla v29.8h, v16.8h, v0.h[5] \n"
                "fmla v30.8h, v16.8h, v0.h[6] \n"
                "fmla v31.8h, v16.8h, v0.h[7] \n"
                "fmla v24.8h, v17.8h, v1.h[0] \n"
                "fmla v25.8h, v17.8h, v1.h[1] \n"
                "fmla v26.8h, v17.8h, v1.h[2] \n"
                "fmla v27.8h, v17.8h, v1.h[3] \n"
                "fmla v28.8h, v17.8h, v1.h[4] \n"
                "fmla v29.8h, v17.8h, v1.h[5] \n"
                "fmla v30.8h, v17.8h, v1.h[6] \n"
                "fmla v31.8h, v17.8h, v1.h[7] \n"
                "prfm pldl1keep, [%9, #512] \n"
                "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%9], #64 \n"
                "fmla v24.8h, v18.8h, v2.h[0] \n"
                "fmla v25.8h, v18.8h, v2.h[1] \n"
                "fmla v26.8h, v18.8h, v2.h[2] \n"
                "fmla v27.8h, v18.8h, v2.h[3] \n"
                "fmla v28.8h, v18.8h, v2.h[4] \n"
                "fmla v29.8h, v18.8h, v2.h[5] \n"
                "fmla v30.8h, v18.8h, v2.h[6] \n"
                "fmla v31.8h, v18.8h, v2.h[7] \n"
                "prfm pldl1keep, [%10, #512] \n"
                "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"
                "fmla v24.8h, v19.8h, v3.h[0] \n"
                "fmla v25.8h, v19.8h, v3.h[1] \n"
                "fmla v26.8h, v19.8h, v3.h[2] \n"
                "fmla v27.8h, v19.8h, v3.h[3] \n"
                "fmla v28.8h, v19.8h, v3.h[4] \n"
                "fmla v29.8h, v19.8h, v3.h[5] \n"
                "fmla v30.8h, v19.8h, v3.h[6] \n"
                "fmla v31.8h, v19.8h, v3.h[7] \n"
                "fmla v24.8h, v20.8h, v4.h[0] \n"
                "fmla v25.8h, v20.8h, v4.h[1] \n"
                "fmla v26.8h, v20.8h, v4.h[2] \n"
                "fmla v27.8h, v20.8h, v4.h[3] \n"
                "fmla v28.8h, v20.8h, v4.h[4] \n"
                "fmla v29.8h, v20.8h, v4.h[5] \n"
                "fmla v30.8h, v20.8h, v4.h[6] \n"
                "fmla v31.8h, v20.8h, v4.h[7] \n"
                "fmla v24.8h, v21.8h, v5.h[0] \n"
                "fmla v25.8h, v21.8h, v5.h[1] \n"
                "fmla v26.8h, v21.8h, v5.h[2] \n"
                "fmla v27.8h, v21.8h, v5.h[3] \n"
                "fmla v28.8h, v21.8h, v5.h[4] \n"
                "fmla v29.8h, v21.8h, v5.h[5] \n"
                "fmla v30.8h, v21.8h, v5.h[6] \n"
                "fmla v31.8h, v21.8h, v5.h[7] \n"
                "fmla v24.8h, v22.8h, v6.h[0] \n"
                "fmla v25.8h, v22.8h, v6.h[1] \n"
                "fmla v26.8h, v22.8h, v6.h[2] \n"
                "fmla v27.8h, v22.8h, v6.h[3] \n"
                "fmla v28.8h, v22.8h, v6.h[4] \n"
                "fmla v29.8h, v22.8h, v6.h[5] \n"
                "fmla v30.8h, v22.8h, v6.h[6] \n"
                "fmla v31.8h, v22.8h, v6.h[7] \n"
                "subs %w0, %w0, #1 \n"
                "fmla v24.8h, v23.8h, v7.h[0] \n"
                "fmla v25.8h, v23.8h, v7.h[1] \n"
                "fmla v26.8h, v23.8h, v7.h[2] \n"
                "fmla v27.8h, v23.8h, v7.h[3] \n"
                "fmla v28.8h, v23.8h, v7.h[4] \n"
                "fmla v29.8h, v23.8h, v7.h[5] \n"
                "fmla v30.8h, v23.8h, v7.h[6] \n"
                "fmla v31.8h, v23.8h, v7.h[7] \n"
                "bne 0b \n"
                "st1 {v24.8h}, [%1], #16 \n"
                "st1 {v25.8h}, [%2], #16 \n"
                "st1 {v26.8h}, [%3], #16 \n"
                "st1 {v27.8h}, [%4], #16 \n"
                "st1 {v28.8h}, [%5], #16 \n"
                "st1 {v29.8h}, [%6], #16 \n"
                "st1 {v30.8h}, [%7], #16 \n"
                "st1 {v31.8h}, [%8], #16 \n"
                : "=r"(nn), // %0
                "=r"(outptr0), // %1
                "=r"(outptr1), // %2
                "=r"(outptr2), // %3
                "=r"(outptr3), // %4
                "=r"(outptr4), // %5
                "=r"(outptr5), // %6
                "=r"(outptr6), // %7
                "=r"(outptr7), // %8
                "=r"(tmpptr), // %9
                "=r"(kptr) // %10
                : "0"(nn),
                "1"(outptr0),
                "2"(outptr1),
                "3"(outptr2),
                "4"(outptr3),
                "5"(outptr4),
                "6"(outptr5),
                "7"(outptr6),
                "8"(outptr7),
                "9"(tmpptr),
                "10"(kptr),
                "w"(_bias0) // %22
                : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
        }
        // 4 pixels x 8 output channels per asm iteration.
        for (; i + 3 < size; i += 4)
        {
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            const __fp16* kptr = kernel.channel(p / 8);
            int nn = inch; // inch always > 0
            asm volatile(
                "dup v24.4h, %22.h[0] \n"
                "dup v25.4h, %22.h[1] \n"
                "dup v26.4h, %22.h[2] \n"
                "dup v27.4h, %22.h[3] \n"
                "dup v28.4h, %22.h[4] \n"
                "dup v29.4h, %22.h[5] \n"
                "dup v30.4h, %22.h[6] \n"
                "dup v31.4h, %22.h[7] \n"
                "0: \n"
                "prfm pldl1keep, [%9, #256] \n"
                "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%9], #32 \n"
                "prfm pldl1keep, [%10, #512] \n"
                "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"
                "fmla v24.4h, v16.4h, v0.h[0] \n"
                "fmla v25.4h, v16.4h, v0.h[1] \n"
                "fmla v26.4h, v16.4h, v0.h[2] \n"
                "fmla v27.4h, v16.4h, v0.h[3] \n"
                "fmla v28.4h, v16.4h, v0.h[4] \n"
                "fmla v29.4h, v16.4h, v0.h[5] \n"
                "fmla v30.4h, v16.4h, v0.h[6] \n"
                "fmla v31.4h, v16.4h, v0.h[7] \n"
                "fmla v24.4h, v17.4h, v1.h[0] \n"
                "fmla v25.4h, v17.4h, v1.h[1] \n"
                "fmla v26.4h, v17.4h, v1.h[2] \n"
                "fmla v27.4h, v17.4h, v1.h[3] \n"
                "fmla v28.4h, v17.4h, v1.h[4] \n"
                "fmla v29.4h, v17.4h, v1.h[5] \n"
                "fmla v30.4h, v17.4h, v1.h[6] \n"
                "fmla v31.4h, v17.4h, v1.h[7] \n"
                "prfm pldl1keep, [%9, #256] \n"
                "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%9], #32 \n"
                "fmla v24.4h, v18.4h, v2.h[0] \n"
                "fmla v25.4h, v18.4h, v2.h[1] \n"
                "fmla v26.4h, v18.4h, v2.h[2] \n"
                "fmla v27.4h, v18.4h, v2.h[3] \n"
                "fmla v28.4h, v18.4h, v2.h[4] \n"
                "fmla v29.4h, v18.4h, v2.h[5] \n"
                "fmla v30.4h, v18.4h, v2.h[6] \n"
                "fmla v31.4h, v18.4h, v2.h[7] \n"
                "prfm pldl1keep, [%10, #512] \n"
                "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"
                "fmla v24.4h, v19.4h, v3.h[0] \n"
                "fmla v25.4h, v19.4h, v3.h[1] \n"
                "fmla v26.4h, v19.4h, v3.h[2] \n"
                "fmla v27.4h, v19.4h, v3.h[3] \n"
                "fmla v28.4h, v19.4h, v3.h[4] \n"
                "fmla v29.4h, v19.4h, v3.h[5] \n"
                "fmla v30.4h, v19.4h, v3.h[6] \n"
                "fmla v31.4h, v19.4h, v3.h[7] \n"
                "fmla v24.4h, v20.4h, v4.h[0] \n"
                "fmla v25.4h, v20.4h, v4.h[1] \n"
                "fmla v26.4h, v20.4h, v4.h[2] \n"
                "fmla v27.4h, v20.4h, v4.h[3] \n"
                "fmla v28.4h, v20.4h, v4.h[4] \n"
                "fmla v29.4h, v20.4h, v4.h[5] \n"
                "fmla v30.4h, v20.4h, v4.h[6] \n"
                "fmla v31.4h, v20.4h, v4.h[7] \n"
                "fmla v24.4h, v21.4h, v5.h[0] \n"
                "fmla v25.4h, v21.4h, v5.h[1] \n"
                "fmla v26.4h, v21.4h, v5.h[2] \n"
                "fmla v27.4h, v21.4h, v5.h[3] \n"
                "fmla v28.4h, v21.4h, v5.h[4] \n"
                "fmla v29.4h, v21.4h, v5.h[5] \n"
                "fmla v30.4h, v21.4h, v5.h[6] \n"
                "fmla v31.4h, v21.4h, v5.h[7] \n"
                "fmla v24.4h, v22.4h, v6.h[0] \n"
                "fmla v25.4h, v22.4h, v6.h[1] \n"
                "fmla v26.4h, v22.4h, v6.h[2] \n"
                "fmla v27.4h, v22.4h, v6.h[3] \n"
                "fmla v28.4h, v22.4h, v6.h[4] \n"
                "fmla v29.4h, v22.4h, v6.h[5] \n"
                "fmla v30.4h, v22.4h, v6.h[6] \n"
                "fmla v31.4h, v22.4h, v6.h[7] \n"
                "subs %w0, %w0, #1 \n"
                "fmla v24.4h, v23.4h, v7.h[0] \n"
                "fmla v25.4h, v23.4h, v7.h[1] \n"
                "fmla v26.4h, v23.4h, v7.h[2] \n"
                "fmla v27.4h, v23.4h, v7.h[3] \n"
                "fmla v28.4h, v23.4h, v7.h[4] \n"
                "fmla v29.4h, v23.4h, v7.h[5] \n"
                "fmla v30.4h, v23.4h, v7.h[6] \n"
                "fmla v31.4h, v23.4h, v7.h[7] \n"
                "bne 0b \n"
                "st1 {v24.4h}, [%1], #8 \n"
                "st1 {v25.4h}, [%2], #8 \n"
                "st1 {v26.4h}, [%3], #8 \n"
                "st1 {v27.4h}, [%4], #8 \n"
                "st1 {v28.4h}, [%5], #8 \n"
                "st1 {v29.4h}, [%6], #8 \n"
                "st1 {v30.4h}, [%7], #8 \n"
                "st1 {v31.4h}, [%8], #8 \n"
                : "=r"(nn), // %0
                "=r"(outptr0), // %1
                "=r"(outptr1), // %2
                "=r"(outptr2), // %3
                "=r"(outptr3), // %4
                "=r"(outptr4), // %5
                "=r"(outptr5), // %6
                "=r"(outptr6), // %7
                "=r"(outptr7), // %8
                "=r"(tmpptr), // %9
                "=r"(kptr) // %10
                : "0"(nn),
                "1"(outptr0),
                "2"(outptr1),
                "3"(outptr2),
                "4"(outptr3),
                "5"(outptr4),
                "6"(outptr5),
                "7"(outptr6),
                "8"(outptr7),
                "9"(tmpptr),
                "10"(kptr),
                "w"(_bias0) // %22
                : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
        }
        // 1 pixel x 8 output channels per asm iteration.
        for (; i < size; i++)
        {
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            const __fp16* kptr = kernel.channel(p / 8);
            int nn = inch; // inch always > 0
            asm volatile(
                "mov v30.16b, %22.16b \n"
                "0: \n"
                "prfm pldl1keep, [%9, #128] \n"
                "ld1 {v0.8h}, [%9], #16 \n"
                "prfm pldl1keep, [%10, #512] \n"
                "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%10], #64 \n"
                "fmla v30.8h, v16.8h, v0.h[0] \n"
                "fmla v30.8h, v17.8h, v0.h[1] \n"
                "prfm pldl1keep, [%10, #512] \n"
                "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%10], #64 \n"
                "fmla v30.8h, v18.8h, v0.h[2] \n"
                "fmla v30.8h, v19.8h, v0.h[3] \n"
                "subs %w0, %w0, #1 \n"
                "fmla v30.8h, v20.8h, v0.h[4] \n"
                "fmla v30.8h, v21.8h, v0.h[5] \n"
                "fmla v30.8h, v22.8h, v0.h[6] \n"
                "fmla v30.8h, v23.8h, v0.h[7] \n"
                "bne 0b \n"
                "st1 {v30.h}[0], [%1], #2 \n"
                "st1 {v30.h}[1], [%2], #2 \n"
                "st1 {v30.h}[2], [%3], #2 \n"
                "st1 {v30.h}[3], [%4], #2 \n"
                "st1 {v30.h}[4], [%5], #2 \n"
                "st1 {v30.h}[5], [%6], #2 \n"
                "st1 {v30.h}[6], [%7], #2 \n"
                "st1 {v30.h}[7], [%8], #2 \n"
                : "=r"(nn), // %0
                "=r"(outptr0), // %1
                "=r"(outptr1), // %2
                "=r"(outptr2), // %3
                "=r"(outptr3), // %4
                "=r"(outptr4), // %5
                "=r"(outptr5), // %6
                "=r"(outptr6), // %7
                "=r"(outptr7), // %8
                "=r"(tmpptr), // %9
                "=r"(kptr) // %10
                : "0"(nn),
                "1"(outptr0),
                "2"(outptr1),
                "3"(outptr2),
                "4"(outptr3),
                "5"(outptr4),
                "6"(outptr5),
                "7"(outptr6),
                "8"(outptr7),
                "9"(tmpptr),
                "10"(kptr),
                "w"(_bias0) // %22
                : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
        }
    }
    // Remaining output channels: one channel at a time.
    remain_outch_start += nn_outch << 3;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        __fp16* outptr0 = top_blob.channel(p);
        const __fp16 bias0 = bias ? bias[p] : 0.f;
        float16x8_t _bias0 = vdupq_n_f16(bias0);
        int i = 0;
        // 8 pixels x 1 output channel per asm iteration.
        for (; i + 7 < size; i += 8)
        {
            __fp16* tmpptr = tmp.channel(i / 8);
            const __fp16* kptr = kernel.channel(p / 8 + p % 8);
            int nn = inch; // inch always > 0
            asm volatile(
                "mov v30.16b, %8.16b \n"
                "0: \n"
                "prfm pldl1keep, [%2, #512] \n"
                "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n"
                "prfm pldl1keep, [%3, #128] \n"
                "ld1 {v0.8h}, [%3], #16 \n"
                "fmla v30.8h, v16.8h, v0.h[0] \n"
                "fmla v30.8h, v17.8h, v0.h[1] \n"
                "prfm pldl1keep, [%2, #512] \n"
                "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n"
                "fmla v30.8h, v18.8h, v0.h[2] \n"
                "fmla v30.8h, v19.8h, v0.h[3] \n"
                "subs %w0, %w0, #1 \n"
                "fmla v30.8h, v20.8h, v0.h[4] \n"
                "fmla v30.8h, v21.8h, v0.h[5] \n"
                "fmla v30.8h, v22.8h, v0.h[6] \n"
                "fmla v30.8h, v23.8h, v0.h[7] \n"
                "bne 0b \n"
                "st1 {v30.8h}, [%1], #16 \n"
                : "=r"(nn), // %0
                "=r"(outptr0), // %1
                "=r"(tmpptr), // %2
                "=r"(kptr) // %3
                : "0"(nn),
                "1"(outptr0),
                "2"(tmpptr),
                "3"(kptr),
                "w"(_bias0) // %8
                : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
        }
        // 4 pixels x 1 output channel per asm iteration.
        for (; i + 3 < size; i += 4)
        {
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
            const __fp16* kptr = kernel.channel(p / 8 + p % 8);
            int nn = inch; // inch always > 0
            asm volatile(
                "mov v30.16b, %8.16b \n"
                "0: \n"
                "prfm pldl1keep, [%2, #256] \n"
                "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"
                "prfm pldl1keep, [%3, #128] \n"
                "ld1 {v0.8h}, [%3], #16 \n"
                "fmla v30.4h, v16.4h, v0.h[0] \n"
                "fmla v30.4h, v17.4h, v0.h[1] \n"
                "prfm pldl1keep, [%2, #256] \n"
                "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"
                "fmla v30.4h, v18.4h, v0.h[2] \n"
                "fmla v30.4h, v19.4h, v0.h[3] \n"
                "subs %w0, %w0, #1 \n"
                "fmla v30.4h, v20.4h, v0.h[4] \n"
                "fmla v30.4h, v21.4h, v0.h[5] \n"
                "fmla v30.4h, v22.4h, v0.h[6] \n"
                "fmla v30.4h, v23.4h, v0.h[7] \n"
                "bne 0b \n"
                "st1 {v30.4h}, [%1], #8 \n"
                : "=r"(nn), // %0
                "=r"(outptr0), // %1
                "=r"(tmpptr), // %2
                "=r"(kptr) // %3
                : "0"(nn),
                "1"(outptr0),
                "2"(tmpptr),
                "3"(kptr),
                "w"(_bias0) // %8
                : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30");
        }
        // Scalar tail: intrinsics accumulate 8 lanes, then horizontal add.
        for (; i < size; i++)
        {
            __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
            const __fp16* kptr = kernel.channel(p / 8 + p % 8);
            float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f);
            for (int q = 0; q < inch; q++)
            {
                float16x8_t _r0 = vld1q_f16(tmpptr);
                float16x8_t _k0 = vld1q_f16(kptr);
                _sum0 = vfmaq_f16(_sum0, _r0, _k0);
                kptr += 8;
                tmpptr += 8;
            }
            __fp16 sum0 = bias0 + vaddvq_f32(vcvt_f32_f16(vadd_f16(vget_low_f16(_sum0), vget_high_f16(_sum0))));
            outptr0[0] = sum0;
            outptr0++;
        }
    }
    // // NOTE sgemm
    // for (; p<outch; p++)
    // {
    //     Mat out0 = top_blob.channel(p);
    //
    //     const float bias0 = bias ? bias[p] : 0.f;
    //
    //     __fp16* outptr0 = out0;
    //
    //     for (int i=0; i<size; i++)
    //     {
    //         float sum = bias0;
    //
    //         const __fp16* kptr = _kernel.channel(p);
    //
    //         for (int q=0; q<inch; q++)
    //         {
    //             const __fp16* img0 = bottom_blob.channel(q);
    //
    //             sum += img0[i] * kptr[0];
    //             kptr ++;
    //         }
    //
    //         outptr0[i] = sum;
    //     }
    // }
}
static void conv1x1s2_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 8;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const __fp16* r0 = bottom_blob.channel(p);
__fp16* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float16x8_t _v0 = vld1q_f16(r0);
float16x8_t _v1 = vld1q_f16(r0 + 16);
float16x8_t _v2 = vld1q_f16(r0 + 32);
float16x8_t _v3 = vld1q_f16(r0 + 48);
vst1q_f16(outptr, _v0);
vst1q_f16(outptr + 8, _v1);
vst1q_f16(outptr + 16, _v2);
vst1q_f16(outptr + 24, _v3);
r0 += 64;
outptr += 32;
}
for (; j + 1 < outw; j += 2)
{
float16x8_t _v0 = vld1q_f16(r0);
float16x8_t _v1 = vld1q_f16(r0 + 16);
vst1q_f16(outptr, _v0);
vst1q_f16(outptr + 8, _v1);
r0 += 32;
outptr += 16;
}
for (; j < outw; j++)
{
float16x8_t _v = vld1q_f16(r0);
vst1q_f16(outptr, _v);
r0 += 16;
outptr += 8;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8to1_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
task-taskgroup-unrelated.c | /*
* task-taskgroup-unrelated.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run-race | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include "ompt/ompt-signal.h"
int main(int argc, char *argv[]) { // Archer testcase: the race on `var` is INTENTIONAL (checked by FileCheck below)
int var = 0, a = 0; // var: deliberately racy counter; a: OMPT handshake flag
#pragma omp parallel num_threads(2) shared(var, a)
#pragma omp master
{
#pragma omp task shared(var, a) // unsynchronized var++ here races with the var++ after the taskgroup
{
var++;
OMPT_SIGNAL(a);
// Give master thread time to execute the task in the taskgroup.
OMPT_WAIT(a, 2);
}
#pragma omp taskgroup // only waits for the dummy task below, NOT the unrelated task above
{
#pragma omp task if (0) // undeferred empty task, executed immediately
{
// Dummy task.
}
// Give other threads time to steal the tasks.
OMPT_WAIT(a, 1);
OMPT_SIGNAL(a);
}
var++; // second unsynchronized access -> expected TSan data-race report
}
int error = (var != 2);
fprintf(stderr, "DONE\n");
return error;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK-NEXT: {{(Write|Read)}} of size 4
// CHECK-NEXT: #0 {{.*}}task-taskgroup-unrelated.c:46
// CHECK: Previous write of size 4
// CHECK-NEXT: #0 {{.*}}task-taskgroup-unrelated.c:28
// CHECK: DONE
// CHECK: ThreadSanitizer: reported 1 warnings
|
mixed_tentusscher_myo_epi_2004_S2_15.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_15.h"
// Report the model's resting potential and/or ODE system size, as requested
// by the caller via the get_initial_v / get_neq flags.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
// Load the initial state vector for one cell. The extra_data mask selects the
// cell type per sv_id: 0 -> myocardium, otherwise -> epicardium. Exits the
// program if no mask was supplied (a mixed model is meaningless without one).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
// (pre-paced steady state; same variable order as the table above)
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
// NOTE: the copy loop below is fused onto the end of the data line.
real sv_sst[]={-86.3587896185485,0.00134905676198612,0.774520199073544,0.774374680229778,0.000180109822242613,0.482910475890506,0.00298448938077068,0.999998277316546,2.00218048619546e-08,1.94462488964510e-05,0.999771668739369,1.00653067523994,0.999978756030136,5.82428650958820e-05,0.509754826080024,10.0682799523111,139.518219822205}; for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
// Advance every cell in the solve list by num_steps explicit-Euler steps of
// size dt, dispatching each cell to the myocardium or epicardium RHS
// according to the extra_data type mask. Cells are independent, so the outer
// loop is parallelized over threads.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j)
{
// NOTE(review): mapping is indexed with the loop index i while the state
// vector uses sv_id; confirm mapping is laid out per solve-list entry
// (otherwise mapping[sv_id] would be the right index when
// cells_to_solve is non-NULL).
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// Take a single time step of the myocardium model: snapshot the current
// state, evaluate the RHS (which returns the already-updated state in its
// output array), and write the result back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
real state[NEQ], next[NEQ];
int k;

for(k = 0; k < NEQ; k++)
state[k] = sv[k];

RHS_cpu_myo(state, next, stim_current, dt);

for(k = 0; k < NEQ; k++)
sv[k] = next[k];
}
// TenTusscher 2004 MYOCARDIUM cell right-hand side.
// NOTE: despite the name, rDY_ does not hold derivatives: every entry is the
// state value already advanced by dt (gates via exponential/Rush-Larsen-style
// updates, concentrations and voltage via explicit Euler inside this body).
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Working variables (currents, fluxes, gate steady states / time constants)
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents (Nernst/reversal potentials and rectifiers)
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// Analytic solution of the rapid-buffering quadratic for SR and cytosolic Ca
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (exponential integration toward the steady state)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G gates may not increase while the cell is depolarized (> -37 mV)
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage (explicit Euler on the total membrane current)
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
// Take a single time step of the epicardium model: snapshot the current
// state, evaluate the RHS (which returns the already-updated state in its
// output array), and write the result back into sv.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
real state[NEQ], next[NEQ];
int k;

for(k = 0; k < NEQ; k++)
state[k] = sv[k];

RHS_cpu_epi(state, next, stim_current, dt);

for(k = 0; k < NEQ; k++)
sv[k] = next[k];
}
// TenTusscher 2004 EPICARDIUM cell right-hand side with tuned parameters:
// the defaults declared below are overwritten from the parameters[] table
// (S2 scenario fit). As in RHS_cpu_myo, rDY_ holds the states already
// advanced by dt, not derivatives.
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Fitted conductances/rates for this S2 scenario; overrides the defaults above
real parameters []={14.3174380565213,0.000348866478117368,0.000146417624096425,0.000608670241592454,0.271667442889255,0.133939514082262,0.188380543281873,4.94330134063706,0.0151351354091834,2.05397398481996,1086.72577633731,0.000456799361732942,0.364037161557358,0.0196983207210989,0.00114898938065171,6.40398010166762e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
// Working variables (currents, fluxes, gate steady states / time constants)
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents (Nernst/reversal potentials and rectifiers)
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; // fitted arel/crel instead of hard-coded constants
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai); // fitted Vleak instead of hard-coded 0.00008f
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// Analytic solution of the rapid-buffering quadratic for SR and cytosolic Ca
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (exponential integration toward the steady state)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// FCa and G gates may not increase while the cell is depolarized (> -37 mV)
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage (explicit Euler on the total membrane current)
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
sxc_fmt_plug.c | /* SXC cracker patch for JtR. Hacked together during Summer of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_sxc;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sxc);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "sha.h"
#include <openssl/blowfish.h>
#include "pbkdf2_hmac_sha1.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 2 // tuned on core i7
#endif
#include "memdbg.h"
#define FORMAT_LABEL "sxc"
#define FORMAT_NAME "StarOffice .sxc"
#ifdef MMX_COEF
#define ALGORITHM_NAME "SHA1 Blowfish " SHA1_N_STR MMX_TYPE
#else
#define ALGORITHM_NAME "SHA1 Blowfish 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests sxc_tests[] = {
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a793
7b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdc
cb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b
553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
/* Per-candidate plaintext buffers, always NUL-terminated (see sxc_set_key()). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Per-candidate SHA-1 of the decrypted content, filled by crypt_all(). */
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];
/* One parsed "$sxc$*..." ciphertext; produced by get_salt(), selected by set_salt(). */
static struct custom_salt {
	int cipher_type; // FIXME: cipher_type seems to be ignored
	int checksum_type;
	int iterations;        // PBKDF2-HMAC-SHA1 iteration count
	int key_size;          // derived key size in bytes (16 or 32, see valid())
	int iv_length;         // bytes used in iv[]
	int salt_length;       // bytes used in salt[]
	int original_length;   // plaintext byte count hashed for the checksum
	int length;            // bytes used in content[]
	unsigned char iv[16];
	unsigned char salt[32];
	unsigned char content[1024];  // encrypted document chunk (Blowfish CFB64 in crypt_all())
} *cur_salt;
/*
 * Format initialization: scale the keys-per-crypt limits for OpenMP and
 * allocate one plaintext buffer and one digest slot per candidate.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) *
	    self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
	    self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Return nonzero iff every character of q (up to the NUL) is a hex digit
 * according to the atoi16[] table (0x7F marks a non-hex byte). */
static int ishex(char *q)
{
	for (; atoi16[ARCH_INDEX(*q)] != 0x7F; q++)
		;
	return *q == 0;
}
/*
 * Validate a "$sxc$*..." ciphertext string field by field.
 * Returns 1 when the layout matches what get_salt()/get_binary() expect,
 * 0 otherwise.  Never modifies the input (works on a strdup'd copy).
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int res;

	/* Check the full "$sxc$*" tag: the parsers unconditionally skip 6
	 * bytes, but previously only 5 were verified, so an unexpected 6th
	 * character would silently shift every subsequent field. */
	if (strncmp(ciphertext, "$sxc$*", 6))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;  /* strtok() mutates the copy; free via keeptr */
	ctcopy += 6;
	if ((p = strtok(ctcopy, "*")) == NULL) /* cipher type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* checksum type */
		goto err;
	res = atoi(p);
	if (res != 0 && res != 1)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* iterations */
		goto err;
	res = atoi(p);
	if (res <= 0)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* key size */
		goto err;
	res = atoi(p);
	if (res != 16 && res != 32)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* checksum field (skipped) */
		goto err;
	if (strlen(p) != BINARY_SIZE * 2)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* iv length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 16)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* iv */
		goto err;
	if (strlen(p) != res * 2)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* salt length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 32)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* salt */
		goto err;
	if (strlen(p) != res * 2)
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* original length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* length */
		goto err;
	res = atoi(p);
	if (res <= 0 || res > 1024)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL) /* content */
		goto err;
	if (strlen(p) != res * 2)
		goto err;
	if (!ishex(p))
		goto err;
	if (strtok(NULL, "*") != NULL) /* the end */
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Parse a "$sxc$*..." ciphertext into a custom_salt.  valid() has already
 * verified the field count and lengths, so the strtok() results are not
 * NULL-checked here.  Returns a pointer to STATIC storage: the framework
 * must copy it (SALT_SIZE bytes) before the next call.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += 6; /* skip over "$sxc$*" */
	p = strtok(ctcopy, "*");
	cs.cipher_type = atoi(p);
	p = strtok(NULL, "*");
	cs.checksum_type = atoi(p);
	p = strtok(NULL, "*");
	cs.iterations = atoi(p);
	p = strtok(NULL, "*");
	cs.key_size = atoi(p);
	strtok(NULL, "*");
	/* skip checksum field */
	p = strtok(NULL, "*");
	cs.iv_length = atoi(p);
	p = strtok(NULL, "*");
	/* decode hex fields: two hex digits per output byte */
	for (i = 0; i < cs.iv_length; i++)
		cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	cs.salt_length = atoi(p);
	p = strtok(NULL, "*");
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	cs.original_length = atoi(p);
	p = strtok(NULL, "*");
	cs.length = atoi(p);
	p = strtok(NULL, "*");
	for (i = 0; i < cs.length; i++)
		cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/*
 * Extract the stored checksum (5th '*'-separated field after the tag) as
 * BINARY_SIZE raw bytes.  Returns a pointer to static, word-aligned storage.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;  /* forces alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	ctcopy += 6; /* skip over "$sxc$*" */
	/* skip cipher type, checksum type, iterations and key size */
	strtok(ctcopy, "*");
	strtok(NULL, "*");
	strtok(NULL, "*");
	strtok(NULL, "*");
	p = strtok(NULL, "*");
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	MEM_FREE(keeptr);
	return out;
}
/* Partial-hash lookups: progressively wider low bits of the first 32-bit
 * word of the computed digest, used by the cracker's hash tables. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* Select the salt (parsed ciphertext) used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute, for each candidate password, SHA-1(password) -> PBKDF2-SHA1 key
 * -> Blowfish-CFB64 decrypt of the stored content -> SHA-1 of the first
 * original_length plaintext bytes, stored into crypt_out[].
 *
 * Fix: the candidate loop was compiled only under #ifdef _OPENMP, so a
 * non-OpenMP build processed just the first MAX_KEYS_PER_CRYPT candidates
 * of each batch.  The loop now always runs; only the pragma is conditional.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		unsigned char key[MAX_KEYS_PER_CRYPT][32];
		unsigned char hash[MAX_KEYS_PER_CRYPT][32];
		BF_KEY bf_key;
		int bf_ivec_pos;
		unsigned char ivec[8];
		unsigned char output[1024];
		int i;
		SHA_CTX ctx;
		/* Inner key material: SHA-1 of each candidate password. */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, (unsigned char *)saved_key[index+i], strlen(saved_key[index+i]));
			SHA1_Final((unsigned char *)hash[i], &ctx);
		}
#ifdef MMX_COEF
		{
			/* SIMD path: derive all keys of the batch in one call. */
			int lens[MAX_KEYS_PER_CRYPT];
			unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
			for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
				lens[i] = 20;  /* SHA-1 digest length */
				pin[i] = (unsigned char*)hash[i];
				pout[i] = key[i];
			}
			pbkdf2_sha1_sse((const unsigned char**)pin, lens, cur_salt->salt,
			       cur_salt->salt_length,
			       cur_salt->iterations, pout,
			       cur_salt->key_size, 0);
		}
#else
		pbkdf2_sha1(hash[0], 20, cur_salt->salt,
		       cur_salt->salt_length,
		       cur_salt->iterations, key[0],
		       cur_salt->key_size, 0);
#if !ARCH_LITTLE_ENDIAN
		for (i = 0; i < cur_salt->key_size/sizeof(ARCH_WORD_32); ++i) {
			((ARCH_WORD_32*)key[0])[i] = JOHNSWAP(((ARCH_WORD_32*)key[0])[i]);
		}
#endif
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			/* Decrypt the document chunk and hash the plaintext. */
			bf_ivec_pos = 0;
			memcpy(ivec, cur_salt->iv, 8);
			BF_set_key(&bf_key, cur_salt->key_size, key[i]);
			BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0);
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, output, cur_salt->original_length);
			SHA1_Final((unsigned char*)crypt_out[index+i], &ctx);
		}
	}
	return count;
}
/* Does any computed digest in the batch match the stored checksum? */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (!memcmp(binary, crypt_out[i], BINARY_SIZE))
			return 1;
	return 0;
}

/* Exact comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}

/* cmp_one() already compares the full binary; nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate password, truncated to PLAINTEXT_LENGTH and
 * NUL-terminated. */
static void sxc_set_key(char *key, int index)
{
	int len = strlen(key);

	if (len > PLAINTEXT_LENGTH)
		len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
	saved_key[index][len] = 0;
}

/* Return the stored (possibly truncated) candidate password. */
static char *get_key(int index)
{
	return saved_key[index];
}
#if FMT_MAIN_VERSION > 11
/* Report the PBKDF2 iteration count as this format's tunable cost. */
static unsigned int iteration_count(void *salt)
{
	return (unsigned int) ((struct custom_salt *) salt)->iterations;
}
#endif
/* John the Ripper format descriptor: parameters, then method table. */
struct fmt_main fmt_sxc = {
	{ /* fmt_params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ /* tunable cost names, matching tunable_cost_value[] below */
			"iteration count",
		},
#endif
		sxc_tests
	}, { /* fmt_methods */
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ /* tunable_cost_value */
			iteration_count,
		},
#endif
		fmt_default_source,
		{ /* binary_hash: not used, crypt_out is compared directly */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		sxc_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash */
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
matrix.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M AAA TTTTT RRRR IIIII X X %
% MM MM A A T R R I X X %
% M M M AAAAA T RRRR I X %
% M M A A T R R I X X %
% M M A A T R R IIIII X X %
% %
% %
% MagickCore Matrix Methods %
% %
% Software Design %
% Cristy %
% August 2007 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image-private.h"
#include "magick/matrix.h"
#include "magick/memory_.h"
#include "magick/pixel-private.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
/*
  Typedef declaration: a matrix cache that lives in heap memory, in an
  anonymous memory map, or in a disk file (see AcquireMatrixInfo()).
*/
struct _MatrixInfo
{
  CacheType
    type;          /* MemoryCache, MapCache, or DiskCache */

  size_t
    columns,
    rows,
    stride;        /* bytes per matrix element */

  MagickSizeType
    length;        /* total bytes: columns*rows*stride */

  MagickBooleanType
    mapped,        /* MemoryCache only: elements came from MapBlob() */
    synchronize;   /* from MAGICK_SYNCHRONIZE env var; enables fallocate */

  char
    path[MaxTextExtent];  /* disk cache file path */

  int
    file;          /* disk cache file descriptor, or -1 */

  void
    *elements;     /* NULL for DiskCache */

  SemaphoreInfo
    *semaphore;    /* serializes lseek+read/write when pread/pwrite absent */

  size_t
    signature;     /* MagickCoreSignature validity marker */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMatrixInfo() allocates the ImageInfo structure.
%
% The format of the AcquireMatrixInfo method is:
%
% MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows,
% const size_t stride,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: the matrix columns.
%
% o rows: the matrix rows.
%
% o stride: the matrix stride.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(SIGBUS)
/* SIGBUS handler installed by SetMatrixExtent(): a fault on the memory-
   mapped matrix file (presumably the backing file could not be extended —
   confirm) is escalated to a fatal cache error.  The signal number is
   unused but required by the signal() handler signature. */
static void MatrixSignalHandler(int status)
{
  ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache");
}
#endif
/*
  Write `length' bytes from `buffer' to the matrix disk file starting at
  `offset'.  Returns the number of bytes written (short on unrecoverable
  error) or -1 if the initial seek fails.  Without pwrite(), the
  lseek+write pair is serialized under the matrix semaphore; short writes
  are retried and EINTR restarts the write.
*/
static inline MagickOffsetType WriteMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        /* retry only when interrupted; any other error ends the loop */
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PWRITE)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  Ensure the disk-backed matrix file is at least `length' bytes: if it is
  short, write one NUL byte at offset length-1 to extend it.  Optionally
  preallocates the range with posix_fallocate() when synchronize is set,
  and installs the SIGBUS handler used for later memory-mapped faults.
  Returns MagickTrue on success (or when already large enough).
*/
static MagickBooleanType SetMatrixExtent(
  MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
  MagickOffsetType
    count,
    extent,
    offset;

  /* reject lengths that do not round-trip through a signed offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);
  extent=(MagickOffsetType) length-1;
  count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (matrix_info->synchronize != MagickFalse)
    (void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
  (void) signal(SIGBUS,MatrixSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
/*
  AcquireMatrixInfo() allocates a matrix cache, preferring heap memory,
  then an anonymous memory map, then a memory-mapped disk file, then a
  plain disk file, subject to resource limits.  Returns NULL on failure.

  Fix: guard against zero columns/rows/stride before the overflow check
  below, which divides by rows and stride (division by zero is undefined
  behavior).
*/
MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  if ((columns == 0) || (rows == 0) || (stride == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return((MatrixInfo *) NULL);
    }
  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) memset(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickCoreSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AllocateSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Detect size_t overflow in columns*rows*stride by dividing back out.
  */
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  /*
    First choice: heap memory (or an anonymous map as fallback).
  */
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  /*
    Fall back to a disk file, memory-mapped when the map resource allows.
  */
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
              (size_t) matrix_info->length);
          if (matrix_info->elements != NULL)
            matrix_info->type=MapCache;
          else
            RelinquishMagickResource(MapResource,matrix_info->length);
        }
    }
  return(matrix_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireMagickMatrix() allocates and returns a matrix in the form of an
% array of pointers to an array of doubles, with all values pre-set to zero.
%
% This used to generate the two dimensional matrix, and vectors required
% for the GaussJordanElimination() method below, solving some system of
% simultaneous equations.
%
% The format of the AcquireMagickMatrix method is:
%
% double **AcquireMagickMatrix(const size_t number_rows,
% const size_t size)
%
% A description of each parameter follows:
%
% o number_rows: the number pointers for the array of pointers
% (first dimension).
%
% o size: the size of the array of doubles each pointer points to
% (second dimension).
%
*/
/*
  Allocate a number_rows x size matrix as an array of row pointers, with
  every element set to 0.0.  Returns NULL on any allocation failure, after
  releasing whatever rows were already allocated.
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **matrix;

  register ssize_t
    x,
    y;

  matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix));
  if (matrix == (double **) NULL)
    return((double **) NULL);
  for (y=0; y < (ssize_t) number_rows; y++)
  {
    matrix[y]=(double *) AcquireQuantumMemory(size,sizeof(**matrix));
    if (matrix[y] == (double *) NULL)
      {
        /* unwind the rows allocated so far, then the pointer array */
        while (--y >= 0)
          matrix[y]=(double *) RelinquishMagickMemory(matrix[y]);
        matrix=(double **) RelinquishMagickMemory(matrix);
        return((double **) NULL);
      }
    for (x=0; x < (ssize_t) size; x++)
      matrix[y][x]=0.0;
  }
  return(matrix);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y M a t r i x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
% with the matrix.
%
% The format of the DestroyImage method is:
%
% MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/* Release the matrix storage, its resource accounting, its disk file (if
   any), and the descriptor itself.  Always returns NULL. */
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
      /* fall through: a memory-mapped matrix also owns the disk file and
         DiskResource released by the DiskCache case below */
    }
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  DestroySemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a u s s J o r d a n E l i m i n a t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussJordanElimination() returns a matrix in reduced row echelon form,
% while simultaneously reducing and thus solving the augmented results
% matrix.
%
% See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
% The format of the GaussJordanElimination method is:
%
% MagickBooleanType GaussJordanElimination(double **matrix,
% double **vectors,const size_t rank,const size_t number_vectors)
%
% A description of each parameter follows:
%
% o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
% o vectors: the additional matrix augmenting the matrix for row reduction.
% Producing an 'array of column vectors'.
%
% o rank: The size of the matrix (both rows and columns). Also represents
% the number terms that need to be solved.
%
% o number_vectors: Number of vector columns, augmenting the above matrix.
% Usually 1, but can be more for more complex equation solving.
%
% Note that the 'matrix' is given as a 'array of row pointers' of rank size.
% That is values can be assigned as matrix[row][column] where 'row' is
% typically the equation, and 'column' is the term of the equation.
% That is the matrix is in the form of a 'row first array'.
%
% However 'vectors' is a 'array of column pointers' which can have any number
% of columns, with each column array the same 'rank' size as 'matrix'.
%
% This allows for simpler handling of the results, especially is only one
% column 'vector' is all that is required to produce the desired solution.
%
% For example, the 'vectors' can consist of a pointer to a simple array of
% doubles, when only one set of simultaneous equations is to be solved from
% the given set of coefficient weighted terms.
%
% double **matrix = AcquireMagickMatrix(8UL,8UL);
% double coefficients[8];
% ...
% GaussJordanElimination(matrix, &coefficients, 8UL, 1UL);
%
% However by specifying more 'columns' (as an 'array of vector columns', you
% can use this function to solve a set of 'separable' equations.
%
% For example a distortion function where u = U(x,y) v = V(x,y)
% And the functions U() and V() have separate coefficients, but are being
% generated from a common x,y->u,v data set.
%
% Another example is generation of a color gradient from a set of colors at
% specific coordinates, such as a list x,y -> r,g,b,a.
%
% You can also use the 'vectors' to generate an inverse of the given 'matrix'
% though as a 'column first array' rather than a 'row first array'. For
% details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
*/
/*
  Reduce `matrix' to reduced row echelon form with full pivoting, applying
  the same operations to the `vectors' columns (thereby solving them).
  Returns MagickFalse if the matrix is singular or on allocation failure.

  Fix: the early singularity returns previously leaked the columns/rows/
  pivots work arrays; all failure paths now release them.
*/
MagickExport MagickBooleanType GaussJordanElimination(double **matrix,
  double **vectors,const size_t rank,const size_t number_vectors)
{
#define GaussJordanSwap(x,y) \
{ \
  if ((x) != (y)) \
    { \
      (x)+=(y); \
      (y)=(x)-(y); \
      (x)=(x)-(y); \
    } \
}

  double
    max,
    scale;

  register ssize_t
    i,
    j,
    k;

  ssize_t
    column,
    *columns,
    *pivots,
    row,
    *rows;

  columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns));
  rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows));
  pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots));
  if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) ||
      (pivots == (ssize_t *) NULL))
    {
      if (pivots != (ssize_t *) NULL)
        pivots=(ssize_t *) RelinquishMagickMemory(pivots);
      if (columns != (ssize_t *) NULL)
        columns=(ssize_t *) RelinquishMagickMemory(columns);
      if (rows != (ssize_t *) NULL)
        rows=(ssize_t *) RelinquishMagickMemory(rows);
      return(MagickFalse);
    }
  (void) memset(columns,0,rank*sizeof(*columns));
  (void) memset(rows,0,rank*sizeof(*rows));
  (void) memset(pivots,0,rank*sizeof(*pivots));
  column=0;
  row=0;
  for (i=0; i < (ssize_t) rank; i++)
  {
    /*
      Select the largest remaining element as the next pivot.
    */
    max=0.0;
    for (j=0; j < (ssize_t) rank; j++)
      if (pivots[j] != 1)
        {
          for (k=0; k < (ssize_t) rank; k++)
            if (pivots[k] != 0)
              {
                if (pivots[k] > 1)
                  goto singular;  /* was: return w/o freeing work arrays */
              }
            else
              if (fabs(matrix[j][k]) >= max)
                {
                  max=fabs(matrix[j][k]);
                  row=j;
                  column=k;
                }
        }
    pivots[column]++;
    if (row != column)
      {
        for (k=0; k < (ssize_t) rank; k++)
          GaussJordanSwap(matrix[row][k],matrix[column][k]);
        for (k=0; k < (ssize_t) number_vectors; k++)
          GaussJordanSwap(vectors[k][row],vectors[k][column]);
      }
    rows[i]=row;
    columns[i]=column;
    if (matrix[column][column] == 0.0)
      goto singular;  /* singularity; was a leak of the work arrays */
    /*
      Normalize the pivot row, then eliminate the pivot column elsewhere.
    */
    scale=PerceptibleReciprocal(matrix[column][column]);
    matrix[column][column]=1.0;
    for (j=0; j < (ssize_t) rank; j++)
      matrix[column][j]*=scale;
    for (j=0; j < (ssize_t) number_vectors; j++)
      vectors[j][column]*=scale;
    for (j=0; j < (ssize_t) rank; j++)
      if (j != column)
        {
          scale=matrix[j][column];
          matrix[j][column]=0.0;
          for (k=0; k < (ssize_t) rank; k++)
            matrix[j][k]-=scale*matrix[column][k];
          for (k=0; k < (ssize_t) number_vectors; k++)
            vectors[k][j]-=scale*vectors[k][column];
        }
  }
  /*
    Undo the column permutations introduced by pivoting.
  */
  for (j=(ssize_t) rank-1; j >= 0; j--)
    if (columns[j] != rows[j])
      for (i=0; i < (ssize_t) rank; i++)
        GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]);
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickTrue);

singular:
  pivots=(ssize_t *) RelinquishMagickMemory(pivots);
  rows=(ssize_t *) RelinquishMagickMemory(rows);
  columns=(ssize_t *) RelinquishMagickMemory(columns);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x C o l u m n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixColumns() returns the number of columns in the matrix.
%
% The format of the GetMatrixColumns method is:
%
% size_t GetMatrixColumns(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/* Accessor only; the matrix is neither locked nor modified. */
MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->columns);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixElement() returns the specifed element in the matrix.
%
% The format of the GetMatrixElement method is:
%
% MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix columns.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: return the matrix element in this buffer.
%
*/
/* Clamp x into the valid column range [0, columns-1] (replicate edge). */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  const ssize_t
    last=(ssize_t) columns-1;

  return(x < 0L ? 0L : (x > last ? last : x));
}
/* Clamp y into the valid row range [0, rows-1] (replicate edge). */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  const ssize_t
    last=(ssize_t) rows-1;

  return(y < 0L ? 0L : (y > last ? last : y));
}
/*
  Read `length' bytes into `buffer' from the matrix disk file starting at
  `offset'.  Mirror of WriteMatrixElements(): returns the number of bytes
  read (short on unrecoverable error) or -1 if the initial seek fails.
  Without pread(), the lseek+read pair is serialized under the matrix
  semaphore; short reads are retried and EINTR restarts the read.
*/
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        /* retry only when interrupted; any other error ends the loop */
        if (errno != EINTR)
          break;
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}
/*
  Copy one element (stride bytes) at (x,y) into `value'.  Out-of-range
  coordinates are clamped to the nearest edge element, so only a disk
  read failure can return MagickFalse.
*/
MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* row-major element index after edge clamping */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      /* MemoryCache and MapCache are directly addressable */
      (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t M a t r i x R o w s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMatrixRows() returns the number of rows in the matrix.
%
% The format of the GetMatrixRows method is:
%
% size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
/* Accessor only; the matrix is neither locked nor modified. */
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e a s t S q u a r e s A d d T e r m s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LeastSquaresAddTerms() adds one set of terms and associate results to the
% given matrix and vectors for solving using least-squares function fitting.
%
% The format of the AcquireMagickMatrix method is:
%
% void LeastSquaresAddTerms(double **matrix,double **vectors,
% const double *terms,const double *results,const size_t rank,
% const size_t number_vectors);
%
% A description of each parameter follows:
%
% o matrix: the square matrix to add given terms/results to.
%
% o vectors: the result vectors to add terms/results to.
%
% o terms: the pre-calculated terms (without the unknown coefficient
% weights) that forms the equation being added.
%
% o results: the result(s) that should be generated from the given terms
% weighted by the yet-to-be-solved coefficients.
%
% o rank: the rank or size of the dimensions of the square matrix.
% Also the length of vectors, and number of terms being added.
%
% o number_vectors: Number of result vectors, and number or results being
% added. Also represents the number of separable systems of equations
% that is being solved.
%
% Example of use...
%
% 2 dimensional Affine Equations (which are separable)
% c0*x + c2*y + c4*1 => u
% c1*x + c3*y + c5*1 => v
%
% double **matrix = AcquireMagickMatrix(3UL,3UL);
% double **vectors = AcquireMagickMatrix(2UL,3UL);
% double terms[3], results[2];
% ...
% for each given x,y -> u,v
% terms[0] = x;
% terms[1] = y;
% terms[2] = 1;
% results[0] = u;
% results[1] = v;
% LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL);
% ...
% if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) {
% c0 = vectors[0][0];
% c2 = vectors[0][1];
% c4 = vectors[0][2];
% c1 = vectors[1][0];
% c3 = vectors[1][1];
% c5 = vectors[1][2];
% }
% else
% printf("Matrix unsolvable\n);
% RelinquishMagickMatrix(matrix,3UL);
% RelinquishMagickMatrix(vectors,2UL);
%
*/
/*
  Accumulate one observation into the normal equations for least-squares
  fitting: matrix gains the outer product terms x terms, and each result
  vector gains its result weighted by the terms.
*/
MagickExport void LeastSquaresAddTerms(double **matrix,double **vectors,
  const double *terms,const double *results,const size_t rank,
  const size_t number_vectors)
{
  register ssize_t
    u,
    v;

  for (u=0; u < (ssize_t) rank; u++)
  {
    for (v=0; v < (ssize_t) rank; v++)
      matrix[v][u]+=terms[v]*terms[u];
    for (v=0; v < (ssize_t) number_vectors; v++)
      vectors[v][u]+=results[v]*terms[u];
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a t r i x T o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MatrixToImage() returns a matrix as an image. The matrix elements must be
% of type double otherwise nonsense is returned.
%
% The format of the MatrixToImage method is:
%
% Image *MatrixToImage(const MatrixInfo *matrix_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Render a matrix of doubles as a grayscale image, linearly rescaling the
  element range onto [0, QuantumRange].  Requires stride >= sizeof(double);
  returns NULL otherwise or if any row fails to sync.
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor,
    value;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&value);
  min_value=value;
  max_value=value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      /* unreadable elements are simply skipped in the range scan */
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /* all-zero matrix maps to black; constant nonzero maps to QuantumRange */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.
  */
  image=AcquireImage((ImageInfo *) NULL);
  /* NOTE(review): AcquireImage() result is not NULL-checked before the
     member writes below — presumably it throws fatally rather than
     returning NULL; confirm against its contract. */
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    register PixelPacket
      *q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      /* rescale to quantum range and write a gray pixel */
      value=scale_factor*(value-min_value);
      q->red=ClampToQuantum(value);
      q->green=q->red;
      q->blue=q->red;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N u l l M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NullMatrix() sets all elements of the matrix to zero.
%
% The format of the NullMatrix method is:
%
% MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  register ssize_t
    x;

  ssize_t
    count,
    y;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* Memory-backed matrices can be zeroed in one shot. */
  if (matrix_info->type != DiskCache)
    {
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  /*
    Disk-backed matrix: overwrite the cache file with zero bytes.
    NOTE(review): the inner loop writes matrix_info->length bytes for EVERY
    row (rows*length bytes total); it looks like it should write length
    bytes overall, or one row's worth per iteration -- confirm against the
    intended disk-cache layout.
  */
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    for (x=0; x < (ssize_t) matrix_info->length; x++)
    {
      count=write(matrix_info->file,&value,sizeof(value));
      if (count != (ssize_t) sizeof(value))
        break;
    }
    /* A short write aborts the outer loop too. */
    if (x < (ssize_t) matrix_info->length)
      break;
  }
  /* MagickFalse when any write failed before all rows completed. */
  return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e l i n q u i s h M a g i c k M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RelinquishMagickMatrix() frees the previously acquired matrix (array of
% pointers to arrays of doubles).
%
% The format of the RelinquishMagickMatrix method is:
%
% double **RelinquishMagickMatrix(double **matrix,
% const size_t number_rows)
%
% A description of each parameter follows:
%
% o matrix: the matrix to relinquish
%
% o number_rows: the first dimension of the acquired matrix (number of
% pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  register ssize_t
    row;

  /*
    Free each row vector and then the vector of row pointers; a NULL matrix
    is returned unchanged so callers may relinquish unconditionally.
  */
  if (matrix == (double **) NULL)
    return(matrix);
  for (row=0; row < (ssize_t) number_rows; row++)
    matrix[row]=(double *) RelinquishMagickMemory(matrix[row]);
  return((double **) RelinquishMagickMemory(matrix));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t M a t r i x E l e m e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetMatrixElement() sets the specified element in the matrix.
%
% The format of the SetMatrixElement method is:
%
% MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
% const ssize_t x,const ssize_t y,void *value)
%
% A description of each parameter follows:
%
% o matrix_info: the matrix.
%
% o x: the matrix x-offset.
%
% o y: the matrix y-offset.
%
% o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    element,
    written;

  /*
    Store one element at (x,y).  The element offset is validated against
    the matrix extent before any access; out-of-range coordinates yield
    MagickFalse.
  */
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  element=(MagickOffsetType) y*matrix_info->columns+x;
  if (element < 0)
    return(MagickFalse);
  if ((MagickSizeType) (element*matrix_info->stride) >= matrix_info->length)
    return(MagickFalse);
  if (matrix_info->type != DiskCache)
    {
      /* Memory-backed: copy straight into the element array. */
      (void) memcpy((unsigned char *) matrix_info->elements+element*
        matrix_info->stride,value,matrix_info->stride);
      return(MagickTrue);
    }
  /* Disk-backed: route the element through the cache file. */
  written=WriteMatrixElements(matrix_info,element*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  return(written == (MagickOffsetType) matrix_info->stride ? MagickTrue :
    MagickFalse);
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/* Per-channel compression methods defined by the PSD file format. */
typedef enum
{
  Raw = 0,                  /* uncompressed scanlines */
  RLE = 1,                  /* PackBits run-length encoding */
  ZipWithoutPrediction = 2, /* zlib deflate */
  ZipWithPrediction = 3     /* zlib deflate of per-row byte deltas */
} PSDCompressionType;
/* Color modes stored in the PSD file header (5 and 6 do not appear here). */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/* One entry of a layer's channel table. */
typedef struct _ChannelInfo
{
  short
    type;  /* channel id; negative ids denote alpha/mask planes (SetPSDPixel) */

  size_t
    size;  /* stored channel data length in bytes */
} ChannelInfo;
/* Layer-mask description read from a layer record. */
typedef struct _MaskInfo
{
  Image
    *image;  /* decoded mask raster, if present */

  RectangleInfo
    page;  /* mask placement (page offsets used by ApplyPSDOpacityMask) */

  unsigned char
    background,  /* mask background value */
    flags;  /* mask flag bits from the file */
} MaskInfo;
/* Everything parsed for a single PSD layer record. */
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* per-channel id and data size */

  char
    blendkey[4];  /* 4-character blend-mode key (e.g. "norm") */

  Image
    *image;  /* decoded layer raster */

  MaskInfo
    mask;  /* optional layer mask */

  Quantum
    opacity;  /* layer opacity; OpaqueAlpha means fully opaque */

  RectangleInfo
    page;  /* layer bounds */

  size_t
    offset_x,
    offset_y;  /* additional placement offsets */

  unsigned char
    clipping,  /* clipping flag byte */
    flags,  /* layer flag bits */
    name[257],  /* layer name (up to 256 characters plus terminator) */
    visible;  /* nonzero when the layer is visible */

  unsigned short
    channels;  /* number of valid entries in channel_info */

  StringInfo
    *info;  /* raw additional layer-information blocks */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD stream is recognized by the 4-byte signature "8BPS" at offset 0.
  */
  if (length >= 4)
    if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
      return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  /*
    Map a composite operator to its 4-character PSD blend-mode key.  The key
    is emitted byte-reversed for little-endian output, so each entry carries
    both orderings.  Unrecognized operators fall back to "norm" (normal).
  */
  static const struct
  {
    CompositeOperator
      compose;

    const char
      *reversed,  /* LSB-endian (byte-reversed) key */
      *key;       /* MSB-endian (native) key */
  } blend_modes[] =
  {
    { ColorBurnCompositeOp, "vidi", "idiv" },
    { ColorDodgeCompositeOp, " vid", "div " },
    { ColorizeCompositeOp, "rloc", "colr" },
    { DarkenCompositeOp, "krad", "dark" },
    { DifferenceCompositeOp, "ffid", "diff" },
    { DissolveCompositeOp, "ssid", "diss" },
    { ExclusionCompositeOp, "dums", "smud" },
    { HardLightCompositeOp, "tiLh", "hLit" },
    { HardMixCompositeOp, "xiMh", "hMix" },
    { HueCompositeOp, " euh", "hue " },
    { LightenCompositeOp, "etil", "lite" },
    { LinearBurnCompositeOp, "nrbl", "lbrn" },
    { LinearDodgeCompositeOp, "gddl", "lddg" },
    { LinearLightCompositeOp, "tiLl", "lLit" },
    { LuminizeCompositeOp, " mul", "lum " },
    { MultiplyCompositeOp, " lum", "mul " },
    { OverlayCompositeOp, "revo", "over" },
    { PinLightCompositeOp, "tiLp", "pLit" },
    { SaturateCompositeOp, " tas", "sat " },
    { ScreenCompositeOp, "nrcs", "scrn" },
    { SoftLightCompositeOp, "tiLs", "sLit" },
    { VividLightCompositeOp, "tiLv", "vLit" }
  };

  register size_t
    i;

  for (i=0; i < sizeof(blend_modes)/sizeof(blend_modes[0]); i++)
    if (image->compose == blend_modes[i].compose)
      return(image->endian == LSBEndian ? blend_modes[i].reversed :
        blend_modes[i].key);
  return(image->endian == LSBEndian ? "mron" : "norm");
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* Only sRGB images with a blended alpha channel need the correction. */
  if ((image->alpha_trait != BlendPixelTrait) ||
      (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  /* Honor 'psd:alpha-unblend=off' to skip the correction. */
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      ssize_t
        i;

      gamma=QuantumScale*GetPixelAlpha(image, q);
      /* Fully transparent or fully opaque pixels are left untouched. */
      if (gamma != 0.0 && gamma != 1.0)
        {
          /*
            Invert "composite over white": stored = gamma*color +
            (1-gamma)*QuantumRange, solved here for color.
          */
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /* Translate a PSD compression code to the generic CompressionType. */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;  /* Raw and anything unrecognized */
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Scale every pixel's alpha by the layer opacity, or -- when revert is
    set -- divide the opacity back out to undo a previous application.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  /* A fully opaque layer leaves the pixels unchanged. */
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  /* Ensure an alpha channel exists before scaling it. */
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        /* opacity > 0 guards the division when reverting. */
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  /*
    Multiply the image's alpha by the layer-mask intensity (or divide it
    back out when revert is set).  The mask is first composited onto an
    image-sized canvas filled with the mask background so that areas the
    mask does not cover use the background value.
  */
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  /* Position the mask using the difference of the two page offsets. */
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        /* intensity > 0 guards the division when reverting. */
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  /*
    Stash the layer's opacity mask in the image registry under a random key
    and record that key as the "psd:opacity-mask" artifact so the writer can
    recover the mask later.  The mask page is translated back to canvas
    coordinates before it is stored.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    Request all key bytes up front: bytes 0-8 are random (byte 8 is then
    overwritten with the mask background) and byte 9 is the terminator.
    The previous GetRandomKey(random_info,2+1) requested only 3 bytes while
    indices 3-9 were still used as part of the registry key, leaving bytes
    3-7 uninitialized.
  */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
const unsigned char *compact_pixels,const ssize_t depth,
const size_t number_pixels,unsigned char *pixels)
{
#define CheckNumberCompactPixels \
if (packets == 0) \
return(i); \
packets--
#define CheckNumberPixels(count) \
if (((ssize_t) i + count) > (ssize_t) number_pixels) \
return(i); \
i+=count
int
pixel;
ssize_t
i,
j;
size_t
length;
ssize_t
packets;
packets=(ssize_t) number_compact_pixels;
for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
{
packets--;
length=(size_t) (*compact_pixels++);
if (length == 128)
continue;
if (length > 128)
{
length=256-length+1;
CheckNumberCompactPixels;
pixel=(*compact_pixels++);
for (j=0; j < (ssize_t) length; j++)
{
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
*pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(unsigned char) ((pixel >> 6) & 0x03);
*pixels++=(unsigned char) ((pixel >> 4) & 0x03);
*pixels++=(unsigned char) ((pixel >> 2) & 0x03);
*pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(unsigned char) ((pixel >> 4) & 0xff);
*pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(unsigned char) pixel;
break;
}
}
}
continue;
}
length++;
for (j=0; j < (ssize_t) length; j++)
{
CheckNumberCompactPixels;
switch (depth)
{
case 1:
{
CheckNumberPixels(8);
*pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
*pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
break;
}
case 2:
{
CheckNumberPixels(4);
*pixels++=(*compact_pixels >> 6) & 0x03;
*pixels++=(*compact_pixels >> 4) & 0x03;
*pixels++=(*compact_pixels >> 2) & 0x03;
*pixels++=(*compact_pixels & 0x03) & 0x03;
break;
}
case 4:
{
CheckNumberPixels(2);
*pixels++=(*compact_pixels >> 4) & 0xff;
*pixels++=(*compact_pixels & 0x0f) & 0xff;
break;
}
default:
{
CheckNumberPixels(1);
*pixels++=(*compact_pixels);
break;
}
}
compact_pixels++;
}
}
return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    j;

  /*
    Release every per-layer resource (raster, mask raster, extra info) and
    then the layer array itself; the relinquished (NULL) pointer is returned
    for the caller to assign.
  */
  for (j=0; j < number_layers; j++)
  {
    if (layer_info[j].image != (Image *) NULL)
      layer_info[j].image=DestroyImage(layer_info[j].image);
    if (layer_info[j].mask.image != (Image *) NULL)
      layer_info[j].mask.image=DestroyImage(layer_info[j].mask.image);
    if (layer_info[j].info != (StringInfo *) NULL)
      layer_info[j].info=DestroyStringInfo(layer_info[j].info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(const Image *image)
{
  /*
    Bytes per sample in the PSD data: colormapped images with more than 256
    entries need 2-byte indexes; otherwise the width follows the image
    depth (>16 bits: 4, >8 bits: 2, else 1).
  */
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  if (image->depth > 8)
    return(2);
  return(1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /* Length fields are 4 bytes in version 1 (PSD) and 8 bytes otherwise. */
  return(psd_info->version == 1 ? (MagickSizeType) ReadBlobLong(image) :
    (MagickSizeType) ReadBlobLongLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  /* Bytes per stored row; 1-bit data is packed eight columns per byte. */
  samples=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(samples*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
  /* Human-readable name of a PSD color mode, used for logging. */
  static const struct
  {
    PSDImageType
      mode;

    const char
      *name;
  } mode_names[] =
  {
    { BitmapMode, "Bitmap" },
    { GrayscaleMode, "Grayscale" },
    { IndexedMode, "Indexed" },
    { RGBMode, "RGB" },
    { CMYKMode, "CMYK" },
    { MultichannelMode, "Multichannel" },
    { DuotoneMode, "Duotone" },
    { LabMode, "L*A*B" }
  };

  register size_t
    i;

  for (i=0; i < sizeof(mode_names)/sizeof(mode_names[0]); i++)
    if (type == mode_names[i].mode)
      return(mode_names[i].name);
  return("unknown");
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    saved_mask;

  MagickBooleanType
    status;

  /*
    Negate all channels except alpha, restoring the caller's channel mask
    afterwards.
  */
  saved_mask=SetImageChannelMask(image,(ChannelType) (AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(status);
}
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;  /* scratch for fields that are read but ignored */

  /*
    Walk the "8BIM" image-resource blocks.  The raw bytes are preserved as
    an "8bim" profile; resources 0x03ed (resolution) and 0x0421 (version
    info) are additionally interpreted.  Returns NULL when the data is too
    short to hold any block.
  */
  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    /* Pascal-style name, padded so length byte + text is of even size. */
    p=PushCharPixel(p,&name_length);
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* Stop on a block size that would run outside the buffer. */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution info.
          NOTE(review): unlike the other cases only 16 bytes of this
          resource are consumed here; any remainder is treated as the next
          block -- confirm this is intended.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatImageProperty(image,"tiff:XResolution","%*g",
          GetMagickPrecision(),image->resolution.x);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatImageProperty(image,"tiff:YResolution","%*g",
          GetMagickPrecision(),image->resolution.y);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version info: a zero at byte 4 means no merged image follows. */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource blocks are padded to even lengths. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  /*
    Read a raw string of the given length from the blob; when the blob is
    not big-endian the bytes are stored reversed, so flip the buffer in
    place before returning.
  */
  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *head,
        *tail,
        swap;

      head=p;
      tail=p+length-1;
      while (head < tail)
      {
        swap=*head;
        *head=*tail;
        *tail=swap;
        head++;
        tail--;
      }
    }
  return(count);
}
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  /*
    Store one decoded sample into pixel q.  'type' is the PSD channel id;
    nonnegative ids select the color planes in order and negative ids are
    mapped as shown in the switch below.
  */
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      Quantum
        index;

      /* Colormapped image: the sample is a (clamped) palette index. */
      index=pixel;
      if (packet_size == 1)
        index=(Quantum) ScaleQuantumToChar(index);
      index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
        exception);
      if (type == 0)
        SetPixelIndex(image,index,q);
      if ((type == 0) && (channels > 1))
        return;
      /* Non-index channels update the colormap entry's alpha. */
      color=image->colormap+(ssize_t) GetPixelIndex(image,q);
      if (type != 0)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /* Fourth plane: black for CMYK, otherwise alpha when present. */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* Fifth plane is ignored for RGB-compatible data with >3 channels. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  const unsigned char
    *p;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    packet_size;

  /*
    Convert one decoded row of raw channel samples into pixels of 'image'.
    Samples are 1, 2 or 4 bytes wide (see GetPSDPacketSize); 4-byte samples
    are floats scaled to the quantum range.  For 1-bit images each source
    byte packs up to eight columns and is expanded here.
  */
  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        ssize_t
          bit,
          number_bits;

        /* 1-bit data: expand up to 8 columns from this packed byte. */
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          /* A set bit decodes to 0 (black); a clear bit to QuantumRange. */
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* Compensate the outer loop's x++ unless the row is complete. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    row;

  unsigned char
    *row_pixels;

  /*
    Read an uncompressed channel: one raw row at a time from the blob, each
    handed to ReadPSDChannelPixels for sample conversion.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  row_pixels=(unsigned char *) AcquireQuantumMemory(row_size,
    sizeof(*row_pixels));
  if (row_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(row_pixels,0,row_size*sizeof(*row_pixels));
  status=MagickTrue;
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    status=MagickFalse;
    if (ReadBlob(image,row_size,row_pixels) != (ssize_t) row_size)
      break;  /* truncated blob */
    status=ReadPSDChannelPixels(image,channels,row,type,row_pixels,exception);
    if (status == MagickFalse)
      break;
  }
  row_pixels=(unsigned char *) RelinquishMagickMemory(row_pixels);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  /*
    Read the per-row compressed byte counts that precede RLE channel data:
    16-bit counts for version 1, 32-bit otherwise.  Returns NULL when the
    allocation fails.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (i=0; i < (ssize_t) size; i++)
    sizes[i]=(MagickOffsetType) (psd_info->version == 1 ?
      ReadBlobShort(image) : ReadBlobLong(image));
  return sizes;
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  /*
    Read a PackBits (RLE) compressed channel; sizes[y] holds the compressed
    byte count of row y (already read by ReadPSDRLESizes).
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the compact buffer for the largest compressed row. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  /* Reject implausibly large compressed rows. */
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For 1-bit images a sentinel depth (123456) routes DecodePSDPixels to
      its plain byte-copy path; the packed bits are expanded later by
      ReadPSDChannelPixels.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  size_t
    bytes_left,
    column;

  unsigned char
    *q;

  /*
    Undo zip "prediction" for 8-bit samples: each byte in a row is stored
    as a delta from its left neighbor, so integrate across every row.
  */
  q=pixels;
  bytes_left=count;
  while (bytes_left > 0)
  {
    for (column=image->columns; --column != 0; )
    {
      q[1]=(unsigned char) (q[1]+q[0]);
      q++;
    }
    q++;
    bytes_left-=row_size;
  }
}
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  size_t
    bytes_left,
    column;

  unsigned char
    *q;

  /*
    Undo zip prediction for 16-bit samples: each 2-byte value is a delta
    from the previous one, so integrate with carry from the low byte into
    the high byte.
  */
  q=pixels;
  bytes_left=count;
  while (bytes_left > 0)
  {
    for (column=image->columns; --column != 0; )
    {
      q[2]=(unsigned char) (q[2]+q[0]+((q[1]+q[3]) >> 8));
      q[3]=(unsigned char) (q[3]+q[1]);
      q+=2;
    }
    q+=2;
    bytes_left-=row_size;
  }
}
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
  unsigned char *output_pixels,const size_t row_size)
{
  unsigned char
    *p,
    *q;

  ssize_t
    y;

  size_t
    offset1,
    offset2,
    offset3,
    remaining;

  unsigned char
    *start;

  /*
    Undo zip prediction for 32-bit samples.  Each row stores the four bytes
    of every sample in planar order (all byte 0s, then all byte 1s, ...),
    delta coded byte-by-byte.  First integrate the deltas across the row,
    then re-interleave the four byte planes into consecutive 4-byte samples
    in output_pixels.
  */
  offset1=image->columns;
  offset2=2*offset1;
  offset3=3*offset1;
  p=pixels;
  q=output_pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* Integrate the per-byte deltas across the whole row. */
    start=p;
    remaining=row_size;
    while (--remaining)
    {
      *(p+1)+=*p;
      p++;
    }
    /* Re-interleave: take one byte from each of the four planes. */
    p=start;
    remaining=image->columns;
    while (remaining--)
    {
      *(q++)=*p;
      *(q++)=*(p+offset1);
      *(q++)=*(p+offset2);
      *(q++)=*(p+offset3);
      p++;
    }
    p=start+row_size;
  }
}
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  /*
    Decode a ZIP-compressed channel: read compact_size bytes from the
    blob, inflate them into a rows*row_size pixel buffer, undo the delta
    predictor when the compression is ZipWithPrediction, and hand each row
    to ReadPSDChannelPixels().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            /* Corrupt deflate stream: release both buffers and fail. */
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          /*
            32-bit prediction is planar and cannot be undone in place;
            decode into a second buffer and swap it in.
          */
          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /* FIX: the NULL check tested `pixels` (already known non-NULL)
             instead of the freshly allocated `output_pixels`, so an
             allocation failure led to a NULL write in Unpredict32Bit(). */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  /*
    Decode one channel of a layer.  Color/alpha channels are written into
    the layer image itself; a user-supplied layer mask channel (type < -1)
    is decoded into a separate grayscale image attached to layer_info->mask.
    The caller has already consumed the channel's 2-byte compression field,
    hence the repeated `size-2` adjustments below.
  */
  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the unused mask payload entirely. */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      /* RLE data is preceded by one byte count per row. */
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Always reposition past this channel's data, even if decoding stopped
     early, so the next channel is read from the right offset. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  /*
    Configure the pre-allocated layer image (compose operator, position and
    opacity artifacts, label), decode every channel, then apply the layer
    opacity and an optional opacity mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel's data is prefixed by its own 2-byte compression type. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    required_channels;

  ssize_t
    i;

  /*
    Verify the layer declares every channel the color mode requires.  Start
    with a bitmask of required channels, clear each bit as the matching
    channel type is seen, and accept when nothing required is missing
    (optionally with a single extra alpha channel).
  */
  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  required_channels=RedChannel;
  if (psd_info->min_channels >= 3)
    required_channels|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    required_channels|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        required_channels|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;
    switch (type)
    {
      case 0:
        required_channels&=~RedChannel;
        break;
      case 1:
        required_channels&=~GreenChannel;
        break;
      case 2:
        required_channels&=~BlueChannel;
        break;
      case 3:
        required_channels&=~BlackChannel;
        break;
      default:
        break;
    }
  }
  if (required_channels == 0)
    return(MagickTrue);
  if ((required_channels == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  ssize_t
    n,
    p;

  /*
    Compact away entries without a decoded image, then stitch the
    surviving layer images into the list after the base image and release
    the LayerInfo array.
  */
  for (n=0; n < number_layers; n++)
    if (layer_info[n].image == (Image *) NULL)
      {
        for (p=n; p < number_layers-1; p++)
          layer_info[p]=layer_info[p+1];
        number_layers--;
        n--;
      }
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  for (n=0; n < number_layers; n++)
  {
    if (n > 0)
      layer_info[n].image->previous=layer_info[n-1].image;
    if (n < (number_layers-1))
      layer_info[n].image->next=layer_info[n+1].image;
    layer_info[n].image->page=layer_info[n].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  /*
    A layer may be skipped only when a merged composite exists as a
    fallback and the caller restricted reading to a scene range that does
    not include this index.
  */
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index >= image_info->scene) &&
      (index <= image_info->scene+image_info->number_scenes-1))
    return(MagickFalse);
  return(MagickTrue);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  size_t
    color_channels;

  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel, so infer one whenever the channel count
    exceeds what the color mode needs for color data alone.
  */
  switch (psd_info->mode)
  {
    case GrayscaleMode:
      color_channels=1;
      break;
    case RGBMode:
      color_channels=3;
      break;
    case CMYKMode:
      color_channels=4;
      break;
    default:
      return;
  }
  if (psd_info->channels > color_channels)
    image->alpha_trait=BlendPixelTrait;
}
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  /*
    Walk the layer's "additional information" blocks (4-byte signature +
    4-byte key + 4-byte big-endian size + payload) looking for a "luni"
    block, which holds the layer's Unicode name; copy an ASCII-compatible
    name into layer_info->name.
  */
  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* Big-endian 32-bit payload size. */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* Truncated block: stop rather than read past the buffer. */
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        /* Big-endian count of 2-byte characters in the name. */
        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        if (length * 2 > size - 4)
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        /* Terminate only on a full copy; a non-ASCII character above
           leaves layer_info->name unchanged past the copied prefix. */
        if (length == 0)
          *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  /*
    Return the byte size of the layer-info section.  When the stored size
    is zero, probe for 8BIM sub-blocks: a Mt16/Mt32/Mtrn key flags an
    alpha channel, and a following Lr16/Lr32 key precedes the real layer
    info whose size is then read.  Returns 0 when no usable layer info is
    found.
  */
  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      size=GetPSDSize(psd_info,image);
      /* A non-empty transparency block is unexpected here; bail out. */
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  ssize_t
    i;

  ssize_t
    count,
    index,
    j,
    number_layers;

  /*
    Parse the PSD layer-info section: read each layer record (bounding
    box, channel list, blend data, mask info, name, additional info),
    allocate one image per non-empty layer, decode the per-layer channel
    data, and attach the resulting images to the image list.
  */
  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: read every layer record from the blob.
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    /* Layer bounding box: top, left, bottom, right (signed). */
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 marks the layer hidden. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        /* combined_length tracks how much of the `size`-byte extra-data
           block has been consumed so far. */
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /* When flag 0x01 is clear, convert the mask offset to
               layer-relative coordinates. */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        /* The length-prefixed name is padded to a 4-byte boundary. */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* Whatever remains of the extra-data block is the "additional
           info" blob handed to ParseAdditionalInfo(). */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }
  /*
    Pass 2: allocate an image for every layer with a non-empty bounding
    box and attach any parsed additional info as a profile.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: decode pixel data for each kept layer; skipped or empty layers
    still occupy channel bytes in the blob that must be discarded.
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point: honor the coder security policy before delegating
    to the internal layer reader (layers are never skipped here).
  */
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickOffsetType
    *rle_sizes;

  PSDCompressionType
    compression;

  ssize_t
    channel;

  /*
    Decode the flattened composite that follows the layer section.  Only
    Raw and RLE compression are supported for the merged image.
  */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if ((compression != Raw) && (compression != RLE))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  rle_sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* One byte count per row for every channel precedes the RLE data. */
      rle_sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (rle_sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (channel=0; channel < (ssize_t) psd_info->channels; channel++)
  {
    ssize_t
      type;

    type=channel;
    /* In a two-channel image the second channel carries alpha. */
    if ((psd_info->channels == 2) && (type == 1))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,
        rle_sizes+(channel*image->rows),exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) channel,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  rle_sizes=(MagickOffsetType *) RelinquishMagickMemory(rle_sizes);
  return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
ssize_t
i;
size_t
image_list_length;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
psd_info.min_channels=3;
switch (psd_info.mode)
{
case LabMode:
{
(void) SetImageColorspace(image,LabColorspace,exception);
break;
}
case CMYKMode:
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace,exception);
break;
}
case BitmapMode:
case GrayscaleMode:
case DuotoneMode:
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,MagickMin((size_t)
(psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace,exception);
break;
}
case IndexedMode:
{
psd_info.min_channels=1;
break;
}
case MultichannelMode:
{
if ((psd_info.channels > 0) && (psd_info.channels < 3))
{
psd_info.min_channels=psd_info.channels;
(void) SetImageColorspace(image,GRAYColorspace,exception);
}
break;
}
}
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
32 bits per pixel; the colormap is ignored.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
psd_info.has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(psd_info.has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
image_list_length=GetImageListLength(image);
if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
image_info,image,&psd_info,exception);
if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
image_list_length=GetImageListLength(image);
}
if (psd_info.has_merged_image == MagickFalse)
{
Image
*merged;
if (image_list_length == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.alpha=(MagickRealType) TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(image,exception);
merged=MergeImageLayers(image,FlattenLayer,exception);
if (merged == (Image *) NULL)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
Image
*next;
i=0;
next=image;
while (next != (Image *) NULL)
{
if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
(void) SetImageProfile(next,GetStringInfoName(profile),profile,
exception);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /*
    Register the large-document variant (PSB).
  */
  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=(CoderDecoderSeekableStreamFlag |
    CoderEncoderSeekableStreamFlag);
  (void) RegisterMagickInfo(entry);
  /*
    Register the classic Photoshop variant (PSD).
  */
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=(CoderDecoderSeekableStreamFlag |
    CoderEncoderSeekableStreamFlag);
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Version-1 (PSD) files store row offsets as 16-bit big-endian values;
    version-2 (PSB) files use 32-bit values.
  */
  return(psd_info->version == 1 ?
    WriteBlobMSBShort(image,(unsigned short) offset) :
    WriteBlobMSBLong(image,(unsigned int) offset));
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    saved_position;

  ssize_t
    bytes_written;

  /*
    Seek back to a previously reserved offset slot, patch it with the real
    value, then restore the current blob position.
  */
  saved_position=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  bytes_written=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,saved_position,SEEK_SET);
  return(bytes_written);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Version-1 (PSD) sizes are 32 bits; version-2 (PSB) sizes are 64 bits.
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    saved_position;

  ssize_t
    bytes_written;

  /*
    Backpatch a size field reserved earlier at `offset', preserving the
    current blob position across the seek.
  */
  saved_position=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  bytes_written=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,saved_position,SEEK_SET);
  return(bytes_written);
}
/*
  PSDPackbitsEncodeImage() compresses `length' bytes of `pixels' into
  `compact_pixels' using the PackBits run-length scheme and returns the number
  of compacted bytes produced.  The caller must size `compact_pixels' for the
  worst-case expansion (see AcquireCompactPixels()).  `exception' is accepted
  for API symmetry but is not used in this implementation.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  ssize_t
    i,
    j;

  unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for one literal run: header byte + up to 127 literals. */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* `i' counts the bytes still to encode; each case consumes some of them. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single trailing byte: emit as a literal run of one. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: a literal run of two. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        /* Three trailing bytes: packed run if all equal, else literals. */
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Header (256-n)+1 encodes a repeat count of n (here 3). */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* Extend the run up to the remaining input or 127 repeats. */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Collect literals until a 3-byte repeat starts, input runs low,
           or the 127-byte literal limit is reached. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        /* packbits[0] holds the literal-run header (count-1). */
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
/*
  WriteCompressionStart() emits the 2-byte compression tag that precedes
  channel data and, for RLE, reserves one per-row byte-count slot per channel
  (patched later via WritePSDOffset()).  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    bytes_written;

  ssize_t
    channel,
    row;

  switch (compression)
  {
    case RLECompression:
    {
      bytes_written=(size_t) WriteBlobShort(image,RLE);
      /* Reserve a zeroed byte-count entry for every row of every channel. */
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          bytes_written+=SetPSDOffset(psd_info,image,0);
      break;
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    case ZipCompression:
    {
      bytes_written=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
      break;
    }
#endif
    default:
    {
      bytes_written=(size_t) WriteBlobShort(image,Raw);
      break;
    }
  }
  return(bytes_written);
}
/*
  WritePSDChannel() writes one channel (selected by `quantum_type') of
  `next_image' to the output blob, encoded with the requested compression.
  When `separate' is set the channel is written in the layer section, so a
  fresh compression tag is emitted first and `size_offset' is redirected to
  the local per-row offset table.  Returns the number of bytes written, or 0
  on allocation/initialization failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  const Quantum
    *p;

  ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression tag written just below. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  /* PSD channel data is stored at either 8 or 16 bits per sample. */
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* Quality values 1-9 map directly onto zlib compression levels. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* Bilevel PSD data stores ink-on as 0, so invert exported samples. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Backpatch this row's byte count into the offset table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        /* Flush the deflate stream on the final row. */
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
          stream.avail_out=(uInt) MagickMinBufferExtent;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) MagickMinBufferExtent-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
/*
  AcquireCompactPixels() allocates a scratch buffer large enough to hold one
  PackBits-compressed row in the worst case.  Returns NULL (with an exception
  raised) on allocation failure; the caller owns the returned buffer.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_packet;

  unsigned char
    *buffer;

  bytes_per_packet=(image->depth > 8UL) ? 2UL : 1UL;
  buffer=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_packet*sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(buffer);
}
/*
  WritePSDChannels() writes every channel of `next_image' to the blob in PSD
  channel order (index/gray or R,G,B[,K], then alpha, then an optional layer
  opacity mask).  With `separate' set (layer data) each channel carries its
  own compression header and its byte count is backpatched at `size_offset';
  otherwise (merged image data) a single compression header covers all
  channels.  Returns the number of bytes written, or 0 on failure.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  /* An explicit image_info compression overrides the image's own setting. */
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Merged data: compute the channel count and write one shared header. */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Size of one channel's slice of the RLE row-count table. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Palette image: a single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; NegateCMYK() is undone below. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Restore the CMYK samples negated above. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* Optional layer opacity mask, stashed in the image registry. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
/*
  WritePascalString() writes `value' as a PSD Pascal string: a length byte
  followed by at most 255 bytes of text, then zero-padded so the total
  (length byte included) is a multiple of `padding'.  Returns the number of
  bytes written.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  ssize_t
    i;

  /*
    Max length is 255; hoist the single strlen() call.
  */
  count=0;
  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  /* Guard padding == 0 to avoid division by zero; no padding in that case. */
  if ((padding == 0) || ((length % padding) == 0))
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
/*
  WriteResolutionResourceBlock() emits the 8BIM resolution image resource
  (id 0x03ED): horizontal and vertical resolution as 16.16 fixed-point
  values plus their unit codes (1 = per-inch, 2 = per-centimeter here;
  presumably matching the PSD resource spec — TODO confirm).
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Convert per-centimeter to per-inch fixed point (x 2.54 x 65536). */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  /* NOTE(review): +0.5 is added both above and at the casts below, i.e. the
     value is rounded twice — looks unintentional but changing it would alter
     output bytes; verify against the PSD specification before touching. */
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
/*
  WriteChannelSize() writes a layer channel record: the channel id followed
  by a zero size placeholder that is patched later via WritePSDSize().
  The two writes must stay in this order.  Returns the bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    bytes_written;

  bytes_written=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  bytes_written+=SetPSDSize(psd_info,image,0);
  return(bytes_written);
}
/*
  RemoveICCProfileFromResourceBlock() deletes the ICC profile resource
  (id 0x040F) from an 8BIM resource block in place, shrinking `bim_profile'
  accordingly.  Stops at the first malformed (non-"8BIM") entry.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  /* 16 bytes is the minimum for one resource header; bail out otherwise. */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    q=(unsigned char *) p;  /* remember the start of this resource entry */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Header: signature (consumed above), id, Pascal-name stub, data size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Whole entry size: padded payload plus the 12-byte header. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* Close the gap over the entry, then shrink the profile. */
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  RemoveResolutionFromResourceBlock() deletes the resolution resource
  (id 0x03ED) from an 8BIM resource block in place, shrinking `bim_profile'
  accordingly, so the coder can write a fresh resolution block instead.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  /* Need at least one full resource header. */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;  /* start of this resource entry */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Header: signature (consumed above), id, Pascal-name stub, data size. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);  /* payload padded to an even length */
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Close the gap over the entry, then shrink the profile. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  GetAdditionalInformation() returns the "psd:additional-info" profile to be
  written for `image', filtered according to the same-named image option:
  "all" keeps the profile untouched, "selective" keeps only entries whose
  4-byte key is on the whitelist (compacting the data in place), and any
  other value drops the profile entirely.  Returns NULL when nothing should
  be written.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Any other value (including unset): discard the profile. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;  /* re-used below as the size of the kept entries */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* Big-endian 32-bit payload size. */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* Truncated entry: the payload overruns the profile; give up. */
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Not whitelisted: compact the remaining data over this entry. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* Shrink to the kept entries and re-attach the filtered profile. */
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  WritePSDLayersInternal() writes the layer-info section: a size field
  (backpatched at the end), the layer count, one record per layer (bounds,
  channel table with placeholder sizes, blend mode, opacity, mask data,
  name), and then the per-layer channel pixel data.  On success returns
  MagickTrue; if `layers_size' is non-NULL it receives the unpadded section
  size.  The first image in the list is the writer's scratch/merged image;
  layers start at its successor (or the image itself when it has none).
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  /* Reserve the section size field; patched via WritePSDSize() below. */
  size_offset=TellBlob(image);
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count flags that the first alpha channel holds the
     merged image's transparency. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  /* Pass 1: write every layer record (headers only, channel sizes zeroed). */
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        /* The opacity mask image is stashed in the image registry. */
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounds: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    /* Remember where this layer's channel-size table starts, so
       WritePSDChannels() can backpatch the sizes in pass 2. */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    /* Channel id -1 is alpha, -2 the user-supplied layer mask. */
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unlabeled layers get a generated "L<n>" name. */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: padded name + optional mask block + info block. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* Mask bounds are stored relative to the canvas. */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobLong(image,0);
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  /* Pass 2: write channel pixels; sizes are backpatched at the offsets
     recorded in layer_size_offsets. */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* Round an odd size up to the next even value. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
/*
  WritePSDLayers() is the public wrapper over WritePSDLayersInternal(): it
  consults the coder security policy first and silently succeeds (without
  writing) when PSD write rights are not granted.
*/
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
/*
  WritePSDImage(): top-level PSD/PSB encoder.  Writes, in file order:
  the "8BPS" header, the color-mode (palette) section, the image
  resources section (8BIM and ICC profiles), the layer/mask section,
  and finally the flattened composite pixel data.

  Returns MagickTrue on success; the blob is closed before returning.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* Bytes per pixel: 3 (or 6 for >8-bit depth), plus 1 (or 2) for alpha. */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* Version 2 selects the PSB (large document) variant; Photoshop requires
     it for any dimension above 30000 pixels. */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0);  /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
        TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      /* Non-gray: indexed images are always written at 8 bits. */
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          /* Default path: convert to sRGB and write RGB/Indexed mode. */
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  /*
    Color-mode data section: empty (length 0) unless a palette is needed.
  */
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);
  else
    {
      /*
        Write PSD raster colormap: fixed 768 bytes -- 256 reds, then
        256 greens, then 256 blues, zero-padded past image->colors.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* Work on a clone: the embedded ICC and resolution resources are
         stripped because they are written separately below. */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* ICC profile as resource 0x040F; pad to an even byte count. */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      const char
        *option;

      MagickOffsetType
        size_offset;

      size_t
        size;

      /* Reserve the layer-section length field, write the layers, then
         back-patch the real size (header is 8 bytes for PSD, 12 for PSB). */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      option=GetImageOption(image_info,"psd:write-layers");
      if (IsStringFalse(option) != MagickTrue)
        {
          status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
            exception);
          (void) WritePSDSize(&psd_info,image,size+
            (psd_info.version == 1 ? 8 : 12),size_offset);
        }
    }
  (void) WriteBlobMSBLong(image,0);  /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      /* Temporarily override the compression; Zip is not supported for
         the composite, so it falls back to RLE. */
      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
odd-even-merge-sort_hybrid.c | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<omp.h>
#include"mpi.h"
/* Maximum/minimum of two scalars.  MIN uses ">=" so both macros agree on
   ties.  Arguments are evaluated more than once: never pass expressions
   with side effects. */
#define MAX(a,b) ((a<b)?b:a)
#define MIN(a,b) ((a>=b)?b:a)
/* Accessors for the interleaved view of a run starting at index n:
   ODD selects elements n, n+2, n+4, ...; EVEN selects n+1, n+3, ... */
#define ODD(A,n,i) A[n+2*i]
#define EVEN(A,n,i) A[n+2*i+1]
/* Debug helper: prints indices l..r-1 on one line and the matching
   values of A on the next, each field 3 columns wide. */
void print_array(int *A,int l,int r)
{
    int i;
    putchar('\n');
    for(i=l;i<r;++i)
        printf("%3d ",i);
    putchar('\n');
    for(i=l;i<r;++i)
        printf("%3d ",A[i]);
    putchar('\n');
}
/* Allocates and returns an array of n pseudo-random ints, each in [0, n).
   The PRNG is reseeded from the wall clock on every call. */
int *generate(int n)
{
    int *result=malloc((size_t)n*sizeof *result);
    srand(time(NULL));
    for(int i=0;i<n;++i)
        result[i]=rand()%n;
    return result;
}
/* Returns a freshly malloc'd duplicate of the first n ints of A. */
int *copy(int *A,int n)
{
    int *dup=malloc((size_t)n*sizeof *dup);
    int i=0;
    while(i<n)
    {
        dup[i]=A[i];
        ++i;
    }
    return dup;
}
/*
 * qsort comparator for ints, ascending order.
 *
 * BUG FIX: the previous version returned only 0 or 1, which violates the
 * qsort contract (the comparator must return <0 / 0 / >0 for less /
 * equal / greater); an inconsistent comparator makes the sort result
 * undefined.  The subtraction-of-comparisons idiom below is overflow-safe,
 * unlike "return *a - *b".
 */
int compare(const void *a,const void *b)
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (x > y) - (x < y);
}
/* Compares two n-element arrays: prints "Failure" if any element differs,
   otherwise "Success!". */
void validate(int *A1,int *A2,int n)
{
    int identical=1;
    for(int i=0;i<n && identical;++i)
        if(A1[i]!=A2[i])
            identical=0;
    if(identical)
        printf("Success!\n");
    else
        printf("Failure\n");
}
void odd_even_merge_sort(int *A,int l,int c,int r);
void odd_even_merge(int *A,int l,int c,int r);
void odd_even_merge2(int *A,int s);
void odd_even_sort_omp(int *A,int l,int c,int r);
//int unsort[32]={2,3,18,9,23,11,4,25,0,13,6,21,14,27,1,10,15,5,16,17,8,24,22,12,19,29,26,30,28,7,31,20};
//int unsort_array[16]={2,3,9,11,4,13,6,14,1,10,15,5,16,8,12,7};
//int unsort_array[8]={2,3,4,6,1,5,8,7};
/* Floor of log base 2 of n (position of the highest set bit).
   Returns -1 when n == 0. */
int log_2(int n)
{
    int bits=-1;
    while(n!=0)
    {
        n>>=1;
        ++bits;
    }
    return bits;
}
/*
 * Driver: generates N random ints, sorts them with a hybrid MPI + OpenMP
 * odd-even merge sort, and validates the result against qsort() on
 * rank 0.  argv[1] overrides the default problem size of 32.
 *
 * NOTE(review): the merge structure requires N to be a power of two and
 * divisible by the number of MPI ranks -- no validation is done here.
 */
int main(int argc,char* argv[])
{
    char *n="32";
    int N=atoi(argc==2?argv[1]:n);
    int *unpsort=generate(N);      /* input for the parallel sort */
    int *unqsort=copy(unpsort,N);  /* reference copy for qsort    */
    int size,rank;
    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&size);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    double odd_even_parallel_t = omp_get_wtime();
    /* Each rank sorts an N/size slice; slices are gathered back to root. */
    int *divA=(int *)malloc(sizeof(int)*N/size);
    int divAsize=N/size;
    MPI_Scatter(unpsort,divAsize,MPI_INT,divA,divAsize,MPI_INT,0,MPI_COMM_WORLD);
    odd_even_merge_sort(divA,0,divAsize/2,divAsize);
    MPI_Gather(divA,divAsize,MPI_INT,unpsort,divAsize,MPI_INT,0,MPI_COMM_WORLD);
    /* NOTE(review): every rank executes this final merge, but only rank 0
       holds the gathered data after MPI_Gather; it also treats unpsort as
       exactly two sorted halves, which only holds when size == 2 --
       confirm intended rank count.  divA/unpsort/unqsort are never freed. */
    odd_even_merge(unpsort,0,N/2,N);
    odd_even_parallel_t = omp_get_wtime()-odd_even_parallel_t;
    MPI_Barrier(MPI_COMM_WORLD);
    if(rank==0)
    {
        double qsort_t = omp_get_wtime();
        qsort(unqsort,N,sizeof(int),&compare);
        qsort_t=omp_get_wtime()-qsort_t;
        //print_array(unqsort,0,N);
        validate(unpsort,unqsort,N);
        printf("qsort=%lf,Parallel = %lf sec (%lf times speedup)\n",
            qsort_t,
            odd_even_parallel_t,(qsort_t/odd_even_parallel_t));
    }
    MPI_Finalize();
    return 0;
}
void odd_even_merge(int *A,int l,int c,int r)
{
/**
printf("enter odd_even_merge(n=%d)\n",n);
print_array(A,l,r);
**/
int n=c-l;
int *D=(int *)malloc(sizeof(int)*n);
int *E=(int *)malloc(sizeof(int)*n);
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
{
int t0=0,t1=0;
for(int i=0;i<n;i++)
{
if( t0 == n/2 || ( t1 != n/2 && ODD(A,l,t0) > ODD(A,c,t1) ) )
D[i]=ODD(A,c,t1++);
else
D[i]=ODD(A,l,t0++);
}
}
#pragma omp section
{
int t2=0,t3=0;
for(int i=0;i<n;i++)
{
if( t2 == n/2 || ( t3 != n/2 && EVEN(A,l,t2) > EVEN(A,c,t3)) )
E[i]=EVEN(A,c,t3++);
else
E[i]=EVEN(A,l,t2++);
}
}
}
//printf("D:");print_array(D,0,n);
//printf("E:");print_array(E,0,n);
A[l]=D[0];
for(int i=1;i<n;i++)
{
A[l+2*i-1]=MIN(D[i],E[i-1]);
A[l+2*i]=MAX(D[i],E[i-1]);
}
A[r-1]=E[n-1];
//print_array(A,l,n);
}
/* Sorts the four elements A[s..s+3] in ascending order.  Implemented as
   an explicit five-comparison sorting network (sort each pair, exchange
   across pairs, then order the middle), which produces exactly the same
   result as the min/max formulation it replaces: min of all four, the
   two middle order statistics, then max of all four. */
void odd_even_merge2(int *A,int s)
{
    int a=A[s],b=A[s+1],c=A[s+2],d=A[s+3],t;
    if(a>b){t=a;a=b;b=t;}  /* sort first pair        */
    if(c>d){t=c;c=d;d=t;}  /* sort second pair       */
    if(a>c){t=a;a=c;c=t;}  /* minimum to front       */
    if(b>d){t=b;b=d;d=t;}  /* maximum to back        */
    if(b>c){t=b;b=c;c=t;}  /* order the middle two   */
    A[s]=a;
    A[s+1]=b;
    A[s+2]=c;
    A[s+3]=d;
}
/* Recursive Batcher odd-even merge sort of A[l..r), with c the midpoint.
   The span length must be a power of two (at least 8): the recursion
   bottoms out when each half has exactly 4 elements, which are sorted by
   the odd_even_merge2 network before the final merge. */
void odd_even_merge_sort(int *A,int l,int c,int r)
{
    //printf("odd_even_merge_sort(%d,%d,%d)\n",l,c,r);
    //print_array(A,l,r);
    if(c-l!=4)
    {
        /* Sort each half recursively. */
        odd_even_merge_sort(A,l,(l+c)/2,c);
        odd_even_merge_sort(A,c,(c+r)/2,r);
    }
    else
    {
        /* Base case: sort the two 4-element halves directly. */
        odd_even_merge2(A,l);
        odd_even_merge2(A,c);
    }
    odd_even_merge(A,l,c,r);
}
|
mmtiled.c | // -----------------------------------------------------------------------------
//
// "CAPIPrecis"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : mmtiled.c
// Create : 2019-09-28 14:41:30
// Revise : 2019-11-29 11:17:40
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "timer.h"
#include "myMalloc.h"
#include "config.h"
#include "libcxl.h"
#include "capienv.h"
#include "mmtiled.h"
/*
 * Allocates a MatrixArrays holder with three size_n x size_n uint32_t
 * matrices (A, B, C).  size_n comes from arguments->size and the tile
 * edge from arguments->cu_config_2.  Release with freeMatrixArrays().
 */
struct MatrixArrays *newMatrixArrays(struct Arguments *arguments)
{
    struct MatrixArrays *matrixArrays = (struct MatrixArrays *) my_malloc(sizeof(struct MatrixArrays));

    matrixArrays->size_n = arguments->size;
    matrixArrays->size_tile = arguments->cu_config_2;

    /* One contiguous buffer per matrix, size_n * size_n cells each. */
    matrixArrays->A = (uint32_t *) my_malloc(sizeof(uint32_t) * matrixArrays->size_n * matrixArrays->size_n);
    matrixArrays->B = (uint32_t *) my_malloc(sizeof(uint32_t) * matrixArrays->size_n * matrixArrays->size_n);
    matrixArrays->C = (uint32_t *) my_malloc(sizeof(uint32_t) * matrixArrays->size_n * matrixArrays->size_n);

    return matrixArrays;
}
/*
 * Releases a MatrixArrays holder and its three matrices.
 * Safe to call with NULL.  The per-member NULL guards the original
 * carried were redundant: free(NULL) is a no-op by definition.
 */
void freeMatrixArrays(struct MatrixArrays *matrixArrays)
{
    if(matrixArrays)
    {
        free(matrixArrays->A);
        free(matrixArrays->B);
        free(matrixArrays->C);
        free(matrixArrays);
    }
}
/*
 * Fills A, B and C with a deterministic pattern: each cell holds its own
 * linear index (row * size_n + col, truncated to 32 bits).  Rows are
 * processed in parallel; the two commented lines show the original
 * random-fill alternative.
 */
void initializeMatrixArrays(struct MatrixArrays *matrixArrays)
{
    uint64_t i;
    uint64_t j;

    #pragma omp parallel for private(j)
    for(i = 0; i < matrixArrays->size_n; i++)
    {
        for(j = 0; j < matrixArrays->size_n; j++)
        {
            // matrixArrays->A[(i * matrixArrays->size_n) + j] = generateRandInt(mt19937var) % 512;
            // matrixArrays->B[(i * matrixArrays->size_n) + j] = generateRandInt(mt19937var) % 512;
            uint64_t cell = (i * matrixArrays->size_n) + j;
            matrixArrays->A[cell] = cell;
            matrixArrays->B[cell] = cell;
            matrixArrays->C[cell] = cell;
        }
    }
}
/*
 * Resets only the result matrix C back to the deterministic pattern used
 * by initializeMatrixArrays (each cell = its own linear index), so a
 * multiply kernel can be re-run from a known state.
 */
void resetMatrixArrays(struct MatrixArrays *matrixArrays)
{
    uint64_t i;
    uint64_t j;

    #pragma omp parallel for private(j)
    for(i = 0; i < matrixArrays->size_n; i++)
    {
        for(j = 0; j < matrixArrays->size_n; j++)
        {
            uint64_t cell = (i * matrixArrays->size_n) + j;
            matrixArrays->C[cell] = cell;
        }
    }
}
/*
 * Element-wise comparison of two MatrixArrays holders (A, B and C).
 * Returns 0 when everything matches, 1 immediately when the dimensions
 * differ, otherwise the number of cells where any of the three matrices
 * disagree.  Rows are compared in parallel with a sum reduction.
 */
uint64_t compareMatrixArrays(struct MatrixArrays *matrixArrays1, struct MatrixArrays *matrixArrays2)
{
    uint64_t mismatches = 0;
    uint64_t i;
    uint64_t j;

    if(matrixArrays1->size_n != matrixArrays2->size_n)
        return 1;

    #pragma omp parallel for shared(matrixArrays1,matrixArrays2) private(j) reduction(+: mismatches)
    for(i = 0; i < matrixArrays1->size_n; i++)
    {
        for(j = 0; j < matrixArrays1->size_n; j++)
        {
            uint64_t idx1 = (i * matrixArrays1->size_n) + j;
            uint64_t idx2 = (i * matrixArrays2->size_n) + j;
            if(matrixArrays1->A[idx1] != matrixArrays2->A[idx2] ||
               matrixArrays1->B[idx1] != matrixArrays2->B[idx2] ||
               matrixArrays1->C[idx1] != matrixArrays2->C[idx2])
            {
                // printf("[%llu] %u != %u\n", i, dataArrays->array_receive[i], dataArrays->array_send[i] );
                mismatches++;
            }
        }
    }

    return mismatches;
}
/*
 * Returns the 64-bit sum of every element of the result matrix C --
 * a cheap fingerprint for validating kernel output.  Parallel row-wise
 * reduction.
 */
uint64_t checksumMatrixArrays(struct MatrixArrays *matrixArrays)
{
    uint64_t total = 0;
    uint64_t i;
    uint64_t j;

    #pragma omp parallel for shared(matrixArrays) private(j) reduction(+: total)
    for(i = 0; i < matrixArrays->size_n; i++)
    {
        for(j = 0; j < matrixArrays->size_n; j++)
        {
            total += matrixArrays->C[(i * matrixArrays->size_n) + j];
        }
    }

    return total;
}
/*
 * Transposes matrix B in place by swapping across the main diagonal.
 * For each row i, the swaps over columns j > i run in parallel; distinct
 * iterations touch disjoint (i,j)/(j,i) pairs, so no locking is needed.
 */
void matrixTranspose(struct MatrixArrays *matrixArrays)
{
    uint64_t i;
    uint64_t j;
    uint32_t temp;

    for(i = 0; i < matrixArrays->size_n; i++)
    {
        #pragma omp parallel for private(temp)
        for(j = i + 1; j < matrixArrays->size_n; j++)
        {
            uint64_t upper = (i * matrixArrays->size_n) + j;
            uint64_t lower = (j * matrixArrays->size_n) + i;
            temp = matrixArrays->B[upper];
            matrixArrays->B[upper] = matrixArrays->B[lower];
            matrixArrays->B[lower] = temp;
        }
    }
}
/*
 * C = A * B, naive triple-loop product over row-major operands.
 * Result rows are computed in parallel with dynamic scheduling.
 * The accumulator is 32-bit unsigned, so arithmetic wraps modulo 2^32.
 */
void matrixMultiplyStandard(struct MatrixArrays *matrixArrays)
{
    uint64_t i;
    uint64_t j;
    uint64_t k;
    uint32_t acc;

    #pragma omp parallel for private(j,k,acc) schedule(dynamic)
    for(i = 0; i < matrixArrays->size_n; i++)
    {
        for(j = 0; j < matrixArrays->size_n; j++)
        {
            acc = 0;
            for(k = 0; k < matrixArrays->size_n; k++)
                acc += matrixArrays->A[(i * matrixArrays->size_n) + k] *
                       matrixArrays->B[(k * matrixArrays->size_n) + j];
            matrixArrays->C[(i * matrixArrays->size_n) + j] = acc;
        }
    }
}
/*
 * C = A * B^T walk: like matrixMultiplyStandard, but B is read row-wise
 * (index (j * size_n) + k), i.e. it expects B to already hold the
 * transpose of the right operand (see matrixTranspose), giving unit-
 * stride access on both inputs.
 */
void matrixMultiplyStandardTransposed(struct MatrixArrays *matrixArrays)
{
    uint64_t i;
    uint64_t j;
    uint64_t k;
    uint32_t acc;

    #pragma omp parallel for private(j,k,acc) schedule(dynamic)
    for(i = 0; i < matrixArrays->size_n; i++)
    {
        for(j = 0; j < matrixArrays->size_n; j++)
        {
            acc = 0;
            for(k = 0; k < matrixArrays->size_n; k++)
                acc += matrixArrays->A[(i * matrixArrays->size_n) + k] *
                       matrixArrays->B[(j * matrixArrays->size_n) + k];
            matrixArrays->C[(i * matrixArrays->size_n) + j] = acc;
        }
    }
}
/*
 * C += A * B using loop tiling (blocking) with tile edge size_tile to
 * improve cache reuse; MIN clamps partial tiles at the matrix edge.
 * The outermost row-tile loop is parallelized.
 *
 * NOTE(review): this kernel ACCUMULATES into C with "+=", while the
 * standard kernels assign with "=".  initialize/resetMatrixArrays leave
 * C holding each cell's linear index, so the tiled result will differ
 * from matrixMultiplyStandard unless C is zeroed first -- confirm the
 * intended initialization.
 */
void matrixMultiplyTiled(struct MatrixArrays *matrixArrays)
{
    uint64_t i;
    uint64_t j;
    uint64_t k;
    uint64_t ii;
    uint64_t jj;
    uint64_t kk;
    uint32_t sum;

    #pragma omp parallel for private(j,k,ii,jj,kk,sum) schedule(dynamic)
    for(i = 0; i < matrixArrays->size_n; i += matrixArrays->size_tile)
    {
        for(j = 0; j < matrixArrays->size_n; j += matrixArrays->size_tile)
        {
            for(k = 0; k < matrixArrays->size_n; k += matrixArrays->size_tile)
            {
                /* Multiply one size_tile^2 block of A against B. */
                for (ii = i; ii < MIN(i + matrixArrays->size_tile, matrixArrays->size_n); ii++)
                {
                    for (jj = j; jj < MIN(j + matrixArrays->size_tile, matrixArrays->size_n); jj++)
                    {
                        sum = 0;
                        //#pragma omp parallel for reduction(+:sum)
                        for (kk = k; kk < MIN(k + matrixArrays->size_tile, matrixArrays->size_n); kk++)
                        {
                            sum += matrixArrays->A[(ii * matrixArrays->size_n) + kk] * matrixArrays->B[(kk * matrixArrays->size_n) + jj];
                        }
                        /* Partial product of this k-tile is accumulated. */
                        matrixArrays->C[(ii * matrixArrays->size_n) + jj] += sum;
                    }
                }
            }
        }
    }
}
/*
 * Tiled matrix multiply offloaded to a CAPI AFU: the host maps the
 * matrices into a WED (work element descriptor), starts the AFU once,
 * then walks the (i,j,k) tile grid and programs/launches one CU job per
 * tile triple, waiting for completion each time.  No arithmetic happens
 * on the host -- the AFU consumes the WED-mapped matrices directly.
 *
 * NOTE(review): cu_config packs the CU trigger in the high 32 bits and
 * numThreads in the low 32; cu_config_2..4 carry the tile origins with
 * the low bit set, presumably a "valid/start" flag -- verify against the
 * AFU register specification.
 */
void matrixMultiplyTiledTransposed(struct MatrixArrays *matrixArrays, struct Arguments *arguments)
{
    uint32_t i;
    uint32_t j;
    uint32_t k;
    struct cxl_afu_h *afu;

    // ********************************************************************************************
    // *************** MAP CSR DataStructure **************
    // ********************************************************************************************
    struct WEDStructMM *wed = mapDataMatrixArraysToWED(matrixArrays);

    // ********************************************************************************************
    // *************** Setup AFU **************
    // ********************************************************************************************
    setupAFUMM(&afu, wed);

    struct AFUStatus afu_status = {0};
    afu_status.afu_config = arguments->afu_config;
    afu_status.afu_config_2 = arguments->afu_config_2;
    afu_status.cu_config = 0; // non zero CU triggers the AFU to work
    afu_status.cu_config = ((afu_status.cu_config << 32) | (arguments->numThreads));
    afu_status.cu_config_2 = 0;
    /* One tile's worth of work items per CU launch. */
    afu_status.cu_stop = wed->size_tile * wed->size_tile;

    // ********************************************************************************************
    // *************** START AFU **************
    // ********************************************************************************************
    startAFU(&afu, &afu_status);

    for(i = 0; i < matrixArrays->size_n; i += matrixArrays->size_tile)
    {
        for(j = 0; j < matrixArrays->size_n; j += matrixArrays->size_tile)
        {
            for(k = 0; k < matrixArrays->size_n; k += matrixArrays->size_tile)
            {
                // ********************************************************************************************
                // *************** START CU **************
                // ********************************************************************************************
                afu_status.cu_config = arguments->cu_config; // non zero CU triggers the AFU to work
                afu_status.cu_config = ((afu_status.cu_config << 32) | (arguments->numThreads));
                /* Tile origins, shifted left with bit 0 set (see NOTE). */
                afu_status.cu_config_2 = ((i << 1) | 1);
                afu_status.cu_config_3 = ((j << 1) | 1);
                afu_status.cu_config_4 = ((k << 1) | 1);
                startCU(&afu, &afu_status);

                // ********************************************************************************************
                // *************** WAIT AFU **************
                // ********************************************************************************************
                waitAFU(&afu, &afu_status);
                printMMIO_error(afu_status.error);
            }
        }
    }

    releaseAFU(&afu);
    free(wed);
}
|
prefilter.h | #pragma once
#if !defined(__PREFILTER_H)
#define __PREFILTER_H
#include <mitsuba/core/plugin.h>
#include <mitsuba/core/bitmap.h>
#include <mitsuba/core/fstream.h>
#include <mitsuba/render/util.h>
#include <mitsuba/render/bsdf.h>
#include <mitsuba/render/sampler.h>
#include <boost/lexical_cast.hpp>
#include <Eigen/Dense>
#include <vector>
#include <string>
/**
* NOTE ABOUT CUBEMAP COORDINATE SYSTEMS:
*
* This code uses a right-handed system everywhere (X right, Y up, -Z forward).
*
* There are some serious sign issues with cube maps in general. Within this code
* base, the cube assumes a right-handed system where each cube face is, starting
* from looking down -Z with +X right and +Y up:
* +X: turn right 90 degrees
* -X: turn left 90 degrees
* +Y: turn up 90 degrees
* -Y: turn down 90 degrees
* +Z: turn around 180 degrees about Y
* -Z: no change
*
* HOWEVER, OpenGL seems to have a left-handed system for cube maps with +Z forward
* and with Y inverted (pointing down). Further, libraries that wrap OpenGL
* (e.g. three.js) seem to flip things around even more, sometimes inconsistently.
* Therefore, I am going to work within the above right-handed system for this code
* base, and worry about translating to a particular rendering library separately.
*/
MTS_NAMESPACE_BEGIN
typedef Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> matX;
typedef Eigen::Matrix<int, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> matXi;
typedef Eigen::Matrix<float, Eigen::Dynamic, 1, Eigen::ColMajor> vecX;
typedef Eigen::Matrix<float, 3, 3, Eigen::ColMajor> mat3;
typedef Eigen::Matrix<float, 4, 4, Eigen::ColMajor> mat4;
typedef Eigen::Matrix<Spectrum, 4, 4, Eigen::ColMajor> mat4s;
typedef Eigen::Matrix<float, 2, 1, Eigen::ColMajor> vec2;
typedef Eigen::Matrix<float, 3, 1, Eigen::ColMajor> vec3;
typedef Eigen::Array<float, 9, 1, Eigen::ColMajor> arr9;
typedef Eigen::Array<Spectrum, 9, 1, Eigen::ColMajor> arr9s;
typedef Eigen::Array<float, 3, 1, Eigen::ColMajor> arr3;
typedef Eigen::AlignedBox<int, 2> ibbox2;
typedef Eigen::Matrix<int, 2, 1> ivec2;
using std::vector;
using std::string;
using std::abs;
enum ECubeSide { POSX, NEGX, POSY, NEGY, POSZ, NEGZ };
/**
 * Returns true if two Eigen-style vectors have the same length and every
 * pair of coefficients agrees to within `epsilon` (absolute error).
 */
template <typename A, typename B>
inline bool approx_equals(A a , B b, float epsilon = 1e-5) {
    if (a.size() != b.size())
        return false;
    for (int i = 0; i < a.size(); ++i)
        if (abs(a.coeff(i) - b.coeff(i)) > epsilon)
            return false;
    return true;
}
/**
* Compute the homography between mappings of points a --> b, returning M such that
* <constant> * [ b' 1 ]' = M * [ a' 1 ]'
*/
mat3 compute_homography(const vec2 *a, const vec2 *b, int n = 4);
bool test_compute_homography();
/**
 * Applies the 3x3 homography M to the 2D point a (lifted to homogeneous
 * coordinates with w = 1) and projects back by dividing through by w.
 */
inline vec2 apply_homography(const mat3& M, const vec2& a) {
    const vec3 h = M * vec3(a[0], a[1], 1.0);
    return vec2(h[0] / h[2], h[1] / h[2]);
}
/**
 * Evaluates the environment emitter in direction v (any 3-component
 * indexable type), using a zero-origin ray at time 0.
 */
template <typename V3>
inline Spectrum lookup_envmap(const Emitter* env, const V3& v) {
    const RayDifferential ray(Point3(0,0,0), Vector3(v[0], v[1], v[2]), 0.0);
    return env->evalEnvironment(ray);
}
/**
* Converts a single image into an entire cube map as follows:
*
* /-------------------\ ^
* | \ +Y / | | margin * height
* | --------------- | v
* | | | |
* |-X| -Z |+X|
* | | | |
* | --------------- | ^
* | / -Y \ | | margin * height
* \-------------------/ v
* <--> margin * width
* <--> margin * width
*/
class HomographyEnvmap : public Emitter {
public:
    HomographyEnvmap(ref<Bitmap> img_, float margin_);

    // Deserialization constructor: not implemented (aborts).
    HomographyEnvmap(Stream *stream, InstanceManager *manager) : Emitter(stream, manager) {
        Log(EError, "TODO");
    }

    // Radiance lookup for direction ray.d: the dominant axis selects the
    // cube face, the direction is mapped to [0,1]^2 face coordinates,
    // the face's homography is applied, and the source image is sampled
    // with clamp-to-edge addressing (coordinate system: see the header
    // note at the top of this file).
    inline Spectrum evalEnvironment(const RayDifferential& ray) const {
        const Vector3& v = ray.d;
        int idx;
        // idx = index (0..2) of the direction component with the largest
        // magnitude, i.e. which axis' face pair we hit
        arr3(v[0], v[1], v[2]).abs().maxCoeff(&idx);
        const float x = v[0], y = v[1], z = v[2];
        vec2 uv;
        switch (idx) {
        case 0:
            if (x > 0) { // +X
                // map right half of cubeface to left half of image
                uv = apply_homography(M[0], vec2(
                    0.5 * (-abs(z) / x) + 0.5,
                    0.5 * (y / x) + 0.5) );
            } else { // -X
                // map left half of cubeface to right half of image
                uv = apply_homography(M[1], vec2(
                    0.5 * (abs(z) / (-x)) + 0.5,
                    0.5 * (y / (-x)) + 0.5) );
            }
            break;
        case 1:
            if (y > 0) { // +Y
                uv = apply_homography(M[2], vec2( // map to bottom half
                    0.5 * (x / y) + 0.5,
                    0.5 * (-abs(z) / y) + 0.5) );
            } else { // -Y
                uv = apply_homography(M[3], vec2( // map to top half
                    0.5 * (x / (-y)) + 0.5,
                    0.5 * (abs(z) / (-y)) + 0.5) );
            }
            break;
        case 2: // +/- Z
            uv = apply_homography(M[4], vec2( // map to entire square
                0.5 * (x / abs(z)) + 0.5,
                0.5 * (y / abs(z)) + 0.5) );
            break;
        }
        // fetch pixel, clamping to edge
        return img->getPixel(Point2i(
            clamp<int>(uv[0] * img->getWidth(), 0, img->getWidth() - 1),
            clamp<int>(uv[1] * img->getHeight(), 0, img->getHeight() - 1)));
    }

    // Degenerate bounds: the emitter is conceptually at infinity.
    AABB getAABB() const {
        return AABB(Point(0,0,0));
    }

private:
    mat3 M[5]; // +X, -X, +Y, -Y, +/- Z
    ref<Bitmap> img;
};
/**
* Functional test for HomographyEnvmap -- outputs images into current directory
*/
void test_HomographyEnvmap();
/**
* Computes the cubemap, assuming OpenGL ordering
* (+X, -X, +Y, -Y, +Z, -Z, each side row-major with 0th row at bottom)
*
*
*/
Bitmap* compute_prefiltered_envmap(const Emitter* envmap,
const BSDF* bsdf, Sampler* sampler,
int resolution, int samples);
/**
 * Projects a 3D cartesian (unit) direction into the first 9 real
 * spherical-harmonic basis functions, ordered as in Ramamoorthi &
 * Hanrahan 2001: { Y00, Y1-1, Y10, Y11, Y2-2, Y2-1, Y20, Y21, Y22 } --
 * the ordering consumed by compute_sh9_diffuse_matrix() below.
 *
 * BUG FIXES vs. the previous version:
 *  - indices 6 and 7 were swapped (xz was stored where Y20 = 3z^2-1
 *    belongs and vice versa), contradicting the diffuse-matrix layout;
 *  - Y22 computed (x*y - y^2) instead of (x^2 - y^2).
 */
template <typename V3>
inline arr9 cartesian_to_sh9(const V3 &x) {
    arr9 sh;
    sh[0] = 0.2820947917738781434740397257804;                              // Y00  = 1/(2 sqrt(pi))
    sh[1] = 0.4886025119029199215863846228384 * x[1];                       // Y1-1 ~ y
    sh[2] = 0.4886025119029199215863846228384 * x[2];                       // Y10  ~ z
    sh[3] = 0.4886025119029199215863846228384 * x[0];                       // Y11  ~ x
    sh[4] = 1.0925484305920790705433857058027 * x[1] * x[0];                // Y2-2 ~ xy
    sh[5] = 1.0925484305920790705433857058027 * x[1] * x[2];                // Y2-1 ~ yz
    sh[6] = 0.3153915652525200060308936902957 * (3 * x[2] * x[2] - 1);      // Y20  ~ 3z^2-1
    sh[7] = 1.0925484305920790705433857058027 * x[2] * x[0];                // Y21  ~ xz
    sh[8] = 0.5462742152960395352716928529014 * (x[0] * x[0] - x[1] * x[1]); // Y22 ~ x^2-y^2
    return sh;
}
/**
 * Computes the first 9 spherical harmonics for an environment map by
 * uniform-sphere Monte Carlo sampling: 128 independent chunks (parallel
 * over OpenMP threads, each with its own cloned sampler) of `nsamples`
 * directions each.
 *
 * BUG FIXES vs. the previous version:
 *  - per-sample results were added to the SHARED accumulator `ret`
 *    inside the parallel loop (a data race), while the thread-private
 *    `sum` was discarded in the critical section; accumulation now goes
 *    into `sum` and is folded into `ret` under the critical section;
 *  - normalization divided by `nsamples` only, ignoring the 128 chunks;
 *    the estimator now scales by 4*pi / (total sample count).
 */
inline arr9s compute_sh9(const HomographyEnvmap* envmap,
        ref<Sampler> sampler_parent, int nsamples) {
    arr9s ret = arr9s::Constant(Spectrum(0.0));
    #pragma omp parallel for schedule(dynamic, 1)
    for (int k = 0; k < 128; ++k) {
        arr9s sum = arr9s::Constant(Spectrum(0.0));
        // each thread has its own sampler
        ref<Sampler> sampler = sampler_parent->clone();
        sampler->generate(Point2i(k, 0));
        for (int i = 0; i < nsamples; ++i) {
            const Vector3 v = Warp::squareToUniformSphere(sampler->next2D());
            const arr9 sh9 = cartesian_to_sh9(v);
            const Spectrum L = lookup_envmap(envmap, v);
            // accumulate into the thread-private partial sum
            for (int j = 0; j < 9; ++j)
                sum[j] += sh9[j] * L;
            sampler->advance();
        }
        // fold this chunk's partial sum into the shared result
        #pragma omp critical
        {
            for (int j = 0; j < 9; ++j)
                ret[j] += sum[j];
        }
    }
    // sampling by solid angle: p(direction) = 1 / 4pi, over 128*nsamples
    // total samples
    float scale = 4.0 * M_PI / (128.0 * nsamples);
    for (int j = 0; j < 9; ++j)
        ret[j] *= scale;
    return ret;
}
/**
 * Computes the 4x4 matrix used for diffuse lighting
 * Uses method from [ Ravi Ramamoorthi and Pat Hanrahan, "An Efficient
 * Representation for Irradiance Environment Maps", SIGGRAPH 2001.
 * http://graphics.stanford.edu/papers/envmap/envmap.pdf ]
 *
 * Irradiance for a unit normal n is then E(n) = [n 1] M [n 1]^T.
 * The layout matches the paper's matrix M, assuming the flat coefficient
 * ordering sh9 = { L00, L1-1, L10, L11, L2-2, L2-1, L20, L21, L22 }.
 * NOTE(review): cartesian_to_sh9() as currently written stores xz at
 * index 6 and (3z^2-1) at index 7 -- i.e. L20/L21 swapped relative to
 * what this matrix assumes.  Confirm/fix the basis ordering before
 * trusting the result.
 */
inline mat4s compute_sh9_diffuse_matrix(const HomographyEnvmap* envmap,
        ref<Sampler> sampler, int nsamples) {
    mat4s ret;
    arr9s sh9 = compute_sh9(envmap, sampler, nsamples);
    // c1..c5: the paper's constants folding the A_l convolution weights
    const float c1 = 0.429043, c2 = 0.511664, c3 = 0.743125, c4 = 0.886227, c5 = 0.247708;
    ret << c1 * sh9[8],  c1 * sh9[4], c1 * sh9[7], c2 * sh9[3],
           c1 * sh9[4], -c1 * sh9[8], c1 * sh9[5], c2 * sh9[1],
           c1 * sh9[7],  c1 * sh9[5], c3 * sh9[6], c2 * sh9[2],
           c2 * sh9[3],  c2 * sh9[1], c2 * sh9[2], c4 * sh9[0] - c5 * sh9[6];
    return ret;
}
/**
 * Serializes a spectral 4x4 matrix as three GLSL mat4 declarations
 * ("mat4 Mr=...", "Mg", "Mb"), one per linear-RGB channel.  Each vec4 is
 * a COLUMN of the output matrix, so the element order is transposed.
 */
inline std::string mat4s_to_glsl_string(const mat4s& m) {
    std::stringstream ss;
    ss.precision(8);
    const char names[3] = {'r', 'g', 'b'};
    for (int c = 0; c < 3; ++c) {
        ss << "mat4 M" << names[c] << "=mat4(";
        for (int i = 0; i < 4; ++i) {
            if (i > 0)
                ss << ",";
            ss << "vec4(";
            for (int j = 0; j < 4; ++j) {
                float rgb[3];
                // NOTE THE TRANSPOSE -- each vec4 is a column
                m(j, i).toLinearRGB(rgb[0], rgb[1], rgb[2]);
                if (j > 0)
                    ss << ",";
                ss << rgb[c];
            }
            ss << ")";
        }
        ss << ");" << endl;
    }
    return ss.str();
}
/**
 * Mitsuba command-line utility ("mtsutil" plugin) entry point that drives
 * the envmap prefiltering pipeline declared in this header.
 */
class Prefilter : public Utility {
public:
    int run(int argc, char **argv);

    MTS_DECLARE_UTILITY()
};
MTS_NAMESPACE_END
#endif
|
threading.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.4 $
***********************************************************************EHEADER*/
#include <stdlib.h>
#include <stdio.h>
#include "utilities.h"
#if defined(HYPRE_USING_OPENMP) || defined (HYPRE_USING_PGCC_SMP)
/*
 * Returns the number of threads available for a parallel region: the
 * OpenMP team size under HYPRE_USING_OPENMP, a fixed 2 under
 * HYPRE_USING_PGCC_SMP.
 *
 * BUG FIX: num_threads is now initialized to 1 so a defined value is
 * returned even if neither macro sets it (previously the function could
 * return an uninitialized int).
 */
int
hypre_NumThreads( )
{
   int num_threads = 1;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
   num_threads = omp_get_num_threads();
#endif
#ifdef HYPRE_USING_PGCC_SMP
   num_threads = 2;
#endif

   return num_threads;
}
/* This next function must be called from within a parallel region! */
int
hypre_NumActiveThreads( )
{
   /* Size of the CURRENT OpenMP team; only meaningful when called from
      inside a parallel region (see the caller-facing comment above). */
   int num_threads;

   num_threads = omp_get_num_threads();

   return num_threads;
}
/* This next function must be called from within a parallel region! */
int
hypre_GetThreadNum( )
{
   /* This thread's index within the current OpenMP team (0 for the
      master thread); must be called inside a parallel region. */
   int my_thread_num;

   my_thread_num = omp_get_thread_num();

   return my_thread_num;
}
#endif
/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
/* The pthreads stuff needs to be reworked */
#define HYPRE_THREAD_GLOBALS
#ifdef HYPRE_USE_PTHREADS
#ifdef HYPRE_USE_UMALLOC
#include "umalloc_local.h"
#endif
int iteration_counter = 0;
volatile int hypre_thread_counter;
volatile int work_continue = 1;
/*
 * Creates the pthreads work queue, spawns num_threads worker threads
 * running hypre_pthread_worker, and initializes the global mutexes used
 * by the boxloop / MPI / allocation / timing wrappers.
 *
 * Returns 0 on success, otherwise the last pthread_create error code.
 * BUG FIX: `err` is now initialized; previously an uninitialized value
 * was returned when the queue allocation failed (hypre_qptr == NULL).
 */
int HYPRE_InitPthreads( int num_threads )
{
   int err = 0;
   int i;

   hypre_qptr =
      (hypre_workqueue_t) malloc(sizeof(struct hypre_workqueue_struct));
   hypre_NumThreads = num_threads;
   initial_thread = pthread_self();

   if (hypre_qptr != NULL) {
      pthread_mutex_init(&hypre_qptr->lock, NULL);
      pthread_cond_init(&hypre_qptr->work_wait, NULL);
      pthread_cond_init(&hypre_qptr->finish_wait, NULL);
      hypre_qptr->n_working = hypre_qptr->n_waiting = hypre_qptr->n_queue = 0;
      hypre_qptr->inp = hypre_qptr->outp = 0;
      for (i=0; i < hypre_NumThreads; i++) {
#ifdef HYPRE_USE_UMALLOC
         /* Get initial area to start heap */
         hypre_assert ((_uinitial_block[i] = malloc(INITIAL_HEAP_SIZE))!=NULL);
         /* Create a user heap.
            NOTE(review): the block is allocated into _uinitial_block[i]
            above but initial_block[i] is passed here -- looks like a
            typo; confirm which global is intended. */
         hypre_assert ((_uparam[i].myheap = _ucreate(initial_block[i],
                                      INITIAL_HEAP_SIZE,
                                      _BLOCK_CLEAN,
                                      _HEAP_REGULAR,
                                      _uget_fn,
                                      _urelease_fn)) != NULL);
#endif
         /* The loop index is passed BY VALUE through the void* argument. */
         err=pthread_create(&hypre_thread[i], NULL,
                            (void *(*)(void *))hypre_pthread_worker,
                            (void *)i);
         hypre_assert(err == 0);
      }
   }
   pthread_mutex_init(&hypre_mutex_boxloops, NULL);
   pthread_mutex_init(&mpi_mtx, NULL);
   pthread_mutex_init(&talloc_mtx, NULL);
   pthread_mutex_init(&time_mtx, NULL);
   pthread_mutex_init(&worker_mtx, NULL);
   hypre_thread_counter = 0;
   hypre_thread_release = 0;

   return (err);
}
/* Work item that shuts the pool down: clears the global work_continue
   flag read by hypre_pthread_worker's main loop.  The argument is unused;
   it exists only to match the hypre_work_proc_t signature. */
void hypre_StopWorker(void *i)
{
   work_continue = 0;
}
/*
 * Inverse of HYPRE_InitPthreads: enqueues one stop-work item per thread,
 * destroys the per-thread umalloc heaps (if enabled), joins all workers,
 * then tears down the queue, mutexes and condition variables.
 */
void HYPRE_DestroyPthreads( void )
{
   int i;
   void *status;

   for (i=0; i < hypre_NumThreads; i++) {
      hypre_work_put(hypre_StopWorker, (void *) &i);
   }

#ifdef HYPRE_USE_UMALLOC
   for (i=0; i<hypre_NumThreads; i++)
   {
      _udestroy (_uparam[i].myheap, _FORCE);
   }
#endif

   for (i=0; i<hypre_NumThreads; i++)
      pthread_join(hypre_thread[i], &status);

   pthread_mutex_destroy(&hypre_qptr->lock);
   pthread_mutex_destroy(&hypre_mutex_boxloops);
   pthread_mutex_destroy(&mpi_mtx);
   pthread_mutex_destroy(&talloc_mtx);
   pthread_mutex_destroy(&time_mtx);
   pthread_mutex_destroy(&worker_mtx);
   pthread_cond_destroy(&hypre_qptr->work_wait);
   pthread_cond_destroy(&hypre_qptr->finish_wait);
   free (hypre_qptr);
}
/*
 * Main loop of each pool thread.  Under the queue lock, each worker:
 *   1. waits on work_wait while the queue is empty (signalling
 *      finish_wait when it is the last one to go idle),
 *   2. pops one (function, argument) pair from the circular queue,
 *   3. releases the lock and runs the work item,
 *   4. synchronizes with the other workers at hypre_barrier before
 *      re-acquiring the lock to look for more work.
 * The loop exits once hypre_StopWorker clears work_continue.
 *
 * NOTE(review): `threadid` is unused (identity comes from
 * hypre_GetThreadID), and n_working is not decremented on exit, so
 * hypre_work_wait could block forever after shutdown -- verify the
 * intended shutdown ordering.
 */
void hypre_pthread_worker( int threadid )
{
   void *argptr;
   hypre_work_proc_t funcptr;

   pthread_mutex_lock(&hypre_qptr->lock);
   hypre_qptr->n_working++;

   while(work_continue) {
      while (hypre_qptr->n_queue == 0) {
         /* Last worker going idle wakes anyone in hypre_work_wait(). */
         if (--hypre_qptr->n_working == 0)
            pthread_cond_signal(&hypre_qptr->finish_wait);
         hypre_qptr->n_waiting++;
         pthread_cond_wait(&hypre_qptr->work_wait, &hypre_qptr->lock);
         hypre_qptr->n_waiting--;
         hypre_qptr->n_working++;
      }
      /* Pop one item from the circular queue. */
      hypre_qptr->n_queue--;
      funcptr = hypre_qptr->worker_proc_queue[hypre_qptr->outp];
      argptr = hypre_qptr->argqueue[hypre_qptr->outp];
      hypre_qptr->outp = (hypre_qptr->outp + 1) % MAX_QUEUE;
      pthread_mutex_unlock(&hypre_qptr->lock);
      /* Run the work item without holding the queue lock. */
      (*funcptr)(argptr);
      hypre_barrier(&worker_mtx, 0);
      if (work_continue)
         pthread_mutex_lock(&hypre_qptr->lock);
   }
}
/*
 * Enqueues one (function, argument) work item on the circular queue and
 * wakes an idle worker if any are waiting.  Asserts (rather than blocks)
 * if the queue is already full.
 */
void
hypre_work_put( hypre_work_proc_t funcptr, void *argptr )
{
   pthread_mutex_lock(&hypre_qptr->lock);
   if (hypre_qptr->n_waiting) {
      /* idle workers to be awakened */
      pthread_cond_signal(&hypre_qptr->work_wait);
   }
   hypre_assert(hypre_qptr->n_queue != MAX_QUEUE);
   hypre_qptr->n_queue++;
   hypre_qptr->worker_proc_queue[hypre_qptr->inp] = funcptr;
   hypre_qptr->argqueue[hypre_qptr->inp] = argptr;
   hypre_qptr->inp = (hypre_qptr->inp + 1) % MAX_QUEUE;
   pthread_mutex_unlock(&hypre_qptr->lock);
}
/* Wait until all work is done and workers quiesce. */
void
hypre_work_wait( void )
{
   /* Blocks on finish_wait until the queue is empty AND no worker is
      still executing an item (the last idle worker signals us). */
   pthread_mutex_lock(&hypre_qptr->lock);
   while(hypre_qptr->n_queue !=0 || hypre_qptr->n_working != 0)
      pthread_cond_wait(&hypre_qptr->finish_wait, &hypre_qptr->lock);
   pthread_mutex_unlock(&hypre_qptr->lock);
}
/* Returns the current value of *w and increments it by one.
   NOTE(review): despite the name, there is NO synchronization here --
   callers must hold a lock if *w is shared (see ifetchadd below). */
int
hypre_fetch_and_add( int *w )
{
   return (*w)++;
}
/* Atomically (with respect to mutex_fetchadd) return the current value of
 * *w and increment it by one.  The caller supplies the mutex that guards *w. */
int
ifetchadd( int *w, pthread_mutex_t *mutex_fetchadd )
{
   pthread_mutex_lock(mutex_fetchadd);
   int previous = *w;
   *w = previous + 1;
   pthread_mutex_unlock(mutex_fetchadd);
   return previous;
}
/* Shared state for the spin barrier below: thb_count counts threads that
 * have arrived; thb_release is the "go" flag the non-last arrivals spin on. */
static volatile int thb_count = 0;
static volatile int thb_release = 0;
/* Spinning barrier across hypre_NumThreads threads.  mtx guards thb_count;
 * thb_release is read/written outside the lock as a busy-wait flag.
 * If unthreaded is nonzero the barrier is a no-op.
 * Protocol: each arrival increments thb_count under mtx.  Non-last arrivals
 * spin until thb_release is raised, then decrement thb_count and spin until
 * thb_release is lowered again (second phase prevents a fast thread from
 * lapping the barrier).  The last arrival raises thb_release, waits for
 * thb_count to drain to zero, then lowers it.
 * NOTE(review): this relies on volatile int spinning with no explicit memory
 * barriers or atomics; correctness depends on the platform's memory model —
 * confirm on the target architecture. */
void hypre_barrier(pthread_mutex_t *mtx, int unthreaded)
{
if (!unthreaded) {
pthread_mutex_lock(mtx);
thb_count++;
if (thb_count < hypre_NumThreads) {
/* not the last arrival: wait for release, then acknowledge */
pthread_mutex_unlock(mtx);
while (!thb_release);
pthread_mutex_lock(mtx);
thb_count--;
pthread_mutex_unlock(mtx);
while (thb_release);
}
else if (thb_count == hypre_NumThreads) {
/* last arrival: open the barrier and wait for everyone to pass */
thb_count--;
pthread_mutex_unlock(mtx);
thb_release++;
while (thb_count);
thb_release = 0;
}
}
}
/* Map the calling thread to its pool index.
 * Returns hypre_NumThreads for the initial (main) thread, the worker's
 * index in hypre_thread[] for a pool thread, or -1 for an unknown thread. */
int
hypre_GetThreadID( void )
{
   pthread_t self = pthread_self();
   int id;

   if (pthread_equal(self, initial_thread))
      return hypre_NumThreads;
   for (id = 0; id < hypre_NumThreads; id++)
   {
      if (pthread_equal(self, hypre_thread[id]))
         return id;
   }
   return -1;
}
#endif
/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
|
Mod_grib.c | #include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "grb2.h"
#include "wgrib2.h"
#include "fnlist.h"
/*
* the set options
*
* routines to modify grib fields
*
* 3/2008 Public Domain by Wesley Ebisuzaki
*
*/
extern int header, decode;
extern const char *level_table[192];
extern const char *ncep_level_table[64];
extern int use_scale, dec_scale, bin_scale;
/*
* this takes a string and returns a fixed surface
*/
/* parse_level1: convert a level description string (e.g. "500 mb") into a
 * GRIB2 code table 4.5 fixed-surface triple:
 *   *table_4_5    - surface type code
 *   *scale_factor, *scale_value - scaled level value (255/255 = undefined)
 * The string is matched against the WMO level_table[] (codes 1..191) first;
 * for NCEP-centered files the local ncep_level_table[] (codes 192..255) is
 * tried as well.  Returns 0 on a match, 1 if nothing matched. */
int parse_level1(unsigned char **sec, const char *string, int *table_4_5, int *scale_factor, int *scale_value) {
int i, n_percent, slen, n, ival, len_string, center;
const char *t;
char tmp_string[100];
float val;
double dval;
len_string = strlen(string);
for (i = 1; i < 192; i++) { // can skip i==0 because it is not allowed resevered
if (strcmp("reserved", level_table[i]) == 0) continue;
// count the number of % characters in the level table
n_percent = 0;
t = level_table[i];
/* %% -> quoted %, otherwise a read */
while (*t) {
if (*t++ == '%') {
if (*t == 0) break;
if (*t++ != '%') n_percent++;
}
}
/* entry has no conversion: it must match the string exactly */
if (n_percent == 0) {
if (strcmp(string, level_table[i]) == 0) {
*table_4_5 = i;
*scale_factor = *scale_value = 255;
return 0;
}
}
/* entry has one conversion: append "%n" so a match must consume the
 * whole input string (n == len_string) */
else if (n_percent == 1) {
slen=strlen(level_table[i]);
if (slen > sizeof(tmp_string) - 3) fatal_error("parse_level1: string overflow","");
strncpy(tmp_string,level_table[i],slen);
strncpy(tmp_string+slen,"%n",3);
if (n = -1, sscanf(string,tmp_string,&val,&n), n == len_string) {
dval = (double) val;
*table_4_5 = i;
if (i == 100 || i == 108) dval = dval * 100; // convert mb to Pa
best_scaled_value(dval, &n, &ival);
*scale_factor = INT1(n);
*scale_value = ival;
return 0;
}
}
}
/* check local level tables (see Level.c) */
center = GB2_Center(sec);
if (center == NCEP) {
for (i = 0; i < 64; i++) {
if (strcmp("reserved", ncep_level_table[i]) == 0) continue;
n_percent = 0;
t = ncep_level_table[i];
/* %% -> quoted %, otherwise a read */
while (*t) {
if (*t++ == '%') {
if (*t == 0) break;
if (*t++ != '%') n_percent++;
}
}
if (n_percent == 0) {
if (strcmp(string, ncep_level_table[i]) == 0) {
*table_4_5 = i + 192;
*scale_factor = *scale_value = 255;
return 0;
}
}
else if (n_percent == 1) {
slen=strlen(ncep_level_table[i]);
if (slen > sizeof(tmp_string) - 3) fatal_error("parse_level1: string overflow","");
strncpy(tmp_string,ncep_level_table[i],slen);
strncpy(tmp_string+slen,"%n",3);
if (n = -1, sscanf(string,tmp_string,&val,&n), n == len_string) {
dval = (double) val;
*table_4_5 = i + 192;
if (i + 192 == 235) dval = dval * 10; // convert C to 1/10C
best_scaled_value(dval, &n, &ival);
*scale_factor = INT1(n);
*scale_value = ival;
return 0;
}
}
}
}
/* no match in any table */
return 1;
}
/*
* HEADER:100:set_lev:misc:1:changes level code .. not complete
*/
/* -set_lev: replace the fixed surface(s) (code table 4.5) of the current
 * message from a human-readable level string such as "500 mb",
 * "0.1-0.9 sigma layer", or "10-200 m below ground".
 * p1/p2 point at the first/second fixed-surface fields in section 4; p2 is
 * NULL when the PDT carries only one fixed surface.
 * Fixes vs. previous version:
 *  - the two "... layer" branches dereferenced p2 without a NULL check;
 *  - "%20s" could write 21 bytes into char[20] (now "%19s");
 *  - length guard tightened and its message corrected ("level", not "time").
 */
int f_set_lev(ARG1) {
    unsigned char *p1, *p2;
    float val1, val2;
    int i, n, len_arg1, ival, dash;
    char string[STRING_SIZE];
    char layer_type1[20], layer_type2[20];
    int table_4_5, scale_factor, scale_value;

    if (mode < 0) return 0;
    len_arg1 = strlen(arg1);
    /* pieces of arg1 (plus a terminator) are copied into string[STRING_SIZE] */
    if (len_arg1 >= STRING_SIZE) fatal_error("set_lev: level string too long","");
    if (mode == 99) fprintf(stderr,"set_lev: arg=%s\n", arg1);

    /* get fixed surface pointers */
    p1 = code_table_4_5a_location(sec);
    p2 = code_table_4_5b_location(sec);
    if (p1 == NULL) fatal_error("set_lev: PDT does not have fixed surfaces","");

    /* set fixed surface to undefined */
    for (i = 0; i < 6; i++) p1[i] = (unsigned char) 255;
    if (p2 != NULL) {
        for (i = 0; i < 6; i++) p2[i] = (unsigned char) 255;
    }

    // (level)
    if (parse_level1(sec, arg1, &table_4_5, &scale_factor, &scale_value) == 0) {
        p1[0] = table_4_5;
        p1[1] = INT1(scale_factor);
        int_char(scale_value, p1+2);
        return 0;
    }

    // (level) - (level)
    if (p2 != NULL) {
        /* scan for a " - " separator and try to parse both halves as levels */
        for (dash = 1; dash < len_arg1-4; dash++) {
            if (arg1[dash] != ' ') continue;
            if (arg1[dash+1] != '-') continue;
            if (arg1[dash+2] != ' ') continue;
            for (i = 0; i < dash; i++) {
                string[i] = arg1[i];
            }
            string[dash] = 0;
            if (parse_level1(sec, string, &table_4_5, &scale_factor, &scale_value) == 0) {
                p1[0] = table_4_5;
                p1[1] = INT1(scale_factor);
                int_char(scale_value, p1+2);
                /* copy the text after " - ", including the terminating NUL */
                for (i = dash+3; i <= len_arg1; i++) {
                    string[i-dash-3] = arg1[i];
                }
                if (parse_level1(sec, string, &table_4_5, &scale_factor, &scale_value) == 0) {
                    p2[0] = table_4_5;
                    p2[1] = INT1(scale_factor);
                    int_char(scale_value, p2+2);
                    return 0;
                }
                else {
                    /* 2nd level did not parse: restore 1st surface to undefined */
                    for (i = 0; i < 6; i++) p1[i] = (unsigned char) 255;
                }
            }
        }
    }
    if (mode == 99) fprintf(stderr,"in set_lev arg1=%s\n", arg1);

    // n-n (string) layer
    if (n=-1, sscanf(arg1,"%g-%g %19s layer%n",&val1,&val2, layer_type1, &n), n == len_arg1) {
        layer_type1[19] = 0;
        i = -1;
        if (strncmp(layer_type1,"sigma",20) == 0) i = 104;
        else if (strncmp(layer_type1,"hybrid",20) == 0) i = 105;
        else if (strncmp(layer_type1,"Eta",20) == 0) i = 111;
        if (i != -1) {
            /* layers require two fixed surfaces */
            if (p2 == NULL) fatal_error("set_lev: PDT has only 1 fixed surface, set_lev needs 2","");
            p1[0] = p2[0] = i;
            best_scaled_value(val1, &n, &ival);
            p1[1] = INT1(n);
            int_char(ival, p1+2);
            best_scaled_value(val2, &n, &ival);
            p2[1] = INT1(n);
            int_char(ival, p2+2);
            return 0;
        }
    }

    // n-n (string) (string) layer
    if (n=-1, sscanf(arg1,"%g-%g %19s %19s layer%n",&val1,&val2, layer_type1, layer_type2, &n), n == len_arg1) {
        layer_type1[19] = layer_type2[19] = 0;
        i = -1;
        if (strncmp(layer_type1,"sigma",20) == 0 && strncmp(layer_type2,"height",20) == 0) i = 115;
        else if (strncmp(layer_type1,"hybrid",20) == 0 && strncmp(layer_type2,"height",20) == 0) i = 118;
        else if (strncmp(layer_type1,"hybrid",20) == 0 && strncmp(layer_type2,"pressure",20) == 0) i = 119;
        else if (strncmp(layer_type1,"m",20) == 0 && strncmp(layer_type2,"ocean",20) == 0) i = 161;
        if (i != -1) {
            /* layers require two fixed surfaces */
            if (p2 == NULL) fatal_error("set_lev: PDT has only 1 fixed surface, set_lev needs 2","");
            p1[0] = p2[0] = i;
            best_scaled_value(val1, &n, &ival);
            p1[1] = INT1(n);
            int_char(ival, p1+2);
            best_scaled_value(val2, &n, &ival);
            p2[1] = INT1(n);
            int_char(ival, p2+2);
            return 0;
        }
    }
    if (n=-1, sscanf(arg1,"%g-%g mb above ground%n",&val1,&val2,&n), n == len_arg1) {
        if (p2 == NULL) fatal_error("set_lev: PDT has only 1 fixed surface, set_lev needs 2","");
        val1 *= 100.0;          /* mb -> Pa */
        val2 *= 100.0;
        p1[0] = p2[0] = 108;
        p1[1] = 0;
        int_char((int) val1, p1+2);
        p2[1] = 0;
        int_char((int) val2, p2+2);
        return 0;
    }
    if (n=-1, sscanf(arg1,"%g-%g mb%n",&val1,&val2,&n), n == len_arg1) {
        if (p2 == NULL) fatal_error("set_lev: PDT has only 1 fixed surface, set_lev needs 2","");
        val1 *= 100.0;          /* mb -> Pa */
        val2 *= 100.0;
        p1[0] = p2[0] = 100;
        p1[1] = 0;
        int_char((int) val1, p1+2);
        p2[1] = 0;
        int_char((int) val2, p2+2);
        return 0;
    }
    if (n=-1, sscanf(arg1,"%g-%g m below ground%n",&val1,&val2,&n), n == len_arg1) {
        if (p2 == NULL) fatal_error("set_lev: PDT has only 1 fixed surface, set_lev needs 2","");
        p1[0] = p2[0] = 106;
        best_scaled_value(val1, &n, &ival);
        p1[1] = INT1(n);
        int_char(ival, p1+2);
        best_scaled_value(val2, &n, &ival);
        p2[1] = INT1(n);
        int_char(ival, p2+2);
        return 0;
    }
    if (n=-1, sscanf(arg1,"%g-%g m above ground%n",&val1,&val2,&n), n == len_arg1) {
        if (p2 == NULL) fatal_error("set_lev: PDT has only 1 fixed surface, set_lev needs 2","");
        p1[0] = p2[0] = 103;
        best_scaled_value(val1, &n, &ival);
        p1[1] = INT1(n);
        int_char(ival, p1+2);
        best_scaled_value(val2, &n, &ival);
        p2[1] = INT1(n);
        int_char(ival, p2+2);
        return 0;
    }
    if (n=-1, sscanf(arg1,"%g-%g m below sea level%n",&val1,&val2,&n), n == len_arg1) {
        if (p2 == NULL) fatal_error("set_lev: PDT has only 1 fixed surface, set_lev needs 2","");
        p1[0] = p2[0] = 160;
        best_scaled_value(val1, &n, &ival);
        p1[1] = INT1(n);
        int_char(ival, p1+2);
        best_scaled_value(val2, &n, &ival);
        p2[1] = INT1(n);
        int_char(ival, p2+2);
        return 0;
    }
    if (strcmp("atmos col", arg1) == 0 ||           // wgrib2 compatible
        strcmp("Entire atmosphere (considered as a single layer)", arg1) == 0) {
        if (p2 == NULL) fatal_error("set_lev: PDT has only 1 fixed surface, set_lev needs 2","");
        p1[0] = 1;
        p2[0] = 8;
        return 0;
    }
    fatal_error("need to modify set_lev for %s", arg1);
    return 0;
}
extern struct gribtable_s gribtable[], *user_gribtable;
/*
* HEADER:100:set_var:misc:1:changes variable name
*
* 1.1 2/2012 WNE: old return 1st match
* new match that is not in local tables,
* if nothing, return match in local tables
* not perfect, doesn't know center.
* 1.2 4/2017 WNE: understands
* var discipline=0 center=34 local_table=1 parmcat=1 parm=203
* var discipline=10 master_table=2 parmcat=0 parm=11
* var10_2_1_7_0_11
*/
/* -set_var: change the variable identity (discipline, parameter category,
 * parameter number and related table octets) of the current message.
 * arg1 may be an explicit numeric spec:
 *   varD_M_L_C_PC_PN
 *   var discipline=D master_table=M parmcat=PC parm=PN
 *   var discipline=D center=C local_table=L parmcat=PC parm=PN
 * or a variable name, looked up in (1) the user table if loaded, (2) the
 * non-local (WMO) entries of gribtable[], then (3) center-specific local
 * entries.  Fix: restores "&center" where the source had been corrupted to
 * a mojibake sequence ("¢er"), which did not compile. */
int f_set_var(ARG1) {
    struct gribtable_s *p;
    int i;
    unsigned int discipline, mastertab, localtab, center, parmcat, parmnum;

    /* this function needs to be called with changing parameters */
    if (mode < 0) return 0;

    /* explicit numeric forms */
    if (arg1[0] == 'v' && arg1[1] == 'a' && arg1[2] == 'r') {
        i = sscanf(arg1,"var%u_%u_%u_%u_%u_%u", &discipline, &mastertab, &localtab, &center, &parmcat, &parmnum);
        if (i == 6) {
            sec[0][6] = discipline;
            sec[1][9] = mastertab;
            uint2_char(center,sec[1]+5);
            sec[1][10] = localtab;
            sec[4][9] = parmcat;
            sec[4][10] = parmnum;
            return 0;
        }
        i = sscanf(arg1,"var discipline=%u master_table=%u parmcat=%u parm=%u",&discipline,&mastertab,&parmcat,&parmnum);
        if (i == 4) {
            sec[0][6] = discipline;
            sec[1][9] = mastertab;
            sec[4][9] = parmcat;
            sec[4][10] = parmnum;
            return 0;
        }
        i = sscanf(arg1,"var discipline=%u center=%u local_table=%u parmcat=%u parm=%u",
                &discipline, &center, &localtab, &parmcat, &parmnum);
        if (i == 5) {
            sec[0][6] = discipline;
            uint2_char(center,sec[1]+5);
            sec[1][10] = localtab;
            sec[4][9] = parmcat;
            sec[4][10] = parmnum;
            return 0;
        }
        fprintf(stderr,">>> i = %d\n",i);
    }

    p = NULL;
    /* try user table */
    if (user_gribtable != NULL) {
        p = user_gribtable;
        center = GB2_Center(sec);
        while (p->disc != -1) {
            if (strcmp(arg1,p->name) == 0) {
                if (center == p->cntr) break;
                if (p->disc < 192 && p->pcat < 192 && p->pnum < 192) break;
            }
            p++;
        }
    }
    /* search for non-local table match first */
    if (p == NULL || p->disc == -1) {
        p = gribtable;
        while (p->disc != -1) {
            if (p->disc < 192 && p->pcat < 192 && p->pnum < 192 && strcmp(arg1,p->name) == 0) {
                break;
            }
            p++;
        }
    }
    /* try local tables */
    if (p->disc == -1) {
        p = gribtable;
        center = GB2_Center(sec);
        while (p->disc != -1) {
            if (center == p->cntr && strcmp(arg1,p->name) == 0) {
                break;
            }
            p++;
        }
    }
    if (p->disc == -1) fatal_error("set_var: could not find %s", arg1);

    /* write the matched entry into the raw sections */
    sec[0][6] = p->disc;
    sec[1][9] = p->mtab_set;
    sec[1][10] = p->ltab;
    sec[4][9] = p->pcat;
    sec[4][10] = p->pnum;
    return 0;
}
/*
* HEADER:-1:set_center:misc:1:changes center X = C or C:S C and S are center/subcenter numbers
*/
/* -set_center: change the originating center (and optionally subcenter).
 * arg1 is "C" or "C:S".  Fixes: restores "&center" (was mojibake "¢er",
 * which did not compile) and treats sscanf's EOF return (empty arg) as a
 * bad argument too (was: only i == 0). */
int f_set_center(ARG1) {
    int i, center, subcenter;
    if (mode >= 0) {
        i = sscanf(arg1,"%d:%d", &center, &subcenter);
        if (i <= 0) fatal_error("set_center: bad arg %s", arg1);
        int2_char(center, sec[1]+5);
        if (i == 2) int2_char(subcenter, sec[1]+7);
    }
    return 0;
}
/*
* HEADER:100:set:misc:2:set X = Y, X=local_table,etc (help: -set help help)
*/
static const char *set_options="discipline, center, subcenter, master_table, local_table, background_process_id, "
"analysis_or_forecast_process_id, model_version_date, chemical, table_1.2, table_1.3, table_1.4, "
"table_3.0, table_3.1/GDT, table_3.2, "
"table_3.3, table_3.4, table_4.0/PDT, table_4.1, table_4.2, table_4.3, table_4.5a, table_4.5b, table_4.6, "
"table_4.7, table_4.8, table_4.10, "
"table_4.11, table_4.230, table_5.0/DRT, table_6.0, %";
extern struct codetable_4_230 codetable_4_230_table[];
/* -set option: assign a new value (arg2) to a named GRIB header field (arg1;
 * the full list is in set_options).  Most branches poke the value directly
 * into raw section bytes (sec[n][offset]) or through *_location() helpers
 * that return NULL when the current template lacks the field (in which case
 * the request is silently ignored).  The data_* / data_+ forms instead scale
 * or shift every defined grid point and require decoded data. */
int f_set(ARG2) {
int i, j;
double val;
int year,mon,day,hr,minute,second;
unsigned char *p;
char *endptr;
unsigned int k;
if (mode == -1) {
if (strcmp(arg1,"help") == 0) {
sprintf(inv_out,"-set: change values of %s\n", set_options);
return -1;
}
return 0;
}
if (mode >= 0) {
/* data_*: multiply every defined grid value by arg2 (needs decoded data) */
if (strcmp(arg1,"data_*") == 0) {
val = strtod(arg2, &endptr);
if (*endptr == 0) {
if (decode == 1 && data != NULL) {
#pragma omp parallel for private(k)
for (k = 0; k < ndata; k++) {
if (DEFINED_VAL(data[k])) {
data[k] *= val;
}
}
}
else {
fatal_error("set data_*: need option that uses grid data","");
}
}
else {
fatal_error("set data_*: bad value %s", arg2);
}
return 0;
}
/* data_+: add arg2 to every defined grid value (needs decoded data) */
if (strcmp(arg1,"data_+") == 0) {
val = strtod(arg2, &endptr);
if (*endptr == 0) {
if (decode == 1 && data != NULL) {
#pragma omp parallel for private(k)
for (k = 0; k < ndata; k++) {
if (DEFINED_VAL(data[k])) {
data[k] += val;
}
}
}
else {
fatal_error("set data_+: need option that uses grid data","");
}
}
else {
fatal_error("set data_+: bad value %s", arg2);
}
return 0;
}
/* all remaining options take an integer value */
i = atoi(arg2);
if (strcmp(arg1,"discipline") == 0 || strcmp(arg1,"table_0.0") == 0) {
sec[0][6] = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"local_table") == 0 || strcmp(arg1,"table_1.1") == 0) {
sec[1][10] = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"master_table") == 0 || strcmp(arg1,"table_1.0") == 0) {
sec[1][9] = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"center") == 0) {
int2_char(i, sec[1]+5);
return 0;
}
if (strcmp(arg1,"subcenter") == 0) {
int2_char(i, sec[1]+7);
return 0;
}
if (strcmp(arg1,"background_process_id") == 0) {
p = background_generating_process_identifier_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"analysis_or_forecast_process_id") == 0) {
p = analysis_or_forecast_generating_process_identifier_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
/* model_version_date: arg2 is re-parsed as YYYYMMDDHHmmSS */
if (strcmp(arg1,"model_version_date") == 0) {
p = year_of_model_version_date_location(sec);
if (p) {
i = sscanf(arg2,"%4d%2d%2d%2d%2d%2d", &year,&mon,&day,&hr,&minute,&second);
if (i != 6) fatal_error("set model_version_date YYYYMMDDHHmmSS","");
uint2_char(year, p);
p += 2;
*p++ = mon;
*p++ = day;
*p++ = hr;
*p++ = minute;
*p++ = second;
}
return 0;
}
if (strcmp(arg1,"table_1.2") == 0) {
sec[1][11] = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_1.3") == 0) {
sec[1][19] = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_1.4") == 0) {
sec[1][20] = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_3.0") == 0) {
sec[3][5] = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_3.1") == 0 || strcmp(arg1,"GDT") == 0) {
uint2_char(i, sec[3]+12);
return 0;
}
if (strcmp(arg1,"table_3.2") == 0) {
p = code_table_3_2_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_3.3") == 0 || strcmp(arg1,"flag_table_3.3") == 0) {
return set_flag_table_3_3(sec, i);
}
if (strcmp(arg1,"table_3.4") == 0 || strcmp(arg1,"flag_table_3.4") == 0) {
return set_flag_table_3_4(sec, i);
}
if (strcmp(arg1,"table_4.0") == 0 || strcmp(arg1,"PDT") == 0) {
uint2_char(i, sec[4]+7);
return 0;
}
if (strcmp(arg1,"table_4.1") == 0) {
sec[4][9] = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_4.2") == 0) {
sec[4][10] = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_4.3") == 0) {
p = code_table_4_3_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_4.5a") == 0) {
p = code_table_4_5a_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_4.5b") == 0) {
p = code_table_4_5b_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_4.6") == 0) {
p = code_table_4_6_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_4.7") == 0) {
p = code_table_4_7_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_4.8") == 0) {
p = code_table_4_8_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_4.10") == 0) {
p = code_table_4_10_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"table_4.11") == 0) {
p = code_table_4_11_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
/* chemical: arg2 is first looked up by name in codetable_4_230_table[];
 * failing that, it is used as a numeric code (atoi result in i) */
if (strcmp(arg1,"table_4.230") == 0 || strcmp(arg1, "chemical") == 0) {        // chemical
p = code_table_4_230_location(sec);
if (p) {
j = 0;
while (codetable_4_230_table[j].no != 65535) {
if (strcmp(arg2, codetable_4_230_table[j].name) == 0) {
uint2_char(codetable_4_230_table[j].no, p);
return 0;
}
j++;
}
if (i == 0 && arg2[0] != '0') fatal_error("set table_4.230/chemical wrong chemical=%s\n", arg2);
uint2_char(i, p);
return 0;
}
/* could not change chemical type */
fatal_error("set table_4.230/chemical wrong template chemical=%s\n", arg2);
return 0;
}
if (strcmp(arg1,"table_5.0") == 0 || strcmp(arg1,"DRT") == 0) {
uint2_char(i, sec[5]+9);
return 0;
}
if (strcmp(arg1,"table_6.0") == 0) {
sec[6][5] = (unsigned char) i;
return 0;
}
if (strcmp(arg1,"%") == 0) {
p = percentile_value_location(sec);
if (p) *p = (unsigned char) i;
return 0;
}
fatal_error("set: allowed values: %s", set_options);
return 1;
}
return 0;
}
/*
* HEADER:100:set_ave:misc:1:set ave/acc .. only use on pdt=4.0/4.8 (old code)
*/
/* the old set_ftime/set_ave is being replaced by a new version of set_ftime */
/* -set_ave: rebuild section 4 as PDT 4.8 (statistical processing) from a
 * human-readable description, e.g.
 *   "6 hour ave anl", "1-5 hour ave fcst",
 *   "1@6 hour ave anl,missing=0", "1@6 hour ave(6 hour fcst),missing=0",
 *   "1@6 hour ave(0-6 hour ave fcst),missing=0" (also acc/min/max, and
 *   "++" forms where the forecast time, not the reference time, advances).
 * The new section is built in the static buffer new_sec4 and sec[4] is
 * repointed at it; only valid for PDT 4.0/4.8 input (old code path, being
 * replaced by the new -set_ftime).
 * Fixes vs. previous version:
 *  - len is initialized: it is only stored by %n on a complete sscanf match,
 *    so a failed first match used to read an indeterminate value (UB);
 *  - the "expecting anl/fcst" fatal_error reported string instead of string2.
 * NOTE(review): the %s conversions are not width-limited; arg1 longer than
 * STRING_SIZE could overflow string[] — confirm callers bound arg1. */
int f_set_ave(ARG1) {
    int i, tr, tr2, len, len_arg1;
    char string[STRING_SIZE];
    char string2[STRING_SIZE];
    char string3[STRING_SIZE];
    const char *string4;
    static unsigned char new_sec4[58+12+12];            // now use n == 3 forms
    int year, month, day, hour, minute, second;
    int j,k,j2,k2, m, m2, missing;

    if (mode < 0) return 0;
    len_arg1 = strlen(arg1);
    len = -1;           /* %n target: set only when a pattern matches fully */

    // 6 hour ave anl
    i = sscanf(arg1,"%d %s %s %s%n",&j,string,string2,string3,&len);
    if (len != len_arg1) i = 0;
    if (i == 4 && ((tr = a2time_range(string)) >= 0)) {
        for (i = 0; i < 34; i++) new_sec4[i] = sec[4][i];
        get_time(sec[1]+12, &year, &month, &day, &hour, &minute, &second);
        add_time(&year, &month, &day, &hour, &minute, &second, j, tr);
        save_time(year, month, day, hour, minute, second, new_sec4+34);
        uint_char(58, new_sec4);
        uint2_char(8, new_sec4+7);              // pdt = 8
        new_sec4[17] = tr;                      // hour, etc
        int_char(0, new_sec4+18);               // start time
        new_sec4[41] = 1;                       // number of time ranges
        uint_char(0, new_sec4+42);              // missing
        if (strcmp(string2,"ave") == 0) new_sec4[46] = 0;
        else if (strcmp(string2,"acc") == 0) new_sec4[46] = 1;
        else if (strcmp(string2,"max") == 0) new_sec4[46] = 2;
        else if (strcmp(string2,"min") == 0) new_sec4[46] = 3;
        else if (strcmp(string2,"last-first") == 0) new_sec4[46] = 4;
        else if (strcmp(string2,"RMS") == 0) new_sec4[46] = 5;
        else if (strcmp(string2,"StdDev") == 0) new_sec4[46] = 6;
        else if (strcmp(string2,"covar") == 0) new_sec4[46] = 7;
        else if (strcmp(string2,"first-last") == 0) new_sec4[46] = 8;
        else fatal_error("set_ave: unknown statistical operator %s",string2);
        new_sec4[47] = 1;
        new_sec4[48] = tr;                      // hour
        uint_char(j, new_sec4+49);
        new_sec4[53] = tr;
        uint_char(0, new_sec4+54);
        sec[4] = &(new_sec4[0]);
        return 0;
    }

    // 1-5 hour ave fcst
    i = sscanf(arg1,"%d-%d %s %s %s%n",&j,&k,string3,string,string2,&len);
    if (len != len_arg1) i = 0;
    if (i == 5 && (tr = a2time_range(string3)) >= 0) {
        for (i = 0; i < 34; i++) new_sec4[i] = sec[4][i];
        get_time(sec[1]+12, &year, &month, &day, &hour, &minute, &second);
        add_time(&year, &month, &day, &hour, &minute, &second, k, tr);
        save_time(year, month, day, hour, minute, second, new_sec4+34);
        uint_char(58, new_sec4);
        uint2_char(8, new_sec4+7);              // pdt = 8
        new_sec4[17] = tr;                      // hour
        int_char(j, new_sec4+18);               // start time
        new_sec4[41] = 1;                       // number of time ranges
        uint_char(0, new_sec4+42);              // missing
        if (strcmp(string,"ave") == 0) new_sec4[46] = 0;
        else if (strcmp(string,"acc") == 0) new_sec4[46] = 1;
        else if (strcmp(string,"max") == 0) new_sec4[46] = 2;
        else if (strcmp(string,"min") == 0) new_sec4[46] = 3;
        else if (strcmp(string,"last-first") == 0) new_sec4[46] = 4;
        else if (strcmp(string,"RMS") == 0) new_sec4[46] = 5;
        else if (strcmp(string,"StdDev") == 0) new_sec4[46] = 6;
        else if (strcmp(string,"covar") == 0) new_sec4[46] = 7;
        else if (strcmp(string,"first-last") == 0) new_sec4[46] = 8;
        else fatal_error("set_ave: unknown statistical operator %s", string);
        if (strcmp(string2,"anl") == 0) new_sec4[47] = 1;
        else if (strcmp(string2,"fcst") == 0) new_sec4[47] = 2;
        else fatal_error("set_ave: expecting anl/fcst got %s", string2);
        new_sec4[48] = tr;                      // hour
        uint_char(k-j, new_sec4+49);
        new_sec4[53] = tr;
        uint_char(0, new_sec4+54);
        sec[4] = &(new_sec4[0]);
        return 0;
    }

    // obsolete form: 1@6 hour ave anl,missing=0
    i = sscanf(arg1,"%d@%d %s ave anl,missing=%d%n",&j,&k,string,&missing,&len);
    if (len != len_arg1) i = 0;
    // new form 1@6 hour ave(anl),missing=0
    if (i != 4) {
        i = sscanf(arg1,"%d@%d %s ave(anl),missing=%d%n",&j,&k,string,&missing,&len);
        if (len != len_arg1) i = 0;
    }
    if (i == 4) {
        tr = a2time_range(string);
        if (tr == -1) fatal_error("set_ave: bad time range %s", string);
        for (i = 0; i < 34; i++) new_sec4[i] = sec[4][i];
        get_time(sec[1]+12, &year, &month, &day, &hour, &minute, &second);
        add_time(&year, &month, &day, &hour, &minute, &second, (j-1)*k, tr);
        save_time(year, month, day, hour, minute, second, new_sec4+34);
        uint_char(58, new_sec4);                // length of section
        uint2_char(8, new_sec4+7);              // pdt = 8
        new_sec4[17] = tr;                      // hour
        int_char(0, new_sec4+18);               // start time 0 = analysis
        new_sec4[41] = 1;                       // number of time ranges
        uint_char(missing, new_sec4+42);        // missing
        new_sec4[46] = 0;                       // 0 = ave 1 = acc
        new_sec4[47] = 1;                       // 1 = start of forecast increased
        new_sec4[53] = new_sec4[48] = tr;
        uint_char((j-1)*k, new_sec4+49);
        uint_char(k, new_sec4+54);
        sec[4] = &(new_sec4[0]);
        return 0;
    }

    // 1@6 hour ave(6 hour fcst),missing=0
    i = sscanf(arg1,"%d@%d %s ave(%d %s fcst),missing=%d%n",&j,&k,string, &m, string2, &missing,&len);
    if (len != len_arg1) i = 0;
    if (i == 6) {
        tr = a2time_range(string);
        if (tr == -1) fatal_error("set_ave: bad time range %s", string);
        tr2 = a2time_range(string2);
        if (tr2 == -1) fatal_error("set_ave: bad time range %s", string2);
        for (i = 0; i < 34; i++) new_sec4[i] = sec[4][i];
        get_time(sec[1]+12, &year, &month, &day, &hour, &minute, &second);
        add_time(&year, &month, &day, &hour, &minute, &second, (j-1)*k, tr);
        add_time(&year, &month, &day, &hour, &minute, &second, m, tr2);
        save_time(year, month, day, hour, minute, second, new_sec4+34);
        uint_char(58, new_sec4);                // length of section
        uint2_char(8, new_sec4+7);              // pdt = 8
        new_sec4[17] = tr2;                     // forecast time range
        int_char(m, new_sec4+18) ;              // start time 0 = analysis
        new_sec4[41] = 1;                       // number of time ranges
        uint_char(missing, new_sec4+42);        // missing
        new_sec4[46] = 0;                       // 0 = ave 1 = acc
        new_sec4[47] = 1;                       // 1 = start of forecast increased
        new_sec4[53] = new_sec4[48] = tr;
        uint_char((j-1)*k, new_sec4+49);
        uint_char(k, new_sec4+54);
        sec[4] = &(new_sec4[0]);
        return 0;
    }

    // 1@6 hour ave(6 hour fcst)++,missing=0
    i = sscanf(arg1,"%d@%d %s ave(%d %s fcst)++,missing=%d%n",&j,&k,string, &m, string2, &missing,&len);
    if (len != len_arg1) i = 0;
    if (i == 6) {
        tr = a2time_range(string);
        if (tr == -1) fatal_error("set_ave: bad time range %s", string);
        tr2 = a2time_range(string2);
        if (tr2 == -1) fatal_error("set_ave: bad time range %s", string2);
        for (i = 0; i < 34; i++) new_sec4[i] = sec[4][i];
        get_time(sec[1]+12, &year, &month, &day, &hour, &minute, &second);
        add_time(&year, &month, &day, &hour, &minute, &second, (j-1)*k, tr);
        add_time(&year, &month, &day, &hour, &minute, &second, m, tr2);
        save_time(year, month, day, hour, minute, second, new_sec4+34);
        uint_char(58, new_sec4);                // length of section
        uint2_char(8, new_sec4+7);              // pdt = 8
        new_sec4[17] = tr2;                     // forecast time range
        int_char(m, new_sec4+18);               // start time 0 = analysis
        new_sec4[41] = 1;                       // number of time ranges
        uint_char(missing, new_sec4+42);        // missing
        new_sec4[46] = 0;                       // 0 = ave 1 = acc
        new_sec4[47] = 2;                       // 2 = forecast time is increased
        new_sec4[53] = new_sec4[48] = tr;
        uint_char((j-1)*k, new_sec4+49);
        uint_char(k, new_sec4+54);
        sec[4] = &(new_sec4[0]);
        return 0;
    }

    // 1@6 hour ave(0-6 hour ave fcst),missing=0
    i = sscanf(arg1,"%d@%d %s ave(%d-%d %s %s fcst),missing=%d%n",&j,&k,string, &m, &m2,string2, string3, &missing,&len);
    string4 = "ave";
    if (i != 8) {
        i = sscanf(arg1,"%d@%d %s acc(%d-%d %s %s fcst),missing=%d%n",&j,&k,string, &m, &m2,string2, string3, &missing,&len);
        string4 = "acc";
    }
    if (i != 8) {
        i = sscanf(arg1,"%d@%d %s min(%d-%d %s %s fcst),missing=%d%n",&j,&k,string, &m, &m2,string2, string3, &missing,&len);
        string4 = "min";
    }
    if (i != 8) {
        i = sscanf(arg1,"%d@%d %s max(%d-%d %s %s fcst),missing=%d%n",&j,&k,string, &m, &m2,string2, string3, &missing,&len);
        string4 = "max";
    }
    if (len != len_arg1) i = 0;
    if (i == 8) {
        tr = a2time_range(string);
        if (tr == -1) fatal_error("set_ave: bad time range %s", string);
        tr2 = a2time_range(string2);
        if (tr2 == -1) fatal_error("set_ave: bad time range %s", string2);
        for (i = 0; i < 34; i++) new_sec4[i] = sec[4][i];
        get_time(sec[1]+12, &year, &month, &day, &hour, &minute, &second);
        add_time(&year, &month, &day, &hour, &minute, &second, (j-1)*k, tr);
        add_time(&year, &month, &day, &hour, &minute, &second, m2, tr2);
        save_time(year, month, day, hour, minute, second, new_sec4+34);
        uint_char(70, new_sec4);                // length of section
        uint2_char(8, new_sec4+7);              // pdt = 8
        new_sec4[17] = tr2;                     // forecast time range
        int_char(m, new_sec4+18);               // start time 0 = analysis
        new_sec4[41] = 2;                       // number of time ranges
        uint_char(missing, new_sec4+42);        // missing
        // string4
        if (strcmp(string4,"ave") == 0) new_sec4[46] = 0;
        else if (strcmp(string4,"acc") == 0) new_sec4[46] = 1;
        else if (strcmp(string4,"max") == 0) new_sec4[46] = 2;
        else if (strcmp(string4,"min") == 0) new_sec4[46] = 3;
        else fatal_error("set_ave: unknown statistical operator %s",string4);
        new_sec4[47] = 1;                       // 1 = start of forecast increased
        new_sec4[53] = new_sec4[48] = tr;
        uint_char((j-1)*k, new_sec4+49);
        uint_char(k, new_sec4+54);
        if (strcmp(string3,"ave") == 0) new_sec4[46+12] = 0;
        else if (strcmp(string3,"acc") == 0) new_sec4[46+12] = 1;
        else if (strcmp(string3,"max") == 0) new_sec4[46+12] = 2;
        else if (strcmp(string3,"min") == 0) new_sec4[46+12] = 3;
        else fatal_error("set_ave: unknown statistical operator %s",string3);
        // new_sec4[46+12] = 0;                 // 0 = ave 1 = acc
        new_sec4[47+12] = 2;                    // same forecast
        new_sec4[53+12] = new_sec4[48+12] = tr2;
        uint_char(m2-m , new_sec4+49+12);
        uint_char(0, new_sec4+54+12);
        sec[4] = &(new_sec4[0]);
        return 0;
    }

    // 1@6 hour ave(0-6 hour ave fcst)++,missing=0
    i = sscanf(arg1,"%d@%d %s ave(%d-%d %s %s fcst)++,missing=%d%n",&j,&k,string, &m, &m2,string2, string3, &missing,&len);
    string4 = "ave";
    if (i != 8) {
        i = sscanf(arg1,"%d@%d %s acc(%d-%d %s %s fcst)++,missing=%d%n",&j,&k,string, &m, &m2,string2, string3, &missing,&len);
        string4 = "acc";
    }
    if (i != 8) {
        i = sscanf(arg1,"%d@%d %s min(%d-%d %s %s fcst)++,missing=%d%n",&j,&k,string, &m, &m2,string2, string3, &missing,&len);
        string4 = "min";
    }
    if (i != 8) {
        i = sscanf(arg1,"%d@%d %s max(%d-%d %s %s fcst)++,missing=%d%n",&j,&k,string, &m, &m2,string2, string3, &missing,&len);
        string4 = "max";
    }
    if (len != len_arg1) i = 0;
    if (i == 8) {
        tr = a2time_range(string);
        if (tr == -1) fatal_error("set_ave: bad time range %s", string);
        tr2 = a2time_range(string2);
        if (tr2 == -1) fatal_error("set_ave: bad time range %s", string2);
        for (i = 0; i < 34; i++) new_sec4[i] = sec[4][i];
        get_time(sec[1]+12, &year, &month, &day, &hour, &minute, &second);
        add_time(&year, &month, &day, &hour, &minute, &second, (j-1)*k, tr);
        add_time(&year, &month, &day, &hour, &minute, &second, m2, tr2);
        save_time(year, month, day, hour, minute, second, new_sec4+34);
        uint_char(70, new_sec4);                // length of section
        uint2_char(8, new_sec4+7);              // pdt = 8
        new_sec4[17] = tr2;                     // forecast time range
        int_char(m, new_sec4+18);               // start time 0 = analysis
        new_sec4[41] = 2;                       // number of time ranges
        uint_char(missing, new_sec4+42);        // missing
        // string4
        if (strcmp(string4,"ave") == 0) new_sec4[46] = 0;
        else if (strcmp(string4,"acc") == 0) new_sec4[46] = 1;
        else if (strcmp(string4,"max") == 0) new_sec4[46] = 2;
        else if (strcmp(string4,"min") == 0) new_sec4[46] = 3;
        else fatal_error("set_ave: unknown statistical operator %s",string4);
        new_sec4[47] = 2;                       // 2 = fcst time increased
        new_sec4[53] = new_sec4[48] = tr;
        uint_char((j-1)*k, new_sec4+49);
        uint_char(k, new_sec4+54);
        if (strcmp(string3,"ave") == 0) new_sec4[46+12] = 0;
        else if (strcmp(string3,"acc") == 0) new_sec4[46+12] = 1;
        else if (strcmp(string3,"max") == 0) new_sec4[46+12] = 2;
        else if (strcmp(string3,"min") == 0) new_sec4[46+12] = 3;
        else fatal_error("set_ave: unknown statistical operator %s",string3);
        // new_sec4[46+12] = 0;                 // 0 = ave 1 = acc
        new_sec4[47+12] = 2;                    // same forecast
        new_sec4[53+12] = new_sec4[48+12] = tr2;
        uint_char(m2-m , new_sec4+49+12);
        uint_char(0, new_sec4+54+12);
        sec[4] = &(new_sec4[0]);
        return 0;
    }

    // old 10@30 year ave(124@6 hour ave anl)
    // 10@30 year ave(124@6 hour ave (anl))
    i = sscanf(arg1,"%d@%d %s ave(%d@%d %s ave (anl)),missing=%d%n",&j,&k,string, &j2,&k2,string2, &missing,&len);
    if (len != len_arg1) i = 0;
    if (i == 7) {
        fprintf(stderr,">>> need to check out climo1\n");
        tr = a2time_range(string);
        if (tr == -1) fatal_error("set_ave: bad time range %s", string);
        tr2 = a2time_range(string2);
        if (tr2 == -1) fatal_error("set_ave: bad time range %s", string2);
        for (i = 0; i < 34; i++) new_sec4[i] = sec[4][i];
        get_time(sec[1]+12, &year, &month, &day, &hour, &minute, &second);
        add_time(&year, &month, &day, &hour, &minute, &second, (j-1)*k, tr);
        add_time(&year, &month, &day, &hour, &minute, &second, (j2-1)*k2, tr2);
        save_time(year, month, day, hour, minute, second, new_sec4+34);
        uint_char(58, new_sec4);                // length of section
        uint2_char(8, new_sec4+7);              // pdt = 8
        new_sec4[17] = tr;                      // hour
        int_char(0, new_sec4+18);               // start time 0 = analysis
        new_sec4[41] = 2;                       // number of time ranges
        uint_char(missing, new_sec4+42);        // missing
        new_sec4[46] = 0;                       // ave
        new_sec4[47] = 1;                       // 1 = start of forecast increased
        new_sec4[53] = new_sec4[48] = tr;
        uint_char((j-1)*k, new_sec4+49);
        uint_char(k, new_sec4+54);
        new_sec4[46+12] = 0;                    // ave
        new_sec4[47+12] = 1;                    // 1 = start of forecast increased
        new_sec4[53+12] = new_sec4[48+12] = tr2;
        uint_char((j2-1)*k2, new_sec4+49+12);
        uint_char(k2, new_sec4+54+12);
        sec[4] = &(new_sec4[0]);
        return 0 ;
    }
    // 10@30 year ave(124@6 hour ave 6 hour fcst)
    // 10@30 year ave(124@6 hour ave(0-6 hour ave fcst)
    fatal_error("set_ave: not implemented %s", arg1);
    return 0;
}
/*
* HEADER:-1:set_flag_table_3.3:misc:1:flag table 3.3 = X
*/
/* -set_flag_table_3.3: set flag table 3.3 (resolution/component flags)
 * to atoi(arg1); no-op during initialization (mode < 0). */
int f_set_flag_table_3_3(ARG1) {
    if (mode < 0) return 0;
    return set_flag_table_3_3(sec, atoi(arg1));
}
/*
* HEADER:-1:set_flag_table_3.4:misc:1:flag table 3.4 = X
*/
/* -set_flag_table_3.4: set flag table 3.4 (scanning mode flags)
 * to atoi(arg1); no-op during initialization (mode < 0). */
int f_set_flag_table_3_4(ARG1) {
    if (mode < 0) return 0;
    return set_flag_table_3_4(sec, atoi(arg1));
}
|
GB_unaryop__ainv_uint8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_uint16
// op(A') function: GB_tran__ainv_uint8_uint16
// C type: uint8_t
// A type: uint16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
// input (A) entry type
#define GB_ATYPE \
    uint16_t

// output (C) entry type
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// access the pC-th output entry
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply AINV (additive inverse) entrywise, with
// typecast from uint16_t to uint8_t.  The GB_CAST_OP macro defined above
// is expanded inline here; the computation is identical.
GrB_Info GB_unop__ainv_uint8_uint16
(
    uint8_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        uint16_t aij = Ax [k] ;         // aij = Ax [k]
        uint8_t  z = (uint8_t) aij ;    // typecast to the output type
        Cx [k] = -z ;                   // cij = -z (wraps mod 256)
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast each entry to uint8_t, and
// apply the AINV operator.  The loop body lives in the shared template
// GB_unaryop_transpose.c, which expands the GB_* macros defined above.
GrB_Info GB_tran__ainv_uint8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
  Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
  Enumerated declarations.
*/
/* Per-channel compression schemes used in PSD files. */
typedef enum
{
  Raw = 0,                   /* uncompressed scanlines */
  RLE = 1,                   /* PackBits run-length encoding */
  ZipWithoutPrediction = 2,  /* zlib deflate */
  ZipWithPrediction = 3      /* zlib deflate of per-row deltas */
} PSDCompressionType;

/* Color modes from the PSD file header (values 5 and 6 are unused). */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
  Typedef declarations.
*/
/* One channel record of a layer: its id and data length. */
typedef struct _ChannelInfo
{
  short
    type;  /* channel id (negative ids are alpha/user-mask channels;
              see SetPSDPixel/ReadPSDChannel) */

  size_t
    size;  /* channel data length in bytes */
} ChannelInfo;

/* Layer-mask data attached to a layer. */
typedef struct _MaskInfo
{
  Image
    *image;  /* decoded mask pixels (NULL if none) */

  RectangleInfo
    page;  /* mask placement relative to the canvas */

  unsigned char
    background,  /* value used outside the mask rectangle */
    flags;       /* mask flags from the layer record */
} MaskInfo;

/* Everything parsed from one PSD layer record. */
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];  /* 4-byte PSD blend-mode key, e.g. "norm" */

  Image
    *image;  /* decoded layer pixels */

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;  /* layer placement on the canvas */

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[257],  /* Pascal-style layer name (up to 256 chars + NUL) */
    visible;

  unsigned short
    channels;

  StringInfo
    *info;  /* raw additional layer information, if preserved */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD/PSB file begins with the 4-byte signature "8BPS".
  */
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
%      Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map an ImageMagick composite operator to the 4-byte PSD blend-mode key.
  The keys are stored big-endian in the file, so for LSB-endian images the
  reversed spelling is returned (e.g. "norm" becomes "mron").  Unknown
  operators fall back to normal ("norm") blending.
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
  Undo Photoshop's blending of semi-transparent pixels with white (see the
  note above).  Only applies to sRGB images with a blended alpha channel;
  disabled when the image option 'psd:alpha-unblend' is false.
  Returns MagickTrue on success.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  if ((image->alpha_trait != BlendPixelTrait) ||
      (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      ssize_t
        i;

      /* gamma is the normalized alpha; fully transparent or fully opaque
         pixels need no correction. */
      gamma=QuantumScale*GetPixelAlpha(image, q);
      if (gamma != 0.0 && gamma != 1.0)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            /* invert "c' = c*gamma + white*(1-gamma)" for each color
               channel, leaving alpha untouched */
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Translate the PSD on-disk compression code into ImageMagick's
  CompressionType (both ZIP variants map to ZipCompression).
*/
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Scale every pixel's alpha by the layer opacity (revert == MagickFalse),
  or divide it back out (revert != MagickFalse) when writing.  A fully
  opaque layer is a no-op.  Returns MagickTrue on success.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  /* ensure the image has an alpha channel before scaling it */
  if (image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Apply (or revert) a layer's opacity mask: the mask image is composited
  onto a canvas-sized clone filled with 'background', then each pixel's
  alpha is multiplied (or divided, when revert != MagickFalse) by the mask
  intensity at that location.  Images without an alpha channel are left
  unchanged.  Returns MagickTrue on success.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  /* expand the (possibly smaller) mask to the full image area, using the
     mask background color outside its rectangle */
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color,exception);
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=(MagickRealType) GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  Stash the layer's opacity-mask image in the image registry under a random
  key and record that key in the 'psd:opacity-mask' artifact so the writer
  can restore the mask later.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    Request 8+1 key bytes: 8 random bytes (indices 0..7) keep the registry
    key unique, index 8 carries the mask background.  Requesting only 2+1
    bytes (as before) left key[3..7] uninitialized and wrote key[8]/key[9]
    beyond the requested key length.  key[9]='\0' relies on StringInfo
    padding its allocation beyond the requested length.
  */
  key_info=GetRandomKey(random_info,8+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Decode one PackBits (RLE) compressed PSD scanline into raw samples.

  number_compact_pixels: bytes available in 'compact_pixels'.
  depth: bits per sample; 1, 2 and 4 expand each input byte into 8, 4 and
    2 output bytes respectively, any other depth copies bytes through.
  number_pixels: capacity of the output buffer 'pixels'.

  Returns the number of bytes written to 'pixels' (may be short if the
  input is exhausted or the output buffer would overflow).
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/* bail out (returning bytes written so far) when input runs dry */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

/* bail out before overrunning the output buffer */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    pixel;

  ssize_t
    i,
    j;

  size_t
    length;

  ssize_t
    packets;

  packets=(ssize_t) number_compact_pixels;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    /* first byte is the PackBits control byte */
    length=(size_t) (*compact_pixels++);
    if (length == 128)
      continue;  /* 128 is a no-op marker */
    if (length > 128)
      {
        /*
          Replicate run: one source byte repeated 257-length times.
        */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              /* 1-bit: expand to 8 bytes, inverting (0 bit => white) */
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /*
      Literal run: length+1 source bytes copied through.
    */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
/*
  Release every layer's image, mask image and metadata, then the layer
  array itself.  Always returns NULL.
*/
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  ssize_t
    j;

  for (j=0; j < number_layers; j++)
  {
    LayerInfo
      *layer = layer_info+j;

    if (layer->image != (Image *) NULL)
      layer->image=DestroyImage(layer->image);
    if (layer->mask.image != (Image *) NULL)
      layer->mask.image=DestroyImage(layer->mask.image);
    if (layer->info != (StringInfo *) NULL)
      layer->info=DestroyStringInfo(layer->info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Bytes per sample for this image: colormapped images with more than 256
  colors use 2-byte indexes; otherwise the size follows the bit depth
  (1-8 -> 1 byte, 9-16 -> 2 bytes, >16 -> 4 bytes).
*/
static inline size_t GetPSDPacketSize(const Image *image)
{
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 16)
    return(4);
  return(image->depth > 8 ? 2 : 1);
}
/*
  Read a length field from the blob: 4 bytes for PSD (version 1),
  8 bytes for PSB (version 2).
*/
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
/*
  Bytes per uncompressed scanline; 1-bit images pack 8 pixels per byte.
*/
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    packet_size;

  packet_size=GetPSDPacketSize(image);
  if (image->depth == 1)
    return(((image->columns+7)/8)*packet_size);
  return(image->columns*packet_size);
}
/*
  Human-readable name of a PSD color mode (for logging).
*/
static const char *ModeToString(PSDImageType type)
{
  if (type == BitmapMode)
    return "Bitmap";
  if (type == GrayscaleMode)
    return "Grayscale";
  if (type == IndexedMode)
    return "Indexed";
  if (type == RGBMode)
    return "RGB";
  if (type == CMYKMode)
    return "CMYK";
  if (type == MultichannelMode)
    return "Multichannel";
  if (type == DuotoneMode)
    return "Duotone";
  if (type == LabMode)
    return "L*A*B";
  return "unknown";
}
/*
  Negate every channel except alpha (used by the PSD coder when converting
  CMYK sample polarity).  Returns the status of NegateImage().
*/
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickBooleanType
    status;

  /* temporarily restrict channel updates so alpha is left untouched */
  channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  status=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,channel_mask);
  return(status);
}
/*
  Walk the 8BIM image-resource blocks, extracting the resolution (id
  0x03ed) and the "merged image present" flag (id 0x0421), and return the
  whole section as an "8bim" profile (NULL when too short).  Each block is
  "8BIM" + 2-byte id + Pascal name (padded to even) + 4-byte length + data
  (padded to even).
*/
static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image,
  const unsigned char *blocks,size_t length)
{
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;  /* scratch for fields we skip over */

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* name is padded so that length byte + name occupy an even count */
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    /* reject blocks whose payload would run outside the buffer */
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatImageProperty(image,"tiff:XResolution","%*g",
          GetMagickPrecision(),image->resolution.x);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatImageProperty(image,"tiff:YResolution","%*g",
          GetMagickPrecision(),image->resolution.y);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* version-info resource: byte 4 tells if a merged image exists */
        if ((offset > 4) && (*(p+4) == 0))
          psd_info->has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* data is padded to an even byte count */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}
/*
  Map a 4-byte PSD blend-mode key (big-endian spelling) to the matching
  ImageMagick composite operator; unknown or NULL keys map to
  OverCompositeOp.  Inverse of CompositeOperatorToPSDBlendMode().
*/
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"norm",4) == 0)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"mul ",4) == 0)
    return(MultiplyCompositeOp);
  if (LocaleNCompare(mode,"diss",4) == 0)
    return(DissolveCompositeOp);
  if (LocaleNCompare(mode,"diff",4) == 0)
    return(DifferenceCompositeOp);
  if (LocaleNCompare(mode,"dark",4) == 0)
    return(DarkenCompositeOp);
  if (LocaleNCompare(mode,"lite",4) == 0)
    return(LightenCompositeOp);
  if (LocaleNCompare(mode,"hue ",4) == 0)
    return(HueCompositeOp);
  if (LocaleNCompare(mode,"sat ",4) == 0)
    return(SaturateCompositeOp);
  if (LocaleNCompare(mode,"colr",4) == 0)
    return(ColorizeCompositeOp);
  if (LocaleNCompare(mode,"lum ",4) == 0)
    return(LuminizeCompositeOp);
  if (LocaleNCompare(mode,"scrn",4) == 0)
    return(ScreenCompositeOp);
  if (LocaleNCompare(mode,"over",4) == 0)
    return(OverlayCompositeOp);
  if (LocaleNCompare(mode,"hLit",4) == 0)
    return(HardLightCompositeOp);
  if (LocaleNCompare(mode,"sLit",4) == 0)
    return(SoftLightCompositeOp);
  if (LocaleNCompare(mode,"smud",4) == 0)
    return(ExclusionCompositeOp);
  if (LocaleNCompare(mode,"div ",4) == 0)
    return(ColorDodgeCompositeOp);
  if (LocaleNCompare(mode,"idiv",4) == 0)
    return(ColorBurnCompositeOp);
  if (LocaleNCompare(mode,"lbrn",4) == 0)
    return(LinearBurnCompositeOp);
  if (LocaleNCompare(mode,"lddg",4) == 0)
    return(LinearDodgeCompositeOp);
  if (LocaleNCompare(mode,"lLit",4) == 0)
    return(LinearLightCompositeOp);
  if (LocaleNCompare(mode,"vLit",4) == 0)
    return(VividLightCompositeOp);
  if (LocaleNCompare(mode,"pLit",4) == 0)
    return(PinLightCompositeOp);
  if (LocaleNCompare(mode,"hMix",4) == 0)
    return(HardMixCompositeOp);
  return(OverCompositeOp);
}
/*
  Read 'length' bytes into 'p'.  When the blob is flagged as little-endian
  the bytes are reversed in place so the caller always sees big-endian
  order.  Returns the byte count actually read.
*/
static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length)
{
  ssize_t
    count;

  count=ReadBlob(image,length,(unsigned char *) p);
  if ((count == (ssize_t) length) && (image->endian != MSBEndian))
    {
      char
        *head,
        *tail;

      head=p;
      tail=p+length-1;
      while (head < tail)
      {
        char
          byte;

        byte=*head;
        *head++=*tail;
        *tail--=byte;
      }
    }
  return(count);
}
/*
  Store one decoded sample into the pixel 'q'.  'type' is the PSD channel
  id: 0..4 select red/green/blue/black-or-alpha/alpha, negative ids are
  alpha or mask channels.  For colormapped images the sample is a colormap
  index (or, for nonzero types, an alpha override of the mapped color).
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      Quantum
        index;

      index=pixel;
      if (packet_size == 1)
        index=(Quantum) ScaleQuantumToChar(index);
      index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index,
        exception);
      if (type == 0)
        SetPixelIndex(image,index,q);
      /* with more channels to come, defer resolving the colormap entry */
      if ((type == 0) && (channels > 1))
        return;
      color=image->colormap+(ssize_t) GetPixelIndex(image,q);
      if (type != 0)
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /* channel 3 is black for CMYK, otherwise alpha (if present) */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* RGB-compatible images carry at most 4 meaningful channels */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
/*
  Convert one row of raw channel samples into image pixels.  Samples are
  1, 2 or 4 bytes (big-endian; 4-byte samples are floats), except for
  1-bit images where each byte expands to up to 8 pixels.  Returns the
  status of SyncAuthenticPixels().
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  const unsigned char
    *p;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble));
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /* 1-bit data: expand the byte, set bits map to black */
        ssize_t
          bit,
          number_bits;

        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* compensate for the extra x++ unless the row ended exactly */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
/*
  Read an uncompressed channel: one blob read per scanline, converted by
  ReadPSDChannelPixels().  Returns MagickFalse on a short read or pixel
  failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    /* assume failure until the row is fully read and converted */
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
/*
  Read the per-scanline compressed byte counts that precede RLE channel
  data: 2 bytes each for PSD (version 1), 4 bytes each for PSB.  Returns
  NULL when the table cannot be allocated.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    i;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (i=0; i < (ssize_t) size; i++)
  {
    if (psd_info->version == 1)
      sizes[i]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[i]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
/*
  Read an RLE (PackBits) compressed channel.  'sizes' holds the compressed
  byte count of each scanline (from ReadPSDRLESizes); each row is read,
  decoded with DecodePSDPixels() and converted with ReadPSDChannelPixels().
  Returns MagickFalse on a short read, bad decode or pixel failure.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* size the compressed-row buffer to the largest scanline */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* depth 123456 is a sentinel telling DecodePSDPixels to copy bytes
       through rather than bit-expanding (1-bit rows are already packed) */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Undo ZIP-with-prediction delta coding for 8-bit samples: within each row
  every byte is the delta from its left neighbor, so add left-to-right.
  'count' is the total byte count, 'row_size' the bytes per row.
*/
static void Unpredict8Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *p;

  size_t
    length,
    remaining;

  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    length=image->columns;
    while (--length)
    {
      *(p+1)+=*p;
      p++;
    }
    p++;  /* step past the last byte of the row */
    remaining-=row_size;
  }
}
/*
  Undo ZIP-with-prediction delta coding for big-endian 16-bit samples:
  each sample is the delta from its left neighbor; the low-byte addition's
  carry is folded into the high byte.
*/
static void Unpredict16Bit(const Image *image,unsigned char *pixels,
  const size_t count,const size_t row_size)
{
  unsigned char
    *p;

  size_t
    length,
    remaining;

  p=pixels;
  remaining=count;
  while (remaining > 0)
  {
    length=image->columns;
    while (--length)
    {
      /* high byte: add previous high byte plus carry from the low bytes */
      p[2]+=p[0]+((p[1]+p[3]) >> 8);
      p[3]+=p[1];
      p+=2;
    }
    p+=2;
    remaining-=row_size;
  }
}
/*
  Undo ZIP-with-prediction coding for 32-bit samples.  Each row stores the
  four bytes of every sample in separate byte planes, delta coded bytewise;
  rows are first delta-decoded in place, then the planes are interleaved
  into 'output_pixels' as big-endian 32-bit samples.
*/
static void Unpredict32Bit(const Image *image,unsigned char *pixels,
  unsigned char *output_pixels,const size_t row_size)
{
  unsigned char
    *p,
    *q;

  ssize_t
    y;

  size_t
    offset1,
    offset2,
    offset3,
    remaining;

  unsigned char
    *start;

  /* offsets of the 2nd, 3rd and 4th byte planes within a row */
  offset1=image->columns;
  offset2=2*offset1;
  offset3=3*offset1;
  p=pixels;
  q=output_pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    start=p;
    /* delta-decode the whole row bytewise */
    remaining=row_size;
    while (--remaining)
    {
      *(p+1)+=*p;
      p++;
    }
    /* interleave the four byte planes into 32-bit samples */
    p=start;
    remaining=image->columns;
    while (remaining--)
    {
      *(q++)=*p;
      *(q++)=*(p+offset1);
      *(q++)=*(p+offset2);
      *(q++)=*(p+offset3);
      p++;
    }
    p=start+row_size;
  }
}
/*
  Read a ZIP compressed channel: inflate the compressed blob, optionally
  undo the per-row delta prediction, then convert each row with
  ReadPSDChannelPixels().  Returns MagickFalse on truncated input,
  allocation failure, a zlib error or a pixel failure.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  unsigned char
    *p;

  size_t
    count,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  /* sanity: the compressed data cannot exceed the blob itself */
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      if (packet_size == 1)
        Unpredict8Bit(image,pixels,count,row_size);
      else if (packet_size == 2)
        Unpredict16Bit(image,pixels,count,row_size);
      else if (packet_size == 4)
        {
          unsigned char
            *output_pixels;

          output_pixels=(unsigned char *) AcquireQuantumMemory(count,
            sizeof(*output_pixels));
          /*
            Check the buffer just allocated ('pixels' is already known to
            be non-NULL here); otherwise an allocation failure dereferences
            NULL inside Unpredict32Bit().
          */
          if (output_pixels == (unsigned char *) NULL)
            {
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
              pixels=(unsigned char *) RelinquishMagickMemory(pixels);
              ThrowBinaryException(ResourceLimitError,
                "MemoryAllocationFailed",image->filename);
            }
          Unpredict32Bit(image,pixels,output_pixels,row_size);
          pixels=(unsigned char *) RelinquishMagickMemory(pixels);
          pixels=output_pixels;
        }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read and decode the pixel data for one channel of a PSD layer.

  The channel's compressed data begins at the current blob position and is
  layer_info->channel_info[channel].size-2 bytes long (the 2-byte compression
  marker was already consumed by the caller).  Negative channel types < -1
  denote a layer-mask channel: its pixels are decoded into a separate
  grayscale mask image attached to layer_info->mask.image instead of the
  layer image itself.  On any decode path the blob is repositioned to the
  end of the channel data before returning, so the caller can continue with
  the next channel.  Returns MagickTrue on success, MagickFalse on failure
  (via ThrowBinaryException).
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,  /* destination for decoded pixels: layer or mask */
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the channel data entirely; nothing to decode. */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      /* Decode the mask channel into its own grayscale image. */
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
        }
    }
  /* Remember the channel start so we can seek past it afterwards. */
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;  /* per-row compressed byte counts preceding RLE data */

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /*
    Always reposition to the end of the channel data, even on a partial or
    failed decode, so subsequent channels stay aligned.
  */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      /* Replace any previously attached mask with the freshly decoded one. */
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}
/*
  Read all channel data for one PSD layer into layer_info->image.

  Sets the layer image's compose operator from the layer's blend key,
  records psd:layer.x/y/opacity artifacts and the "label" property, then
  reads each channel (each preceded in the stream by its own 2-byte
  compression marker) via ReadPSDChannel.  Afterwards the layer opacity is
  applied, CMYK data is negated, and any decoded layer mask is either
  composited as an opacity mask or preserved verbatim when the
  "psd:preserve-opacity-mask" option is set.  Returns MagickTrue on
  success.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "    setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  /* Hidden layers do not participate in compositing. */
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "    reading data for channel %.20g",(double) j);
    /* Each channel carries its own compression marker in the stream. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* Channel type -1 is the layer's alpha channel. */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  Validate that a layer supplies every color channel the image mode
  requires.  Each required channel is a bit in `expected' and is cleared
  as soon as a channel record of the matching type is seen.  Returns
  MagickTrue when all required channels (optionally plus an alpha
  channel) are present.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    expected;

  ssize_t
    j;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  expected=RedChannel;
  if (psd_info->min_channels >= 3)
    expected|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    expected|=BlackChannel;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    short
      channel;

    channel=layer_info->channel_info[j].type;
    /* Indexed images must store the index data in the first channel. */
    if ((j == 0) && (psd_info->mode == IndexedMode) && (channel != 0))
      return(MagickFalse);
    if (channel == -1)
      {
        expected|=AlphaChannel;
        continue;
      }
    if (channel < -1)
      continue;  /* layer-mask channels are not color channels */
    switch (channel)
    {
      case 0: expected&=~RedChannel; break;
      case 1: expected&=~GreenChannel; break;
      case 2: expected&=~BlueChannel; break;
      case 3: expected&=~BlackChannel; break;
      default: break;
    }
  }
  if (expected == 0)
    return(MagickTrue);
  if ((expected == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Attach the decoded layer images to the main image list.  Entries whose
  image could not be read are compacted out first; the survivors are then
  chained into a doubly linked list hanging off `image' and the layer_info
  array itself is released.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  ssize_t
    i,
    j;

  /* Compact: drop entries with no image, shifting later entries down. */
  i=0;
  while (i < number_layers)
  {
    if (layer_info[i].image != (Image *) NULL)
      {
        i++;
        continue;
      }
    for (j=i; j < (number_layers-1); j++)
      layer_info[j]=layer_info[j+1];
    number_layers--;
  }
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  /* Link the surviving layer images and assign each its page geometry. */
  for (i=0; i < number_layers; i++)
  {
    if (i > 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}
/*
  Decide whether the layer at `index' falls outside the scene range the
  caller requested.  Skipping only applies when a merged composite exists
  (otherwise every layer is needed) and a scene range was actually given.
*/
static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info,
  const ImageInfo *image_info,const size_t index)
{
  if ((psd_info->has_merged_image == MagickFalse) ||
      (image_info->number_scenes == 0))
    return(MagickFalse);
  if ((index >= image_info->scene) &&
      (index <= image_info->scene+image_info->number_scenes-1))
    return(MagickFalse);
  return(MagickTrue);
}
static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image)
{
  size_t
    opaque_channels;

  /*
    The number of layers cannot be used to determine if the merged image
    contains an alpha channel.  So we enable it when the channel count
    exceeds what the color mode itself requires.
  */
  switch (psd_info->mode)
  {
    case GrayscaleMode: opaque_channels=1; break;
    case RGBMode: opaque_channels=3; break;
    case CMYKMode: opaque_channels=4; break;
    default: return;
  }
  if (psd_info->channels > opaque_channels)
    image->alpha_trait=BlendPixelTrait;
}
/*
  Scan the layer's additional-information blocks (sequences of
  signature/key/length records) for a Unicode layer name ("luni") and,
  when found, copy its ASCII subset into layer_info->name.

  Fix: the original bounds check `length * 2 > size - 4` is unsafe on two
  counts: when a corrupt record carries size < 4 the unsigned subtraction
  underflows to a huge value, and when length >= 2^31 the multiplication
  wraps — either way the check is bypassed and the parser reads past the
  record.  We now require size >= 4 up front and compare length against
  (size-4)/2, which is overflow-free and equivalent for all valid records.
*/
static void ParseAdditionalInfo(LayerInfo *layer_info)
{
  char
    key[5];

  size_t
    remaining_length;

  unsigned char
    *p;

  unsigned int
    size;

  p=GetStringInfoDatum(layer_info->info);
  remaining_length=GetStringInfoLength(layer_info->info);
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      break;
    if (LocaleNCompare(key,"luni",sizeof(key)) == 0)
      {
        unsigned char
          *name;

        unsigned int
          length;

        /*
          The payload must at least hold the 4-byte character count;
          checking this first also prevents unsigned underflow of size-4
          below.
        */
        if (size < 4)
          break;
        length=(unsigned int) (*p++) << 24;
        length|=(unsigned int) (*p++) << 16;
        length|=(unsigned int) (*p++) << 8;
        length|=(unsigned int) (*p++);
        /*
          The name is UTF-16 (2 bytes per character); compare against
          (size-4)/2 so a huge character count cannot overflow length*2
          and bypass the bounds check.
        */
        if (length > (size-4)/2)
          break;
        if (sizeof(layer_info->name) <= length)
          break;
        name=layer_info->name;
        while (length > 0)
        {
          /* Only ASCII strings are supported */
          if (*p++ != '\0')
            break;
          *name++=*p++;
          length--;
        }
        if (length == 0)
          *name='\0';
        break;
      }
    else
      p+=size;
    remaining_length-=(size_t) size;
  }
}
/*
  Determine the byte size of the PSD layer-info section.

  Normally the size is read directly via GetPSDSize.  When that size is
  zero the layer data may instead live inside an additional-information
  block: skip 4 bytes, require an "8BIM" signature, optionally consume a
  transparency block ("Mt16"/"Mt32"/"Mtrn" — which also marks the image as
  having alpha), and finally read the size from a 16/32-bit layer block
  ("Lr16"/"Lr32").  Returns 0 when no layer info can be located.
*/
static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image)
{
  char
    type[4];

  MagickSizeType
    size;

  ssize_t
    count;

  size=GetPSDSize(psd_info,image);
  if (size != 0)
    return(size);
  /* Skip the additional-info block length and check its signature. */
  (void) ReadBlobLong(image);
  count=ReadPSDString(image,type,4);
  if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
    return(0);
  count=ReadPSDString(image,type,4);
  if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) ||
      (LocaleNCompare(type,"Mt32",4) == 0) ||
      (LocaleNCompare(type,"Mtrn",4) == 0)))
    {
      /*
        Transparency block: only an empty one is expected here; a
        non-zero size means the stream is not a bare Lr16/Lr32 layout.
      */
      size=GetPSDSize(psd_info,image);
      if (size != 0)
        return(0);
      image->alpha_trait=BlendPixelTrait;
      count=ReadPSDString(image,type,4);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(0);
      count=ReadPSDString(image,type,4);
    }
  if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
      (LocaleNCompare(type,"Lr32",4) == 0)))
    size=GetPSDSize(psd_info,image);
  return(size);
}
/*
  Parse the PSD layer-records section and read the pixel data of every
  layer.

  First pass: read each layer record (bounding box, channel table, blend
  key, opacity/clipping/visibility flags, optional mask info, blending
  ranges, Pascal-string name, and any trailing additional info).  Second
  pass: allocate an image per non-empty layer.  Third pass: read pixel
  data for each layer not excluded by the requested scene range.  On
  success the decoded layers are attached to `image' via AttachPSDLayers.
  When skip_layers is set, only the layer count (and hence the alpha hint)
  is consumed.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  ssize_t
    i;

  ssize_t
    count,
    index,
    j,
    number_layers;

  size=GetLayerInfoSize(psd_info,image);
  if (size == 0)
    {
      /* No layer section: the merged image may still carry alpha. */
      CheckMergedImageAlpha(psd_info,image);
      return(MagickTrue);
    }
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /*
    We only need to know if the image has an alpha channel
  */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "  image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "  allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  /*
    Pass 1: parse every layer record.
  */
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "  reading layer #%.20g",(double) i+1);
    /* Layer bounding box, stored top/left/bottom/right. */
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "    offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* Channel table: type and compressed data size per channel. */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "    channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    /* Blend mode signature must be "8BIM", followed by the blend key. */
    count=ReadPSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "  layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadPSDString(image,layer_info[i].blendkey,4);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    /* Flag bit 0x02 marks the layer as hidden. */
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "   blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image);  /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,  /* bytes of the extra-data field consumed so far */
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "    layer contains additional info");
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            /*
              Mask flag 0x01 means position is relative to the layer;
              otherwise convert to layer-relative coordinates here.
            */
            if (!(layer_info[i].mask.flags & 0x01))
              {
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "      layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
              NOTE(review): length-18 underflows for length < 18 — relies
              on DiscardBlobBytes failing on the resulting huge value;
              confirm upstream.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "      layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name.
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "      layer name: %s",layer_info[i].name);
        /* The Pascal-string name is padded to a multiple of 4 bytes. */
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /* Whatever remains of the extra-data field is additional info. */
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
            ParseAdditionalInfo(&layer_info[i]);
          }
      }
  }
  /*
    Pass 2: allocate an image per non-empty layer.
  */
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "      layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "  allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  /* When pinging, attach the (pixel-less) layers and return early. */
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  /*
    Pass 3: read pixel data, skipping layers outside the scene range.
  */
  status=MagickTrue;
  index=0;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(psd_info, image_info,++index) != MagickFalse))
      {
        /* Still must consume the channel data to stay stream-aligned. */
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "  reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}
/*
  Public entry point for decoding the PSD layer section.  When coder
  policy denies read rights for "PSD" the layers are silently skipped and
  success is reported; otherwise delegate to ReadPSDLayersInternal with
  layer skipping disabled.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  if (IsRightsAuthorized(CoderPolicyDomain,ReadPolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}
/*
  Read the merged (flattened) composite image that follows the layer
  section of a PSD file.

  Only Raw and RLE compression are supported here; other methods produce
  a warning and MagickFalse.  Channels are stored planar, one after the
  other; in a two-channel grayscale file the second channel is treated as
  alpha (type -1).  CMYK data is negated and the alpha blend corrected
  before returning.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    i;

  /* The merged image is only wanted for scene 0 of an explicit range. */
  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* One RLE row-size entry per row per channel precedes the data. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* Grayscale+alpha: the second channel is the alpha channel. */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  /* RelinquishMagickMemory is NULL-safe, so no guard is needed for Raw. */
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
skip_layers;
MagickOffsetType
offset;
MagickSizeType
length;
MagickBooleanType
status;
PSDInfo
psd_info;
ssize_t
i;
size_t
image_list_length;
ssize_t
count;
StringInfo
*profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read image header.
*/
image->endian=MSBEndian;
count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
psd_info.version=ReadBlobMSBShort(image);
if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
((psd_info.version != 1) && (psd_info.version != 2)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
(void) ReadBlob(image,6,psd_info.reserved);
psd_info.channels=ReadBlobMSBShort(image);
if (psd_info.channels < 1)
ThrowReaderException(CorruptImageError,"MissingImageChannel");
if (psd_info.channels > MaxPSDChannels)
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
psd_info.rows=ReadBlobMSBLong(image);
psd_info.columns=ReadBlobMSBLong(image);
if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
(psd_info.columns > 30000)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.depth=ReadBlobMSBShort(image);
if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
(psd_info.depth != 16) && (psd_info.depth != 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
psd_info.mode=ReadBlobMSBShort(image);
if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
(double) psd_info.columns,(double) psd_info.rows,(double)
psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
psd_info.mode));
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Initialize image.
*/
image->depth=psd_info.depth;
image->columns=psd_info.columns;
image->rows=psd_info.rows;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
status=ResetImagePixels(image,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
psd_info.min_channels=3;
if (psd_info.mode == LabMode)
(void) SetImageColorspace(image,LabColorspace,exception);
if (psd_info.mode == CMYKMode)
{
psd_info.min_channels=4;
(void) SetImageColorspace(image,CMYKColorspace,exception);
}
else
if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
(psd_info.mode == DuotoneMode))
{
if (psd_info.depth != 32)
{
status=AcquireImageColormap(image,MagickMin((size_t)
(psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image colormap allocated");
}
psd_info.min_channels=1;
(void) SetImageColorspace(image,GRAYColorspace,exception);
}
else
if (psd_info.mode == IndexedMode)
psd_info.min_channels=1;
if (psd_info.channels < psd_info.min_channels)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
/*
Read PSD raster colormap only present for indexed and duotone images.
*/
length=ReadBlobMSBLong(image);
if ((psd_info.mode == IndexedMode) && (length < 3))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (length != 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading colormap");
if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
{
/*
Duotone image data; the format of this data is undocumented.
32 bits per pixel; the colormap is ignored.
*/
(void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
}
else
{
size_t
number_colors;
/*
Read PSD raster colormap.
*/
number_colors=(size_t) length/3;
if (number_colors > 65536)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
(unsigned char) ReadBlobByte(image));
image->alpha_trait=UndefinedPixelTrait;
}
}
if ((image->depth == 1) && (image->storage_class != PseudoClass))
ThrowReaderException(CorruptImageError, "ImproperImageHeader");
psd_info.has_merged_image=MagickTrue;
profile=(StringInfo *) NULL;
length=ReadBlobMSBLong(image);
if (length != 0)
{
unsigned char
*blocks;
/*
Image resources block.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading image resource blocks - %.20g bytes",(double)
((MagickOffsetType) length));
if (length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
sizeof(*blocks));
if (blocks == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,(size_t) length,blocks);
if ((count != (ssize_t) length) || (length < 4) ||
(LocaleNCompare((char *) blocks,"8BIM",4) != 0))
{
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length);
blocks=(unsigned char *) RelinquishMagickMemory(blocks);
}
/*
Layer and mask block.
*/
length=GetPSDSize(&psd_info,image);
if (length == 8)
{
length=ReadBlobMSBLong(image);
length=ReadBlobMSBLong(image);
}
offset=TellBlob(image);
skip_layers=MagickFalse;
if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
(psd_info.has_merged_image != MagickFalse))
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" read composite only");
skip_layers=MagickTrue;
}
if (length == 0)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" image has no layers");
}
else
{
if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
exception) != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Skip the rest of the layer and mask information.
*/
(void) SeekBlob(image,offset+length,SEEK_SET);
}
/*
If we are only "pinging" the image, then we're done - so return.
*/
if (EOFBlob(image) != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
}
if (image_info->ping != MagickFalse)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
Read the precombined layer, present for PSD < 4 compatibility.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" reading the precombined layer");
image_list_length=GetImageListLength(image);
if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1))
psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage(
image_info,image,&psd_info,exception);
if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) &&
(length != 0))
{
(void) SeekBlob(image,offset,SEEK_SET);
status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
exception);
if (status != MagickTrue)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
image_list_length=GetImageListLength(image);
}
if (psd_info.has_merged_image == MagickFalse)
{
Image
*merged;
if (image_list_length == 1)
{
if (profile != (StringInfo *) NULL)
profile=DestroyStringInfo(profile);
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
}
image->background_color.alpha=(MagickRealType) TransparentAlpha;
image->background_color.alpha_trait=BlendPixelTrait;
(void) SetImageBackgroundColor(image,exception);
merged=MergeImageLayers(image,FlattenLayer,exception);
if (merged == (Image *) NULL)
{
(void) CloseBlob(image);
image=DestroyImageList(image);
return((Image *) NULL);
}
ReplaceImageInList(&image,merged);
}
if (profile != (StringInfo *) NULL)
{
Image
*next;
i=0;
next=image;
while (next != (Image *) NULL)
{
if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse)
(void) SetImageProfile(next,GetStringInfoName(profile),profile,
exception);
next=next->next;
}
profile=DestroyStringInfo(profile);
}
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  /*
    Register the PSB and PSD coders (decoder, encoder, format probe) with
    the MagickCore format registry.  Both entries require seekable streams
    for reading and writing.
  */
  static const struct
  {
    const char
      *name,
      *description;
  } formats[2] =
  {
    { "PSB", "Adobe Large Document Format" },
    { "PSD", "Adobe Photoshop bitmap" }
  };

  MagickInfo
    *entry;

  size_t
    n;

  for (n=0; n < (sizeof(formats)/sizeof(formats[0])); n++)
  {
    entry=AcquireMagickInfo("PSD",formats[n].name,formats[n].description);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    entry->flags|=CoderEncoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  /*
    Emit a row-offset entry: 16-bit big-endian for PSD (version 1),
    32-bit big-endian for PSB.  Returns the byte count written.
  */
  ssize_t
    count;

  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) offset) :
    WriteBlobMSBLong(image,(unsigned int) offset);
  return(count);
}
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  /*
    Back-patch an offset field previously written at `offset` (16-bit for
    PSD version 1, otherwise 32-bit), then restore the current write
    position.  Returns the byte count written by the patch.
  */
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBShort(image,(unsigned short) size) :
    WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  /*
    Emit a section-length field: 32-bit for PSD (version 1), 64-bit for
    PSB.  Returns the byte count written.
  */
  if (psd_info->version != 1)
    return(WriteBlobLongLong(image,size));
  return(WriteBlobLong(image,(unsigned int) size));
}
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  /*
    Back-patch a size field at `offset` via SetPSDSize() without disturbing
    the current write position.  Returns the byte count written.
  */
  MagickOffsetType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  count=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  /*
    Compress `length` raw bytes from `pixels` into `compact_pixels` using
    Packbits run-length encoding and return the number of compacted bytes
    (0 on allocation failure, via ThrowBinaryException).  `compact_pixels`
    must be large enough for the worst case (see AcquireCompactPixels()).
    Packbits headers: n in [0,127] means n+1 literal bytes follow; a header
    of -(n-1) (mod 256) means the next byte repeats n times.
  */
  int
    count;

  ssize_t
    i,
    j;

  unsigned char
    *q;

  unsigned char
    *packbits;  /* 128-byte staging buffer for one literal run */

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* i counts the input bytes still to be encoded. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single byte left: literal run of length 1. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two bytes left: literal run of length 2. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        /* Three bytes left: packed run if all equal, else 3 literals. */
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);  /* header -2: repeat 3x */
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* Extend the run, capped at 127 repeats per Packbits packet. */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);  /* header -(count-1) */
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Collect literals until a 3-byte repeat starts, end of data
           nears, or the 127-literal packet limit is reached. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);  /* header: count literals */
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  /*
    Write the 2-byte compression tag for the channel data that follows.
    For RLE, additionally reserve one zeroed offset slot per channel row;
    these placeholders are back-patched later with the per-row byte counts.
    Returns the number of bytes written.
  */
  size_t
    count;

  ssize_t
    channel,
    row;

  switch (compression)
  {
    case RLECompression:
    {
      count=(size_t) WriteBlobShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      break;
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    case ZipCompression:
    {
      count=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
      break;
    }
#endif
    default:
    {
      /* Anything else (including Zip without zlib support) is written raw. */
      count=(size_t) WriteBlobShort(image,Raw);
      break;
    }
  }
  return(count);
}
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  /*
    Write one channel (`quantum_type`) of `next_image` into the output
    blob `image`, row by row, using Raw, RLE (Packbits) or, when zlib is
    available, Zip compression.  When `separate` is set (layer channel
    data) a per-channel compression tag is written first and `size_offset`
    is re-derived from the current blob position.  For RLE, `size_offset`
    tracks the row-length placeholders that get back-patched per row.
    Returns the number of bytes written, or 0 on allocation failure.
  */
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  const Quantum
    *p;

  ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression tag so size_offset points at the first
         RLE row-length placeholder. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  /* PSD stores only 1/8/16-bit channels; clamp intermediate depths to 16. */
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(
        MagickMinBufferExtent,sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* quality 1-9 selects the zlib compression level directly. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* 1-bit monochrome data is stored inverted in PSD. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Back-patch this row's byte count into its placeholder slot. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        /* Finish the deflate stream on the last row. */
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
            stream.avail_out=(uInt) MagickMinBufferExtent;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) MagickMinBufferExtent-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Allocate a worst-case Packbits output buffer for one row of `image`
    ((9*columns)+1 packets, doubled for 16-bit samples).  On failure a
    ResourceLimitError is recorded in `exception` and NULL is returned.
  */
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(compact_pixels);
}
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  /*
    Write all channels of `next_image` (index or gray or RGB/CMYK, plus
    alpha, plus an optional opacity-mask channel) into the blob `image`.
    When `separate` is set, each channel carries its own compression tag
    and its length is back-patched at `size_offset` (the layer's channel
    length records); otherwise a single compression header covers all
    channels and `rows_offset` walks the shared RLE row-length table.
    Returns the total bytes written, or 0 on allocation failure.
  */
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Merged-image data: count the channels covered by the single
         compression header (3/4 color channels plus alpha). */
      if ((next_image->storage_class != PseudoClass) ||
          (IsImageGray(next_image) != MagickFalse))
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* Size of one channel's slice of the RLE row-length table. */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if ((next_image->storage_class == PseudoClass) &&
      (IsImageGray(next_image) == MagickFalse))
    {
      /* Indexed image: single index channel. */
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* CMYK data is stored inverted in PSD; negate before writing
             and restore after all channels are out (below). */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Undo the CMYK negation applied above. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* Optional layer opacity mask, stashed in the image registry by the
         reader/caller under the "psd:opacity-mask" artifact. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  /*
    Write `value` as a Pascal string: a length byte followed by at most
    255 characters, then zero padding so the total size (including the
    length byte) is a multiple of `padding`.  Returns the number of bytes
    written.
  */
  size_t
    count,
    length,
    value_length;

  ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  value_length=strlen(value);  /* hoisted: evaluate strlen() once */
  length=(value_length > 255UL) ? 255UL : value_length;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
static void WriteResolutionResourceBlock(Image *image)
{
  /*
    Emit the 8BIM resolution image-resource block (id 0x03ED).  The
    horizontal and vertical resolutions are written as 16.16 fixed-point
    pixels-per-inch values; the unit fields use 1 for pixels/inch and 2
    for pixels/cm.
  */
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* Convert pixels/cm to pixels/inch (2.54 cm per inch), scaled to
         16.16 fixed point. */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);  /* empty Pascal-string resource name */
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* NOTE(review): 0.5 is added both when computing x_resolution above and
     again here, rounding twice; this mirrors the existing output exactly —
     confirm before changing. */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  /*
    Write one layer channel record: the 16-bit channel id followed by a
    zeroed length field that is back-patched later via WritePSDSize().
    Returns the byte count written.
  */
  size_t
    count;

  count=(size_t) WriteBlobShort(image,(unsigned short) channel);
  return(count+SetPSDSize(psd_info,image,0));
}
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  /*
    Scan the 8BIM resource block `bim_profile` and excise the embedded ICC
    profile resource (id 0x040F) in place, shrinking the StringInfo.  The
    separately-attached "icc" profile is written on its own, so keeping it
    here would duplicate it.  Stops at the first non-8BIM signature.
  */
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* Walk resource entries; 16 is the minimum bytes for one entry header. */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    q=(unsigned char *) p;  /* start of this resource entry */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* Skip signature, read id, skip name, read data length. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* Entry size: 12-byte header plus data padded to even length. */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            /* Shift the remaining entries over the ICC entry. */
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
}
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  /*
    Scan the 8BIM resource block `bim_profile` and excise the resolution
    resource (id 0x03ED) in place; WriteResolutionResourceBlock() emits a
    fresh one, so a stale copy here would conflict.  Returns early on the
    first malformed or non-8BIM entry.
  */
  const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  /* Walk resource entries; 16 is the minimum bytes for one entry header. */
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;  /* start of this resource entry */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    /* Skip signature, read id, skip name, read data length. */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);  /* data length padded to even */
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        /* Shift the remaining entries over this one and shrink. */
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
}
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  /*
    Return the "psd:additional-info" profile to embed in the output layer
    records, honoring the "psd:additional-info" image option:
      "all"       - pass the stored profile through unchanged;
      "selective" - filter it in place, keeping only whitelisted keys;
      otherwise   - drop the profile entirely and return NULL.
    The filtered profile is re-attached to the image (SetImageProfile
    clones it) and the trimmed original is returned; NULL is also returned
    if the profile is absent, malformed, or filters down to nothing.
  */
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* Neither "all" nor "selective": discard the profile. */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;  /* re-used: accumulates the size of the kept entries */
  /* Each entry: 4-byte signature, 4-byte key, 4-byte size, then data. */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    /* Big-endian 32-bit data size. */
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* Declared size exceeds the data: malformed profile, bail out. */
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* Not whitelisted: slide the rest of the data over this entry. */
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* `profile` and `info` reference the same datum; truncate to the kept
     entries and re-attach a clone to the image. */
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  /*
    Write the PSD "layer info" section: a size field (back-patched at the
    end), the layer count, one layer record per image in the list (bounds,
    channel table, blend mode, opacity, flags, optional mask, name,
    additional info), then the per-layer channel pixel data.  If
    `layers_size` is non-NULL the unrounded section size is stored there.
    Returns MagickTrue on success.
  */
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    rounded_size,
    size;

  status=MagickTrue;
  /* Layers start at the second image; a single image is its own layer. */
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* Placeholder for the section size; patched via WritePSDSize() below. */
  (void) SetPSDSize(psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* A negative layer count signals that the first alpha channel holds the
     merged-image transparency. */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobShort(image,(unsigned short) layer_count);
  /* Remember where each layer's channel-length table starts so the real
     channel sizes can be patched in after the pixel data is written. */
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        /* Opacity mask stashed in the image registry by the reader. */
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0);
      }
    /* Layer bounds: top, left, bottom, right. */
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.y);
    size+=WriteBlobSignedLong(image,(signed int) next_image->page.x);
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+
      next_image->columns));
    channels=1;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 :
        3);
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobShort(image,total_channels);
    layer_size_offsets[layer_index++]=TellBlob(image);
    /* Channel table: ids 0..n for color, -1 for alpha, -2 for the mask;
       the length fields are zero placeholders patched later. */
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(psd_info,image,-2);
    /* Blend-mode signature; WriteBlobString honors blob endianness, so a
       pre-reversed literal is used for LSB blobs. */
    size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM");
    size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        /* Bake the layer opacity into the pixel data before writing. */
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0);  /* clipping: base */
    size+=WriteBlobByte(image,(const unsigned char)
      (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0);  /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* Unnamed layer: synthesize "L<index>". */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* Extra-data length: padded Pascal name + mask block + fixed fields. */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobLong(image,0);  /* no mask data */
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        /* Mask bounds are stored in canvas coordinates. */
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        size+=WriteBlobLong(image,20);  /* mask record size */
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.y);
        size+=WriteBlobSignedLong(image,(const signed int) mask->page.x);
        size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+
          mask->page.y));
        size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+
          mask->page.x));
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,(const unsigned char)
          (mask->compose == NoCompositeOp ? 2 : 0));  /* mask flags */
        size+=WriteBlobMSBShort(image,0);  /* padding */
      }
    size+=WriteBlobLong(image,0);  /* blending-ranges length */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    /* separate=MagickTrue: channel lengths are patched back into the
       per-layer channel table recorded above. */
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  /* The section size is rounded up to an even byte count. */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  /*
    Public entry point for writing the PSD layer section.  When the
    security policy denies write rights for the PSD coder, the section is
    silently skipped and MagickTrue is returned.
  */
  if (IsRightsAuthorized(CoderPolicyDomain,WritePolicyRights,"PSD") ==
      MagickFalse)
    return(MagickTrue);
  return(WritePSDLayersInternal(image,image_info,psd_info,(size_t *) NULL,
    exception));
}
/*
  WritePSDImage() writes an image in the Adobe Photoshop (PSD/PSB) format:
  file header, optional colormap, image resource block (resolution, 8BIM
  and ICC profiles), the layer section, and finally the composite image
  channels.  Returns MagickTrue on success; on failure the blob is still
  closed and MagickFalse is returned.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* bytes per pixel: 3 (RGB) or 6 (16-bit RGB), +1/+2 for an alpha channel */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  /* version 2 selects the PSB (large document) variant; PSD proper is
     limited to 30000x30000 pixels */
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      /* choose RGB/Indexed unless the caller explicitly requested CMYK */
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0); /* no color-mode data */
  else
    {
      /*
        Write PSD raster colormap: 256 red, 256 green then 256 blue bytes,
        zero-padded past the actual palette length.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* work on a clone: the ICC and resolution resources are emitted
         separately, so strip them from the stored 8BIM block */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* resource id 0x040F = ICC profile; pad to an even byte count */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      MagickOffsetType
        size_offset;

      size_t
        size;

      /* write a zero placeholder for the layer-section length, emit the
         layers, then seek back and patch in the real size */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
        exception);
      size_offset+=WritePSDSize(&psd_info,image,size+
        (psd_info.version == 1 ? 8 : 12),size_offset);
    }
  (void) WriteBlobMSBLong(image,0); /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      /* the composite section does not support Zip; fall back to RLE */
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression; /* restore caller-visible setting */
    }
  (void) CloseBlob(image);
  return(status);
}
|
GB_unop__tan_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__tan_fc64_fc64
// op(A') function: GB_unop_tran__tan_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = ctan (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ctan (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = ctan (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TAN || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Cx = ctan (Ax): apply the complex-tangent unary operator, elementwise
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__tan_fc64_fc64
(
    GxB_FC64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t z = Ax [p] ;
            Cx [p] = ctan (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                GxB_FC64_t z = Ax [p] ;
                Cx [p] = ctan (z) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ctan (A'): transpose A and apply the complex-tangent operator.  The
// actual transpose loop lives in the shared template GB_unop_transpose.c,
// which is specialized here via the GB_* macros defined above; this wrapper
// only supplies the workspace/slice arguments and the disabled-op fallback.
GrB_Info GB_unop_tran__tan_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // per-workspace buffers for the template
    const int64_t *GB_RESTRICT A_slice, // how A is partitioned across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out (GxB_NO_TAN or GxB_NO_FC64): use generic path
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/timer.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/version.h"
#include "magick/xwindow-private.h"
/*
Constant declaration.
*/
/*
  File-scope constants: default colors, tile geometry, progress-monitor tag
  strings, and PostScript page defaults used throughout MagickCore.
*/
const char
  BackgroundColor[] = "#ffffff",  /* white */
  BorderColor[] = "#dfdfdf",  /* gray */
  DefaultTileFrame[] = "15x15+3+3",
  DefaultTileGeometry[] = "120x120+4+3>",
  DefaultTileLabel[] = "%f\n%G\n%b",
  ForegroundColor[] = "#000",  /* black */
  LoadImageTag[] = "Load/Image",
  LoadImagesTag[] = "Load/Images",
  MatteColor[] = "#bdbdbd",  /* gray */
  PSDensityGeometry[] = "72.0x72.0",
  PSPageGeometry[] = "612x792",
  SaveImageTag[] = "Save/Image",
  SaveImagesTag[] = "Save/Images",
  TransparentColor[] = "#00000000";  /* transparent black */

/* default resolution in dots per inch */
const double
  DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireMagickMemory(sizeof(*image));
  if (image == (Image *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image,0,sizeof(*image));
  /*
    Initialize Image structure: MIFF format, sRGB, and the standard
    Rec.709 primaries / D65 white point for the chromaticity defaults.
  */
  (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=sRGBColorspace;
  image->rendering_intent=PerceptualIntent;
  image->gamma=1.000f/2.200f;  /* reciprocal of the sRGB display gamma */
  image->chromaticity.red_primary.x=0.6400f;
  image->chromaticity.red_primary.y=0.3300f;
  image->chromaticity.red_primary.z=0.0300f;
  image->chromaticity.green_primary.x=0.3000f;
  image->chromaticity.green_primary.y=0.6000f;
  image->chromaticity.green_primary.z=0.1000f;
  image->chromaticity.blue_primary.x=0.1500f;
  image->chromaticity.blue_primary.y=0.0600f;
  image->chromaticity.blue_primary.z=0.7900f;
  image->chromaticity.white_point.x=0.3127f;
  image->chromaticity.white_point.y=0.3290f;
  image->chromaticity.white_point.z=0.3583f;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  image->blur=1.0;
  GetExceptionInfo(&image->exception);
  (void) QueryColorDatabase(BackgroundColor,&image->background_color,
    &image->exception);
  (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
  (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
  (void) QueryColorDatabase(TransparentColor,&image->transparent_color,
    &image->exception);
  GetTimerInfo(&image->timer);
  image->ping=MagickFalse;
  image->cache=AcquirePixelCache(0);
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->timestamp=time((time_t *) NULL);
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AllocateSemaphoreInfo();
  image->signature=MagickSignature;
  /* with no ImageInfo, the defaults above are the final state */
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MaxTextExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
  if (image_info->size != (char *) NULL)
    {
      /* -size WxH+X: X becomes the seek offset into the blob, not a page
         offset, so zero the extract origin afterwards */
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      /* only honor -extract when an explicit x or y offset was given */
      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /* density "RxS"; a single value applies to both axes */
      flags=ParseGeometry(image_info->density,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->matte_color=image_info->matte_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  (void) SyncImageSettings(image_info,image);
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /* delay syntax "rho>"/"rho<"/"rho,sigma": '>' caps the delay at rho,
         '<' raises it; sigma overrides ticks-per-second */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /* NOTE(review): this branch adjusts ticks_per_second (sigma)
               rather than delay when delay < rho — matches the upstream
               implementation, but verify the asymmetry is intentional */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *info;

  /*
    Allocate an ImageInfo structure and set every member to its default
    value; allocation failure is fatal.  Caller owns the result and must
    release it with DestroyImageInfo().
  */
  info=(ImageInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info);
  /* on allocation failure image->next stays NULL, per the contract */
  if (GetNextImageInList(image) == (Image *) NULL)
    return;
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MaxTextExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MaxTextExtent);
  /* replace the freshly allocated blob with a shared reference to the
     current image's blob, so the whole sequence reads from one stream */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now effects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    matte,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area: sum one dimension, take the
    max of the other, depending on stack direction.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  matte=images->matte;
  number_images=1;
  width=images->columns;
  height=images->rows;
  next=GetNextImageInList(images);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /* result has a matte channel if any input does */
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&append_image->exception);
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  append_image->matte=matte;
  (void) SetImageBackgroundColor(append_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    Image
      *image;

    MagickBooleanType
      proceed;

    /* clone so the colorspace transform does not mutate the input list */
    image=CloneImage(next,0,0,MagickTrue,exception);
    if (image == (Image *) NULL)
      break;
    status=TransformImageColorspace(image,append_image->colorspace);
    if (status == MagickFalse)
      break;
    /* gravity justifies each image within its slot of the canvas */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) shared(status) \
      magick_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register const IndexPacket
        *restrict indexes;

      register const PixelPacket
        *restrict p;

      register IndexPacket
        *restrict append_indexes;

      register PixelPacket
        *restrict q;

      register ssize_t
        x;

      /* a prior row failed; skip remaining rows (can't break in OpenMP) */
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        image->columns,1,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q,GetPixelRed(p));
        SetPixelGreen(q,GetPixelGreen(p));
        SetPixelBlue(q,GetPixelBlue(p));
        /* opaque by default; copy source opacity only when it has matte */
        SetPixelOpacity(q,OpaqueOpacity);
        if (image->matte != MagickFalse)
          SetPixelOpacity(q,GetPixelOpacity(p));
        /* black channel lives in the index queue for CMYK */
        if ((image->colorspace == CMYKColorspace) &&
            (append_image->colorspace == CMYKColorspace))
          SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
        p++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /* advance the insertion point for the next image in the row/column */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=DestroyImage(image);
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *sequence_exception;

  ExceptionType
    severity;

  /*
    Collect the most severe exception from the image sequence, report it
    as a warning or error, and return its severity to the caller.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sequence_exception=AcquireExceptionInfo();
  GetImageException(image,sequence_exception);
  CatchException(sequence_exception);
  severity=sequence_exception->severity;
  sequence_exception=DestroyExceptionInfo(sequence_exception);
  return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based any clipping path information
% if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
/* Convenience wrapper: clip to the first embedded clipping path ("#1"),
   with subsequent operations taking effect inside the path. */
MagickExport MagickBooleanType ClipImage(Image *image)
{
  return(ClipImagePath(image,"#1",MagickTrue));
}
/*
  ClipImagePath() sets the image clip mask from the named clipping-path
  stored in the image's 8BIM properties.  The path is looked up under
  "8BIM:1999,2998:<pathname>", rendered to a mask image via BlobToImage(),
  optionally negated (inside == MagickFalse makes later operations take
  effect outside the path), and attached with SetImageClipMask().

  Returns MagickTrue on success; MagickFalse when the path does not exist
  or the mask cannot be rendered or converted.
*/
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
#define ClipImagePathTag  "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask);
      if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
        {
          /* bug fix: the mask was leaked on this failure path */
          clip_mask=DestroyImage(clip_mask);
          return(MagickFalse);
        }
    }
  if (inside == MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse);
  (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  /* SetImageClipMask() clones the mask, so release our copy afterwards */
  (void) SetImageClipMask(image,clip_mask);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
% const size_t rows,const MagickBooleanType orphan,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  double
    scale;

  Image
    *clone_image;

  size_t
    length;

  /*
    Clone the image.  With columns == 0 or rows == 0 an exact copy is made
    and the pixel cache is shared by reference; otherwise the pixel data of
    the clone is undefined and the page/tile geometry is scaled to the
    requested size.  On failure NULL is returned and exception is set.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image));
  if (clone_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->channels=image->channels;
  clone_image->colorspace=image->colorspace;
  clone_image->matte=image->matte;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  if (image->colormap != (PixelPacket *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelPacket *) NULL)
        {
          /*
            Release the partially constructed clone before throwing;  at this
            point only the Image structure itself has been allocated (no
            blob, cache, or semaphore yet), so a plain release is correct and
            avoids leaking the structure.
          */
          clone_image=(Image *) RelinquishMagickMemory(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  /*
    Deep-copy ancillary image data: profiles, properties, artifacts.
  */
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  GetExceptionInfo(&clone_image->exception);
  InheritException(&clone_image->exception,&image->exception);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MaxTextExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  clone_image->clip_mask=NewImageList();
  clone_image->mask=NewImageList();
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      /*
        A detached clone gets empty list linkage and a fresh blob so it is
        independent of the parent's I/O stream.
      */
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AllocateSemaphoreInfo();
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy: clone montage/directory strings and masks, and share the
        pixel cache by reference.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  if ((columns == image->columns) && (rows == image->rows))
    {
      /*
        Same geometry requested: the masks remain meaningful, so clone them.
      */
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
    }
  /*
    Scale the page and tile geometry to the requested size, rounding to the
    nearest integer.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->columns=columns;
  clone_image->rows=rows;
  clone_image->cache=ClonePixelCache(image->cache);
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  /*
    AcquireImageInfo() returns a fully initialized, default-valued structure;
    a NULL argument is therefore answered with the defaults.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  /*
    Scalar members are copied by value.
  */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /*
    String members are deep-copied with CloneString() so the clone owns its
    own storage (CloneString() tolerates NULL sources).
  */
  (void) CloneString(&clone_info->size,image_info->size);
  (void) CloneString(&clone_info->extract,image_info->extract);
  (void) CloneString(&clone_info->scenes,image_info->scenes);
  (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  (void) CloneString(&clone_info->sampling_factor,image_info->sampling_factor);
  (void) CloneString(&clone_info->server_name,image_info->server_name);
  (void) CloneString(&clone_info->font,image_info->font);
  (void) CloneString(&clone_info->texture,image_info->texture);
  (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->pen=image_info->pen;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colors=image_info->colors;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  (void) CloneString(&clone_info->view,image_info->view);
  (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The pixel cache is shared by reference when present; the profile is
    deep-copied.
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->subimage=image_info->scene; /* deprecated */
  clone_info->subrange=image_info->number_scenes; /* deprecated */
  clone_info->channel=image_info->channel;
  /*
    The debug flag is refreshed from the current logging state rather than
    copied from the source.
  */
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Decrement the reference count under the image semaphore; only the last
    reference actually destroys the image.  When other references remain,
    NULL is returned and the image is left intact.
  */
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info*) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  DestroyBlob(image);
  (void) DestroyExceptionInfo(&image->exception);
  /*
    The semaphore is released last so the reference-count protocol above is
    valid for the whole lifetime of the image.
  */
  if (image->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&image->semaphore);
  /*
    Invalidate the signature so stale pointers trip the asserts above.
  */
  image->signature=(~MagickSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Release every owned member of the ImageInfo structure, then the
    structure itself.  Always returns NULL so callers can write
    info=DestroyImageInfo(info);
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    Free the heap-allocated string members.
  */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(
      image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(
      image_info->authenticate);
  /*
    Release the option list, the (possibly shared) pixel cache reference,
    and the cloned profile.
  */
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  /*
    Invalidate the signature so stale pointers trip the asserts above.
  */
  image_info->signature=(~MagickSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Detach the blob I/O stream from the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  (void) DetachBlob(image->blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClipMask() returns the clip path associated with the image.
%
% The format of the GetImageClipMask method is:
%
% Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Hand back a detached clone of the image clip mask, or NULL when no clip
    mask has been set.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->clip_mask != (Image *) NULL)
    return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
  return((Image *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageException() traverses an image sequence and returns any
% error more severe than noted by the exception parameter.
%
% The format of the GetImageException method is:
%
% void GetImageException(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a list of one or more images.
%
% o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  register Image
    *p;

  /*
    Sweep the image list: promote each image's exception into the caller's
    exception when it is more severe, and clear the per-image record.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->exception.severity == UndefinedException)
      continue;
    if (p->exception.severity > exception->severity)
      InheritException(exception,&p->exception);
    p->exception.severity=UndefinedException;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  /*
    Zero the whole structure, then set only the non-zero defaults.
  */
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /*
    Honor the MAGICK_SYNCHRONIZE environment variable when it is set.
  */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Resolve the default color names through a scratch exception so lookup
    warnings do not leak to the caller.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /* Simple accessor for the stdio stream attached to the image info. */
  return image_info->file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  /*
    Hand back a detached clone of the image mask, or NULL when no mask has
    been set.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->mask != (Image *) NULL)
    return(CloneImage(image->mask,0,0,MagickTrue,exception));
  return((Image *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannels() returns the number of pixel channels associated with the
% specified image.
%
% The format of the GetChannels method is:
%
% size_t GetImageChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport size_t GetImageChannels(Image *image)
{
  /* Simple accessor for the pixel channel count of the image. */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return image->channels;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    count;

  /*
    Snapshot the reference count under the image semaphore so the value is
    consistent with concurrent Reference/Destroy calls.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  LockSemaphoreInfo(image->semaphore);
  count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    The virtual pixel method is owned by the pixel cache; delegate to it.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return GetPixelCacheVirtualMethod(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  size_t
    length;

  /*
    Substitute the numeric value (and %[filename:...] properties) into the
    format string, writing the result to filename.  If no substitution is
    made, the format is copied verbatim.  Returns the length of the
    resulting filename.
  */
  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  /*
    Walk the '%' escapes of the format string.
  */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /* literal "%%": skip over the escaped percent */
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          value;

        /*
          Consume a zero-padded field width (e.g. "%03d"); only the advance
          of q past the digits matters, the parsed value is unused.
        */
        value=(ssize_t) strtol(q,&q,10);
        (void) value;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric escape: temporarily terminate the specifier, format the
          value in place, then re-append the remainder of the format.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t) (MaxTextExtent-
          (p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *value;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.  Extract the bracketed pattern, tracking nesting
          depth so inner '[' / ']' pairs stay balanced.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /*
          Only "%[filename:...]" patterns participate in filename
          interpretation.
        */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        value=(const char *) NULL;
#if 0
        /* FUTURE: remove this code. -- Anthony 29 April 2012
           Removed as GetMagickProperty() will never match a "filename:"
           string as this is not a 'known' image property.
        */
        if ((image_info != (const ImageInfo *) NULL) &&
            (image != (const Image *) NULL))
          value=GetMagickProperty(image_info,image,pattern);
        else
#endif
        /*
          Resolve the pattern as an image property, then an image artifact,
          then an image option; the first hit wins.
        */
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) &&
            (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        /*
          Splice the property value over the "%[...]" escape;  length tracks
          the cumulative shift between format and filename offsets from
          earlier substitutions.
        */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),value,(size_t)
          (MaxTextExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /*
    Collapse any remaining "%%" escapes to a single '%'.
  */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  /*
    No recognized escape: fall back to the format string verbatim.
  */
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Without HDRI support the answer is always MagickFalse.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status remains MagickTrue while every inspected component is an integral
    value within [0, QuantumRange]; it is also cleared on a pixel-cache
    failure, so a MagickTrue result below can also indicate a cache error.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    /* another thread already found an out-of-range component: skip the row */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /*
        A component is out of range when it is negative, exceeds
        QuantumRange, or has a fractional part (the QuantumAny round-trip
        changes its value).
      */
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      /* the opacity channel only participates when the image has a matte */
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      /* the index channel only participates for CMYK images */
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    /* an early break above means an out-of-range component was found */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    status == MagickTrue means all components were in range, i.e. NOT HDR.
  */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *q;

  /*
    Validate the signature of every image in the sequence; any mismatch
    means the list does not contain valid image objects.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  for (q=image; q != (Image *) NULL; q=GetNextImageInList(q))
    if (q->signature != MagickSignature)
      return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    filename[MaxTextExtent],
    magick[MaxTextExtent];

  register const Image
    *q;

  /*
    A sequence is tainted when any frame carries the taint flag or when a
    frame's magick/filename differs from that of the head of the list.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (q=image; q != (Image *) NULL; q=GetNextImageInList(q))
    if ((q->taint != MagickFalse) ||
        (LocaleCompare(q->magick,magick) != 0) ||
        (LocaleCompare(q->filename,filename) != 0))
      return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  /*
    Ensure *image is uniquely referenced: when other references exist,
    replace *image with a private clone and drop one reference on the
    original.  Returns MagickFalse (with exception set by CloneImage) when
    the clone cannot be created; in that case *image is left untouched.
  */
  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);  /* keep *image and its reference count intact */
  /*
    Only give up our reference on the original once the clone exists.
  */
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const MagickPixelPacket *background)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,const MagickPixelPacket *background)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *image;

  ssize_t
    y;

  MagickBooleanType
    status;

  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickSignature);
  assert(background != (const MagickPixelPacket *) NULL);
  /*
    Create the canvas, inheriting colorspace/matte/fuzz/depth from the
    requested background color.
  */
  image=AcquireImage(image_info);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->matte=background->matte;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  /*
    Fill every pixel (and index, for colormapped/CMYK layouts) with the
    background color, one row per loop iteration.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* another thread already failed: skip the remaining rows */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,background,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    On any row failure the partially built canvas is destroyed and NULL is
    returned; the reason is recorded in image->exception before destruction.
  */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  /*
    Bump the image reference count under the image semaphore and hand the
    same image pointer back to the caller.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  LockSemaphoreInfo(image->semaphore);
  image->reference_count+=1;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    page_info;

  /*
    Reset the image page canvas and position from a relative page
    specification; always succeeds.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseAbsoluteGeometry(page,&page_info);
  if ((flags & WidthValue) != 0)
    {
      /* a width with no height makes the page square */
      if ((flags & HeightValue) == 0)
        page_info.height=page_info.width;
      image->page.width=page_info.width;
      image->page.height=page_info.height;
    }
  if ((flags & AspectValue) == 0)
    {
      /* absolute offsets; widen/heighten a zero-sized page as needed */
      if ((flags & XValue) != 0)
        {
          image->page.x=page_info.x;
          if ((image->page.width == 0) && (page_info.x > 0))
            image->page.width=image->columns+page_info.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=page_info.y;
          if ((image->page.height == 0) && (page_info.y > 0))
            image->page.height=image->rows+page_info.y;
        }
    }
  else
    {
      /* aspect flag: offsets are relative adjustments */
      if ((flags & XValue) != 0)
        image->page.x+=page_info.x;
      if ((flags & YValue) != 0)
        image->page.y+=page_info.y;
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    A non-gray background in a gray colorspace cannot be represented;
    promote the image to RGB first.
  */
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  /* a translucent background requires an alpha channel */
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  /* pre-render the background once into a PixelPacket for fast row fills */
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    /* CMYK images store the black channel in the index queue */
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *restrict indexes;

        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannels() sets the number of pixels channels associated with the
% image.
%
% The format of the SetImageChannels method is:
%
% MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  /*
    Set the number of pixel channels associated with the image.
    Validate the image handle like every other setter in this module
    (previously this function dereferenced image unchecked).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  image->channels=channels;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,
% const MagickPixelPacket *color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o background: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const MagickPixelPacket *color)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  assert(color != (const MagickPixelPacket *) NULL);
  /* inherit pixel traits from the requested color */
  image->colorspace=color->colorspace;
  image->matte=color->matte;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* once any row fails, the remaining rows are skipped */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /* paint the whole row with the requested color */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,color,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  /*
    Set the image class (DirectClass for true color, PseudoClass for
    colormapped) and resynchronize the pixel cache with the new class.
    Validate the image handle like the other setters in this module
    (previously this function dereferenced image unchecked).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  image->storage_class=storage_class;
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageClipMask() associates a clip path with the image. The clip path
% must be the same dimensions as the image. Set any pixel component of
% the clip path to TransparentOpacity to prevent that corresponding image
% pixel component from being updated when SyncAuthenticPixels() is applied.
%
% The format of the SetImageClipMask method is:
%
% MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  /*
    Associate a clip path with the image.  The mask must match the image
    dimensions; passing NULL simply clears any existing clip mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if ((clip_mask != (const Image *) NULL) &&
      ((clip_mask->columns != image->columns) ||
       (clip_mask->rows != image->rows)))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  return(image->clip_mask != (Image *) NULL ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,
  const size_t columns,const size_t rows)
{
  /*
    Set the image size (columns & rows) and resynchronize the pixel
    cache with the new geometry.  A zero dimension is rejected.
    Validate the image handle like the other setters in this module
    (previously this function dereferenced image unchecked).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return(MagickFalse);
  image->columns=columns;
  image->rows=rows;
  return(SyncImagePixelCache(image,&image->exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   S e t   I m a g e   I n f o                                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the `magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, `ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: `image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  unsigned char
    magick[2*MaxTextExtent];

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *subimage='\0';
  GetPathComponent(image_info->filename,SubimagePath,subimage);
  if (*subimage != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
        {
          /* bracket expression is a crop/extract geometry, not a scene list */
          if (IsGeometry(subimage) != MagickFalse)
            (void) CloneString(&image_info->extract,subimage);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse comma-separated scene ranges (e.g. "2,4-7") tracking the
            overall minimum scene and maximum scene number.
          */
          (void) CloneString(&image_info->scenes,subimage);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          /* convert max scene number into an inclusive scene count */
          image_info->number_scenes-=image_info->scene-1;
          image_info->subimage=image_info->scene;
          image_info->subrange=image_info->number_scenes;
        }
    }
  *extension='\0';
  GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* for compressed files (image.gz etc.) look past the compression suffix */
  if (*extension != '\0')
    if ((LocaleCompare(extension,"gz") == 0) ||
        (LocaleCompare(extension,"Z") == 0) ||
        (LocaleCompare(extension,"svgz") == 0) ||
        (LocaleCompare(extension,"wmz") == 0))
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  if (*extension != '\0')
    if (LocaleCompare(extension,"bz2") == 0)
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if (*extension != '\0')
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "EPHEMERAL",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,extension,MaxTextExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        /* first-character check short-circuits the full compare */
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    (void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
  else
    {
      /*
        User specified image format.
      */
      LocaleUpper(magic);
      if (IsMagickConflict(magic) == MagickFalse)
        {
          (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          if (LocaleCompare(magic,"EPHEMERAL") != 0)
            image_info->affirm=MagickTrue;
          else
            image_info->temporary=MagickTrue;
        }
    }
  magick_info=GetMagickInfo(magic,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  GetPathComponent(image_info->filename,CanonicalPath,filename);
  (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,filename);
      if ((LocaleCompare(filename,image_info->filename) != 0) &&
          (strchr(filename,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  /* an explicit user-specified format wins; skip content sniffing */
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      /*
        Determine the image format from the first few bytes of the file.
      */
      image=AcquireImage(image_info);
      (void) CopyMagickString(image->filename,image_info->filename,
        MaxTextExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy standard input or pipe to temporary file.
          */
          *filename='\0';
          status=ImageToFile(image,filename,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,filename,MaxTextExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
          image_info->temporary=MagickTrue;
        }
      /* read a signature's worth of bytes, then rewind the stream */
      (void) ResetMagickMemory(magick,0,sizeof(magick));
      count=ReadBlob(image,2*MaxTextExtent,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          /* signature matched: adopt the sniffed format name */
          (void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
            MaxTextExtent);
          magick_info=GetMagickInfo(image_info->magick,sans_exception);
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  /*
    Point the image info at an in-memory blob of the given length.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->length=length;
  image_info->blob=(void *) blob;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Record the stdio stream the image should be read from or written to.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const Image *mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mask: the image mask.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask)
{
  /*
    Associate a mask with the image.  The mask must match the image
    dimensions; passing NULL simply clears any existing mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if ((mask != (const Image *) NULL) &&
      ((mask->columns != image->columns) || (mask->rows != image->rows)))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  image->mask=NewImageList();
  if (mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
  return(image->mask != (Image *) NULL ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e O p a c i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageOpacity() sets the opacity levels of the image.
%
% The format of the SetImageOpacity method is:
%
% MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
  const Quantum opacity)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /* the image now carries meaningful alpha values */
  image->matte=MagickTrue;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Get (not Queue): existing color channels must be preserved */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* overwrite only the opacity channel of every pixel in the row */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelOpacity(q,opacity);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  /*
    Delegate the virtual-pixel policy change to the pixel cache and
    return the previous setting.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(SetPixelCacheVirtualMethod(image,virtual_pixel_method));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting now effects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Measure how far the current image can be slid left toward its
  predecessor in the list by scanning transparent pixels along the seam;
  returns the horizontal overlap to subtract, adjusted by offset.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /* first image in the list: nothing to smush against */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  /* gap starts at the maximum possible overlap and only shrinks */
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* count transparent pixels inward from the left image's right edge */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* count transparent pixels inward from the right image's left edge */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    /* the tightest row determines the overall gap */
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  /* NOTE(review): the outer loop has no break, so y always equals rows
     here and this early return looks unreachable — confirm intent */
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}
/*
  Vertical counterpart of SmushXGap: measure how far the current image
  can be slid up toward its predecessor by scanning transparent pixels
  along the seam; returns the vertical overlap to subtract, adjusted by
  offset.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  /* first image in the list: nothing to smush against */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  /* gap starts at the maximum possible overlap and only shrinks */
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* count transparent pixels upward from the top image's bottom edge */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* count transparent pixels downward from the bottom image's top edge */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    /* the tightest column determines the overall gap */
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  /* NOTE(review): the outer loop has no break, so x always equals columns
     here and this early return looks unreachable — confirm intent */
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}
/*
  SmushImages() concatenates an image list into a single image, pulling
  adjacent images together by the amount of mutually-transparent border
  (minus the caller-supplied offset).  stack selects a top-to-bottom
  (MagickTrue) or left-to-right (MagickFalse) layout.  Returns the new
  image, or NULL on failure.
*/
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"

  CacheView
    *smush_view;

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    matte,
    proceed,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        /*
          Vertical stack: track the widest image, accumulate heights.
        */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /*
      Horizontal smush: accumulate widths, track the tallest image.
    */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->matte=matte;
  (void) SetImageBackgroundColor(smush_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  /*
    NOTE(review): smush_view is acquired and destroyed but never read
    through in this function — confirm whether it is needed at all.
  */
  smush_view=AcquireVirtualCacheView(smush_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    /*
      NOTE(review): status is overwritten on every iteration, so only the
      last CompositeImage() result decides success below — verify intent.
    */
    status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /*
    Trim the canvas to the area actually covered by the smushed images.
  */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  smush_view=DestroyCacheView(smush_view);
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  StripImage() removes all embedded profiles and a fixed set of
  non-essential properties from the image, then records an artifact
  asking the PNG encoder to omit the matching ancillary chunks.
  Returns the result of SetImageArtifact().
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  return(SetImageArtifact(image,"png:exclude-chunk",
    "EXIF,iCCP,iTXt,sRGB,tEXt,zCCP,zTXt,date"));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  PushColormapIndex() validates a colormap index against the image's
  colormap size.  An out-of-range index raises the caller's
  range_exception flag and is clamped to 0; an in-range index is
  returned unchanged.
*/
static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  if (index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((IndexPacket) 0);
    }
  return((IndexPacket) index);
}
/*
  SyncImage() pushes the colormap RGB(A) values of each pixel into the
  pixel cache so a PseudoClass image renders correctly.  Returns
  MagickFalse for DirectClass images (nothing to sync) or when a cache
  row cannot be authenticated; out-of-range colormap indexes raise an
  InvalidColormapIndex exception unless the image is being pinged.

  Fix: the SetPixelRgb() statement was missing its terminating
  semicolon before `else`, which does not compile.
*/
MagickExport MagickBooleanType SyncImage(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    range_exception,
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->storage_class == DirectClass)
    return(MagickFalse);
  range_exception=MagickFalse;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(range_exception,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /*
      Skip remaining rows once any row has already failed.
    */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
        &range_exception);
      if (image->matte == MagickFalse)
        SetPixelRgb(q,image->colormap+(ssize_t) index);  /* fix: was missing ';' */
      else
        SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageError,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs image_info options into per-image attributes.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
/*
  SyncImagesSettings() applies SyncImageSettings() to every image in the
  list, then removes the one-shot "page" option from image_info so it is
  not applied again.  Always returns MagickTrue.
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  for (image=images; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
/*
  SyncImageSettings() copies per-call options from image_info into the
  corresponding attributes of image, converting resolution values when
  the unit system changes, and finally mirrors every remaining option
  into the image's artifact table.  Always returns MagickTrue.

  Fix: the "density" handler declared a local GeometryInfo that shadowed
  the function-scope geometry_info (-Wshadow); the outer variable is
  used now — behavior is unchanged.
*/
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image)
{
  char
    property[MaxTextExtent];

  const char
    *option,
    *value;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->background_color,
      &image->exception);
  option=GetImageOption(image_info,"bias");
  if (option != (const char *) NULL)
    image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      /*
        Chromaticity primaries: sigma defaults to rho when only one value
        is given.
      */
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->border_color,&image->exception);
  option=GetImageOption(image_info,"colors");
  if (option != (const char *) NULL)
    image->colors=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      /*
        Set image density (x/y resolution); y defaults to x.
      */
      flags=ParseGeometry(option,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->matte_color,&image->exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /*
    An explicit image_info->quality overrides the "quality" option.
  */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->transparent_color,
      &image->exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  else
    units=image_info->units;
  if (units != UndefinedResolution)
    {
      /*
        Convert the stored resolution when the unit system changes
        (1 inch = 2.54 cm; inch values are rounded to two decimals).
      */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->x_resolution/=2.54;
                image->y_resolution/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                image->x_resolution=(double) ((size_t) (100.0*2.54*
                  image->x_resolution+0.5))/100.0;
                image->y_resolution=(double) ((size_t) (100.0*2.54*
                  image->y_resolution+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
    }
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /*
    Mirror every remaining option into the image artifact table.
  */
  ResetImageOptionIterator(image_info);
  for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
  {
    value=GetImageOption(image_info,option);
    if (value != (const char *) NULL)
      {
        (void) FormatLocaleString(property,MaxTextExtent,"%s",option);
        (void) SetImageArtifact(image,property,value);
      }
    option=GetNextImageOption(image_info);
  }
  return(MagickTrue);
}
|
requantize_leakyrelu_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Fused requantize + leaky ReLU for pack8 (8 interleaved channels) int32
// blobs.  The dequant/requant scales are folded algebraically so each
// element needs only one multiply (or one fused multiply-add when a bias
// is present); the leaky ReLU and the float->int8 conversion happen
// inside float2int8leakyrelu().
static void requantize_leakyrelu_pack8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, float slope, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int size = w * h;

    // A size of 1 means a single broadcast scalar; otherwise per-channel.
    int scale_in_data_size = scale_in_data.w;
    int scale_out_data_size = scale_out_data.w;
    int bias_data_size = bias_data.w;

    // int8(leakyrelu(v * scale_in, slope) * scale_out)
    // int8_leakyrelu(v * (scale_in * scale_out), slope)

    // int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
    // int8_leakyrelu(v * (scale_in * scale_out) + (bias * scale_out), slope)

    if (bias_data_size == 0)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const int* intptr = bottom_blob.channel(q);
            signed char* ptr = top_blob.channel(q);

            // Each pack8 channel spans 8 float lanes = two float32x4
            // registers (suffix 0 = lanes 0..3, suffix 1 = lanes 4..7).
            float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
            float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
            float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
            float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);

            // Fold the two scales into one multiplier.
            float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
            float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
            float32x4_t _slope = vdupq_n_f32(slope);

            int i = 0;
#if __aarch64__
            // 4 pixels (32 lanes) per iteration on aarch64, where more
            // vector registers are available.
            for (; i + 3 < size; i += 4)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                float32x4_t _v4 = vcvtq_f32_s32(vld1q_s32(intptr + 16));
                float32x4_t _v5 = vcvtq_f32_s32(vld1q_s32(intptr + 20));
                float32x4_t _v6 = vcvtq_f32_s32(vld1q_s32(intptr + 24));
                float32x4_t _v7 = vcvtq_f32_s32(vld1q_s32(intptr + 28));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                _v2 = vmulq_f32(_v2, _scale0);
                _v3 = vmulq_f32(_v3, _scale1);
                _v4 = vmulq_f32(_v4, _scale0);
                _v5 = vmulq_f32(_v5, _scale1);
                _v6 = vmulq_f32(_v6, _scale0);
                _v7 = vmulq_f32(_v7, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));
                vst1_s8(ptr + 16, float2int8leakyrelu(_v4, _v5, _slope));
                vst1_s8(ptr + 24, float2int8leakyrelu(_v6, _v7, _slope));

                intptr += 32;
                ptr += 32;
            }
#endif // __aarch64__
            // 2 pixels (16 lanes) per iteration.
            for (; i + 1 < size; i += 2)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                _v2 = vmulq_f32(_v2, _scale0);
                _v3 = vmulq_f32(_v3, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));

                intptr += 16;
                ptr += 16;
            }
            // Remaining single pixel (8 lanes).
            for (; i < size; i++)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));

                intptr += 8;
                ptr += 8;
            }
        }
    }
    else
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const int* intptr = bottom_blob.channel(q);
            signed char* ptr = top_blob.channel(q);

            float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
            float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
            float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
            float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
            float32x4_t _bias0 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8);
            float32x4_t _bias1 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8 + 4);

            // Fold scales; pre-scale the bias by scale_out so the inner
            // loop is a single fused multiply-add per register.
            float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
            float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
            _bias0 = vmulq_f32(_bias0, _scale_out0);
            _bias1 = vmulq_f32(_bias1, _scale_out1);
            float32x4_t _slope = vdupq_n_f32(slope);

            int i = 0;
#if __aarch64__
            for (; i + 3 < size; i += 4)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                float32x4_t _v4 = vcvtq_f32_s32(vld1q_s32(intptr + 16));
                float32x4_t _v5 = vcvtq_f32_s32(vld1q_s32(intptr + 20));
                float32x4_t _v6 = vcvtq_f32_s32(vld1q_s32(intptr + 24));
                float32x4_t _v7 = vcvtq_f32_s32(vld1q_s32(intptr + 28));
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
                _v2 = vfmaq_f32(_bias0, _v2, _scale0);
                _v3 = vfmaq_f32(_bias1, _v3, _scale1);
                _v4 = vfmaq_f32(_bias0, _v4, _scale0);
                _v5 = vfmaq_f32(_bias1, _v5, _scale1);
                _v6 = vfmaq_f32(_bias0, _v6, _scale0);
                _v7 = vfmaq_f32(_bias1, _v7, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));
                vst1_s8(ptr + 16, float2int8leakyrelu(_v4, _v5, _slope));
                vst1_s8(ptr + 24, float2int8leakyrelu(_v6, _v7, _slope));

                intptr += 32;
                ptr += 32;
            }
#endif // __aarch64__
            for (; i + 1 < size; i += 2)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                // vfmaq_f32 is A64-only; armv7 uses the vmlaq_f32 form.
#if __aarch64__
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
                _v2 = vfmaq_f32(_bias0, _v2, _scale0);
                _v3 = vfmaq_f32(_bias1, _v3, _scale1);
#else  // __aarch64__
                _v0 = vmlaq_f32(_bias0, _v0, _scale0);
                _v1 = vmlaq_f32(_bias1, _v1, _scale1);
                _v2 = vmlaq_f32(_bias0, _v2, _scale0);
                _v3 = vmlaq_f32(_bias1, _v3, _scale1);
#endif // __aarch64__
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));

                intptr += 16;
                ptr += 16;
            }
            for (; i < size; i++)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
#if __aarch64__
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
#else  // __aarch64__
                _v0 = vmlaq_f32(_bias0, _v0, _scale0);
                _v1 = vmlaq_f32(_bias1, _v1, _scale1);
#endif // __aarch64__
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));

                intptr += 8;
                ptr += 8;
            }
        }
    }
}
|
dpotrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpotrf.c, normal z -> d, Fri Sep 28 17:38:02 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_potrf
*
* Performs the Cholesky factorization of a symmetric positive definite
* matrix A. The factorization has the form
*
* \f[ A = L \times L^T, \f]
* or
* \f[ A = U^T \times U, \f]
*
* where U is an upper triangular matrix and L is a lower triangular matrix.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, the symmetric positive definite matrix A.
* If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A
* contains the upper triangular part of the matrix A, and the strictly
* lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading N-by-N lower triangular part of A
* contains the lower triangular part of the matrix A, and the strictly
* upper triangular part of A is not referenced.
* On exit, if return value = 0, the factor U or L from the Cholesky
* factorization A = U^T*U or A = L*L^T.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, the leading minor of order i of A is not
* positive definite, so the factorization could not
* be completed, and the solution has not been computed.
*
*******************************************************************************
*
* @sa plasma_omp_dpotrf
* @sa plasma_cpotrf
* @sa plasma_dpotrf
* @sa plasma_spotrf
*
******************************************************************************/
// Cholesky factorization of a symmetric positive definite matrix
// (LAPACK-layout entry point; see the doxygen block above for the
// parameter contract).
//
// Fixes: the error message after plasma_desc_triangular_create() named
// the wrong function, and the return values of plasma_sequence_init()
// and plasma_request_init() were assigned but never checked (leaking
// the tile descriptor on failure).
int plasma_dpotrf(plasma_enum_t uplo,
                  int n,
                  double *pA, int lda)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imax(n, 0) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_potrf(plasma, PlasmaRealDouble, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_triangular_create(PlasmaRealDouble, uplo, nb, nb,
                                           n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_triangular_create() failed");
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dtr2desc(pA, lda, A, &sequence, &request);

        // Call the tile async function.
        plasma_omp_dpotrf(uplo, A, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2tr(A, pA, lda, &sequence, &request);
    }
    // implicit synchronization

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_potrf
*
* Performs the Cholesky factorization of a symmetric positive definite
* matrix.
* Non-blocking tile version of plasma_dpotrf().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] A
* On entry, the symmetric positive definite matrix A.
* If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A
* contains the upper triangular part of the matrix A, and the strictly
* lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading N-by-N lower triangular part of A
* contains the lower triangular part of the matrix A, and the strictly
* upper triangular part of A is not referenced.
* On exit, if return value = 0, the factor U or L from the Cholesky
* factorization A = U^T*U or A = L*L^T.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dpotrf
* @sa plasma_omp_dpotrf
* @sa plasma_omp_cpotrf
* @sa plasma_omp_dpotrf
* @sa plasma_omp_spotrf
*
******************************************************************************/
// Non-blocking tile-async Cholesky factorization (see the doxygen block
// above).  Errors are reported through sequence->status / request->status
// via plasma_request_fail(); the function itself returns void.
void plasma_omp_dpotrf(plasma_enum_t uplo, plasma_desc_t A,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        // NOTE(review): sequence is NULL in this branch, so
        // plasma_request_fail() receives a NULL sequence pointer —
        // verify it tolerates that (same pattern in the request branch).
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.m == 0)
        return;

    // Call the parallel function.
    plasma_pdpotrf(uplo, A, sequence, request);
}
|
convolution_3x3_pack8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 convolution kernels into the 8x8 Winograd F(6,3) domain
// (G * g * G^T) and repack them as fp16 for pack8 arithmetic.
// dst layout: 8b-8a-inch/8a-64-outch/8b.
static void conv3x3s1_winograd64_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch)
{
    // winograd63 transform kernel
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix rows: each 3-tap kernel row/column expands to 8
    // Winograd-domain taps.
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = G * g  (8x3)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm = tmp * G^T  (8x8)
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 64-inch-outch
    // dst = 8b-8a-inch/8a-64-outch/8b
    // NOTE(review): the repack below assumes inch and outch are multiples
    // of 8 — confirm the caller guarantees this; remainder channels would
    // be silently dropped here.
    kernel_tm_pack8.create(inch / 8, 64, outch / 8, (size_t)2u * 64, 64);

    int q = 0;
    for (; q + 7 < outch; q += 8)
    {
        const Mat k0 = kernel_tm.channel(q);
        const Mat k1 = kernel_tm.channel(q + 1);
        const Mat k2 = kernel_tm.channel(q + 2);
        const Mat k3 = kernel_tm.channel(q + 3);
        const Mat k4 = kernel_tm.channel(q + 4);
        const Mat k5 = kernel_tm.channel(q + 5);
        const Mat k6 = kernel_tm.channel(q + 6);
        const Mat k7 = kernel_tm.channel(q + 7);

        Mat g0 = kernel_tm_pack8.channel(q / 8);

        for (int k = 0; k < 64; k++)
        {
            __fp16* g00 = g0.row<__fp16>(k);

            for (int p = 0; p + 7 < inch; p += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    const float* k00 = k0.row(p + i);
                    const float* k10 = k1.row(p + i);
                    const float* k20 = k2.row(p + i);
                    const float* k30 = k3.row(p + i);
                    const float* k40 = k4.row(p + i);
                    const float* k50 = k5.row(p + i);
                    const float* k60 = k6.row(p + i);
                    const float* k70 = k7.row(p + i);

                    // Narrow float -> fp16 while interleaving 8 output
                    // channels per group.
                    g00[0] = (__fp16)k00[k];
                    g00[1] = (__fp16)k10[k];
                    g00[2] = (__fp16)k20[k];
                    g00[3] = (__fp16)k30[k];
                    g00[4] = (__fp16)k40[k];
                    g00[5] = (__fp16)k50[k];
                    g00[6] = (__fp16)k60[k];
                    g00[7] = (__fp16)k70[k];

                    g00 += 8;
                }
            }
        }
    }
}
// 3x3 stride-1 convolution via Winograd F(6x6, 3x3), pack8 fp16 storage and
// fp16 arithmetic (AArch64 ASIMD+FP16 inline assembly and intrinsics).
//
// bottom_blob : input feature map, elempack 8, fp16 storage (elemsize 2*8).
// top_blob    : pre-sized output feature map (w/h/c already set by caller).
// kernel_tm   : weights pre-transformed by
//               conv3x3s1_winograd64_transform_kernel_pack8_fp16sa_neon.
// _bias       : optional per-output-channel bias, read as __fp16 x8 per channel.
// opt         : threading / workspace allocators.
//
// Pipeline: pad input -> transform input tiles to 8x8 Winograd domain ->
// batched GEMM per Winograd position -> inverse-transform 6x6 outputs ->
// crop padding. Assumes inch and outch are multiples of 8 (pack8 path) —
// TODO confirm callers guarantee this.
static void conv3x3s1_winograd64_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2 : each 8x8 input tile produces a 6x6 output tile with a
    // 2-pixel overlap, so the padded input must be (6n+2) x (6m+2).
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const __fp16* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = w_tm / 8 * h_tm / 8;

        //         bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator);

        // Input transform matrix B^T for F(6x6,3x3); the commented table is
        // the reference, the code below is its factored/FMA'd form.
        //         const float itm[8][8] = {
        //             {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
        //
        //             {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
        //             {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
        //
        //             {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
        //             {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
        //
        //             {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
        //             {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
        //
        //             {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
        //         };

        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25

        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            // Per-tile scratch: [row][col][lane], fp16, one 8x8 tile of 8-lane
            // vectors at a time.
            __fp16 tmp[8][8][8];

            // tile
            for (int i = 0; i < h_tm / 8; i++)
            {
                for (int j = 0; j < w_tm / 8; j++)
                {
                    // Pass 1: transform the 8 rows of the tile (B^T * d).
                    const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8;

                    for (int m = 0; m < 8; m++)
                    {
                        float16x8_t _r00 = vld1q_f16(r0);
                        float16x8_t _r01 = vld1q_f16(r0 + 8);
                        float16x8_t _r02 = vld1q_f16(r0 + 16);
                        float16x8_t _r03 = vld1q_f16(r0 + 24);
                        float16x8_t _r04 = vld1q_f16(r0 + 32);
                        float16x8_t _r05 = vld1q_f16(r0 + 40);
                        float16x8_t _r06 = vld1q_f16(r0 + 48);
                        float16x8_t _r07 = vld1q_f16(r0 + 56);

                        float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f);
                        float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f);
                        vst1q_f16(tmp[0][m], _tmp0m);
                        vst1q_f16(tmp[7][m], _tmp7m);

                        //                         tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
                        //                         tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;

                        float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f);
                        float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f);

                        //                         float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
                        //                         float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);

                        float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b);
                        float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b);
                        vst1q_f16(tmp[1][m], _tmp1m);
                        vst1q_f16(tmp[2][m], _tmp2m);

                        //                         tmp[1][m] = tmp12a + tmp12b;
                        //                         tmp[2][m] = tmp12a - tmp12b;

                        float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f);
                        float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                        //                         float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
                        //                         float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);

                        float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b);
                        float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b);
                        vst1q_f16(tmp[3][m], _tmp3m);
                        vst1q_f16(tmp[4][m], _tmp4m);

                        //                         tmp[3][m] = tmp34a + tmp34b;
                        //                         tmp[4][m] = tmp34a - tmp34b;

                        float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f);
                        float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                        //                         float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
                        //                         float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);

                        float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b);
                        float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b);
                        vst1q_f16(tmp[5][m], _tmp5m);
                        vst1q_f16(tmp[6][m], _tmp6m);

                        //                         tmp[5][m] = tmp56a + tmp56b;
                        //                         tmp[6][m] = tmp56a - tmp56b;

                        r0 += w * 8;
                    }

                    // Pass 2: transform the 8 columns ((B^T * d) * B), storing
                    // each of the 64 results into its per-position plane
                    // (stride tiles*64 fp16 between planes).
                    __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8;
                    __fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
                    __fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
                    __fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
                    __fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
                    __fp16* r0_tm_5 = r0_tm_0 + tiles * 40;
                    __fp16* r0_tm_6 = r0_tm_0 + tiles * 48;
                    __fp16* r0_tm_7 = r0_tm_0 + tiles * 56;

                    for (int m = 0; m < 8; m++)
                    {
                        float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                        float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                        float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                        float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                        float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                        float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                        float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
                        float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);

                        float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f);
                        float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f);

                        //                         r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
                        //                         r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;

                        float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f);
                        float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f);

                        //                         float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
                        //                         float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25);

                        float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b);
                        float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b);

                        //                         r0_tm[1] = tmp12a + tmp12b;
                        //                         r0_tm[2] = tmp12a - tmp12b;

                        float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                        float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                        //                         float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
                        //                         float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);

                        float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b);
                        float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b);

                        //                         r0_tm[3] = tmp34a + tmp34b;
                        //                         r0_tm[4] = tmp34a - tmp34b;

                        float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f);
                        float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                        //                         float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
                        //                         float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);

                        float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b);
                        float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b);

                        //                         r0_tm[5] = tmp56a + tmp56b;
                        //                         r0_tm[6] = tmp56a - tmp56b;

                        vst1q_f16(r0_tm_0, _r0tm0);
                        vst1q_f16(r0_tm_1, _r0tm1);
                        vst1q_f16(r0_tm_2, _r0tm2);
                        vst1q_f16(r0_tm_3, _r0tm3);
                        vst1q_f16(r0_tm_4, _r0tm4);
                        vst1q_f16(r0_tm_5, _r0tm5);
                        vst1q_f16(r0_tm_6, _r0tm6);
                        vst1q_f16(r0_tm_7, _r0tm7);

                        r0_tm_0 += tiles * 64;
                        r0_tm_1 += tiles * 64;
                        r0_tm_2 += tiles * 64;
                        r0_tm_3 += tiles * 64;
                        r0_tm_4 += tiles * 64;
                        r0_tm_5 += tiles * 64;
                        r0_tm_6 += tiles * 64;
                        r0_tm_7 += tiles * 64;
                    }
                }
            }
        }
    }
    // Release the padded input early; only the transformed copy is needed.
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        const int tiles = h_tm / 8 * w_tm / 8;

        // permute : regroup transformed tiles into batches of 12/8/4/2/1 so
        // the GEMM below reads contiguously.
        //         bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
        Mat bottom_blob_tm2;
        if (tiles >= 12)
            bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 64; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 11 < tiles; i += 12)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 12);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 12x8 (ld4 deinterleaves; uzp1/uzp2 recombine
                    // into row-major order for the GEMM's per-tile reads)
                    asm volatile(
                        "prfm   pldl1keep, [%0, #512]       \n"
                        "ld4    {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
                        "ld4    {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n"
                        "ld4    {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n"
                        "sub    %0, %0, #128                \n"

                        "uzp1   v20.8h, v0.8h, v4.8h        \n" // 0
                        "uzp1   v21.8h, v16.8h, v1.8h       \n" // 1
                        "uzp1   v22.8h, v5.8h, v17.8h       \n" // 2
                        "uzp1   v23.8h, v2.8h, v6.8h        \n" // 3
                        "uzp1   v24.8h, v18.8h, v3.8h       \n" // 4
                        "uzp1   v25.8h, v7.8h, v19.8h       \n" // 5
                        "uzp2   v26.8h, v0.8h, v4.8h        \n" // 6
                        "uzp2   v27.8h, v16.8h, v1.8h       \n" // 7
                        "uzp2   v28.8h, v5.8h, v17.8h       \n" // 8
                        "uzp2   v29.8h, v2.8h, v6.8h        \n" // 9
                        "uzp2   v30.8h, v18.8h, v3.8h       \n" // 10
                        "uzp2   v31.8h, v7.8h, v19.8h       \n" // 11

                        "st1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                        "st1    {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
                        "st1    {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 8x8
                    asm volatile(
                        "prfm   pldl1keep, [%0, #512]       \n"
                        "ld4    {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
                        "ld4    {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
                        "sub    %0, %0, #64                 \n"

                        "uzp1   v16.8h, v0.8h, v4.8h        \n"
                        "uzp2   v20.8h, v0.8h, v4.8h        \n"
                        "uzp1   v17.8h, v1.8h, v5.8h        \n"
                        "uzp2   v21.8h, v1.8h, v5.8h        \n"
                        "uzp1   v18.8h, v2.8h, v6.8h        \n"
                        "uzp2   v22.8h, v2.8h, v6.8h        \n"
                        "uzp1   v19.8h, v3.8h, v7.8h        \n"
                        "uzp2   v23.8h, v3.8h, v7.8h        \n"

                        "st1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                        "st1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                        : "=r"(r0),    // %0
                        "=r"(tmpptr) // %1
                        : "0"(r0),
                        "1"(tmpptr)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // straight copy of 4 tiles (no transpose needed here)
                    asm volatile(
                        "prfm   pldl1keep, [%0, #512]       \n"
                        "ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
                        "st1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
                        : "=r"(r0),    // %0
                        "=r"(tmpptr) // %1
                        : "0"(r0),
                        "1"(tmpptr)
                        : "memory", "v0", "v1", "v2", "v3");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    asm volatile(
                        "prfm   pldl1keep, [%0, #256]       \n"
                        "ld1    {v0.8h, v1.8h}, [%0]        \n"
                        "st1    {v0.8h, v1.8h}, [%1], #32   \n"
                        : "=r"(r0),    // %0
                        "=r"(tmpptr) // %1
                        : "0"(r0),
                        "1"(tmpptr)
                        : "memory", "v0", "v1");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);

                const __fp16* r0 = bottom_blob_tm;

                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    asm volatile(
                        "prfm   pldl1keep, [%0, #128]       \n"
                        "ld1    {v0.8h}, [%0]               \n"
                        "st1    {v0.8h}, [%1], #16          \n"
                        : "=r"(r0),    // %0
                        "=r"(tmpptr) // %1
                        : "0"(r0),
                        "1"(tmpptr)
                        : "memory", "v0");

                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator);

        // GEMM: for each output channel and each of the 64 Winograd
        // positions, accumulate inch 8-wide fp16 MACs per tile, processing
        // tiles in batches of 12/8/4/2/1 to keep the NEON register file full.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 64; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 11 < tiles; i += 12)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 12);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 12-tile x 8-outch accumulators live in v20-v31.
                    asm volatile(
                        "eor    v20.16b, v20.16b, v20.16b   \n"
                        "eor    v21.16b, v21.16b, v21.16b   \n"
                        "eor    v22.16b, v22.16b, v22.16b   \n"
                        "eor    v23.16b, v23.16b, v23.16b   \n"
                        "eor    v24.16b, v24.16b, v24.16b   \n"
                        "eor    v25.16b, v25.16b, v25.16b   \n"
                        "eor    v26.16b, v26.16b, v26.16b   \n"
                        "eor    v27.16b, v27.16b, v27.16b   \n"
                        "eor    v28.16b, v28.16b, v28.16b   \n"
                        "eor    v29.16b, v29.16b, v29.16b   \n"
                        "eor    v30.16b, v30.16b, v30.16b   \n"
                        "eor    v31.16b, v31.16b, v31.16b   \n"

                        "0:                                 \n"

                        "prfm   pldl1keep, [%2, #512]       \n"
                        "ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123

                        "prfm   pldl1keep, [%3, #512]       \n"
                        "ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123

                        "fmla   v20.8h, v12.8h, v0.h[0]     \n"
                        "fmla   v21.8h, v12.8h, v0.h[1]     \n"
                        "fmla   v22.8h, v12.8h, v0.h[2]     \n"
                        "fmla   v23.8h, v12.8h, v0.h[3]     \n"
                        "fmla   v24.8h, v12.8h, v0.h[4]     \n"
                        "fmla   v25.8h, v12.8h, v0.h[5]     \n"
                        "fmla   v26.8h, v12.8h, v0.h[6]     \n"
                        "fmla   v27.8h, v12.8h, v0.h[7]     \n"
                        "fmla   v28.8h, v12.8h, v1.h[0]     \n"
                        "fmla   v29.8h, v12.8h, v1.h[1]     \n"
                        "fmla   v30.8h, v12.8h, v1.h[2]     \n"
                        "fmla   v31.8h, v12.8h, v1.h[3]     \n"

                        "fmla   v20.8h, v13.8h, v1.h[4]     \n"
                        "fmla   v21.8h, v13.8h, v1.h[5]     \n"
                        "fmla   v22.8h, v13.8h, v1.h[6]     \n"
                        "fmla   v23.8h, v13.8h, v1.h[7]     \n"
                        "fmla   v24.8h, v13.8h, v2.h[0]     \n"
                        "fmla   v25.8h, v13.8h, v2.h[1]     \n"
                        "fmla   v26.8h, v13.8h, v2.h[2]     \n"
                        "fmla   v27.8h, v13.8h, v2.h[3]     \n"
                        "fmla   v28.8h, v13.8h, v2.h[4]     \n"
                        "fmla   v29.8h, v13.8h, v2.h[5]     \n"
                        "fmla   v30.8h, v13.8h, v2.h[6]     \n"
                        "fmla   v31.8h, v13.8h, v2.h[7]     \n"

                        "prfm   pldl1keep, [%2, #512]       \n"
                        "ld1    {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567

                        "fmla   v20.8h, v14.8h, v3.h[0]     \n"
                        "fmla   v21.8h, v14.8h, v3.h[1]     \n"
                        "fmla   v22.8h, v14.8h, v3.h[2]     \n"
                        "fmla   v23.8h, v14.8h, v3.h[3]     \n"
                        "fmla   v24.8h, v14.8h, v3.h[4]     \n"
                        "fmla   v25.8h, v14.8h, v3.h[5]     \n"
                        "fmla   v26.8h, v14.8h, v3.h[6]     \n"
                        "fmla   v27.8h, v14.8h, v3.h[7]     \n"
                        "fmla   v28.8h, v14.8h, v4.h[0]     \n"
                        "fmla   v29.8h, v14.8h, v4.h[1]     \n"
                        "fmla   v30.8h, v14.8h, v4.h[2]     \n"
                        "fmla   v31.8h, v14.8h, v4.h[3]     \n"

                        "prfm   pldl1keep, [%3, #512]       \n"
                        "ld1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567

                        "fmla   v20.8h, v15.8h, v4.h[4]     \n"
                        "fmla   v21.8h, v15.8h, v4.h[5]     \n"
                        "fmla   v22.8h, v15.8h, v4.h[6]     \n"
                        "fmla   v23.8h, v15.8h, v4.h[7]     \n"
                        "fmla   v24.8h, v15.8h, v5.h[0]     \n"
                        "fmla   v25.8h, v15.8h, v5.h[1]     \n"
                        "fmla   v26.8h, v15.8h, v5.h[2]     \n"
                        "fmla   v27.8h, v15.8h, v5.h[3]     \n"
                        "fmla   v28.8h, v15.8h, v5.h[4]     \n"
                        "fmla   v29.8h, v15.8h, v5.h[5]     \n"
                        "fmla   v30.8h, v15.8h, v5.h[6]     \n"
                        "fmla   v31.8h, v15.8h, v5.h[7]     \n"

                        "fmla   v20.8h, v16.8h, v6.h[0]     \n"
                        "fmla   v21.8h, v16.8h, v6.h[1]     \n"
                        "fmla   v22.8h, v16.8h, v6.h[2]     \n"
                        "fmla   v23.8h, v16.8h, v6.h[3]     \n"
                        "fmla   v24.8h, v16.8h, v6.h[4]     \n"
                        "fmla   v25.8h, v16.8h, v6.h[5]     \n"
                        "fmla   v26.8h, v16.8h, v6.h[6]     \n"
                        "fmla   v27.8h, v16.8h, v6.h[7]     \n"
                        "fmla   v28.8h, v16.8h, v7.h[0]     \n"
                        "fmla   v29.8h, v16.8h, v7.h[1]     \n"
                        "fmla   v30.8h, v16.8h, v7.h[2]     \n"
                        "fmla   v31.8h, v16.8h, v7.h[3]     \n"

                        "prfm   pldl1keep, [%2, #512]       \n"
                        "ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011

                        "fmla   v20.8h, v17.8h, v7.h[4]     \n"
                        "fmla   v21.8h, v17.8h, v7.h[5]     \n"
                        "fmla   v22.8h, v17.8h, v7.h[6]     \n"
                        "fmla   v23.8h, v17.8h, v7.h[7]     \n"
                        "fmla   v24.8h, v17.8h, v8.h[0]     \n"
                        "fmla   v25.8h, v17.8h, v8.h[1]     \n"
                        "fmla   v26.8h, v17.8h, v8.h[2]     \n"
                        "fmla   v27.8h, v17.8h, v8.h[3]     \n"
                        "fmla   v28.8h, v17.8h, v8.h[4]     \n"
                        "fmla   v29.8h, v17.8h, v8.h[5]     \n"
                        "fmla   v30.8h, v17.8h, v8.h[6]     \n"
                        "fmla   v31.8h, v17.8h, v8.h[7]     \n"

                        "fmla   v20.8h, v18.8h, v9.h[0]     \n"
                        "fmla   v21.8h, v18.8h, v9.h[1]     \n"
                        "fmla   v22.8h, v18.8h, v9.h[2]     \n"
                        "fmla   v23.8h, v18.8h, v9.h[3]     \n"
                        "fmla   v24.8h, v18.8h, v9.h[4]     \n"
                        "fmla   v25.8h, v18.8h, v9.h[5]     \n"
                        "fmla   v26.8h, v18.8h, v9.h[6]     \n"
                        "fmla   v27.8h, v18.8h, v9.h[7]     \n"
                        "fmla   v28.8h, v18.8h, v10.h[0]    \n"
                        "fmla   v29.8h, v18.8h, v10.h[1]    \n"
                        "fmla   v30.8h, v18.8h, v10.h[2]    \n"
                        "fmla   v31.8h, v18.8h, v10.h[3]    \n"

                        "subs   %w0, %w0, #1                \n"

                        "fmla   v20.8h, v19.8h, v10.h[4]    \n"
                        "fmla   v21.8h, v19.8h, v10.h[5]    \n"
                        "fmla   v22.8h, v19.8h, v10.h[6]    \n"
                        "fmla   v23.8h, v19.8h, v10.h[7]    \n"
                        "fmla   v24.8h, v19.8h, v11.h[0]    \n"
                        "fmla   v25.8h, v19.8h, v11.h[1]    \n"
                        "fmla   v26.8h, v19.8h, v11.h[2]    \n"
                        "fmla   v27.8h, v19.8h, v11.h[3]    \n"
                        "fmla   v28.8h, v19.8h, v11.h[4]    \n"
                        "fmla   v29.8h, v19.8h, v11.h[5]    \n"
                        "fmla   v30.8h, v19.8h, v11.h[6]    \n"
                        "fmla   v31.8h, v19.8h, v11.h[7]    \n"

                        "bne    0b                          \n"

                        "st1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                        "st1    {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
                        "st1    {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"

                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(k0)          // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(k0)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 8-tile batch: accumulators v16-v23.
                    asm volatile(
                        "eor    v16.16b, v16.16b, v16.16b   \n"
                        "eor    v17.16b, v17.16b, v17.16b   \n"
                        "eor    v18.16b, v18.16b, v18.16b   \n"
                        "eor    v19.16b, v19.16b, v19.16b   \n"
                        "eor    v20.16b, v20.16b, v20.16b   \n"
                        "eor    v21.16b, v21.16b, v21.16b   \n"
                        "eor    v22.16b, v22.16b, v22.16b   \n"
                        "eor    v23.16b, v23.16b, v23.16b   \n"

                        "0:                                 \n"

                        "prfm   pldl1keep, [%2, #512]       \n"
                        "ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123

                        "prfm   pldl1keep, [%3, #512]       \n"
                        "ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123

                        "fmla   v16.8h, v8.8h, v0.h[0]      \n"
                        "fmla   v17.8h, v8.8h, v0.h[1]      \n"
                        "fmla   v18.8h, v8.8h, v0.h[2]      \n"
                        "fmla   v19.8h, v8.8h, v0.h[3]      \n"
                        "fmla   v20.8h, v8.8h, v0.h[4]      \n"
                        "fmla   v21.8h, v8.8h, v0.h[5]      \n"
                        "fmla   v22.8h, v8.8h, v0.h[6]      \n"
                        "fmla   v23.8h, v8.8h, v0.h[7]      \n"

                        "fmla   v16.8h, v9.8h, v1.h[0]      \n"
                        "fmla   v17.8h, v9.8h, v1.h[1]      \n"
                        "fmla   v18.8h, v9.8h, v1.h[2]      \n"
                        "fmla   v19.8h, v9.8h, v1.h[3]      \n"
                        "fmla   v20.8h, v9.8h, v1.h[4]      \n"
                        "fmla   v21.8h, v9.8h, v1.h[5]      \n"
                        "fmla   v22.8h, v9.8h, v1.h[6]      \n"
                        "fmla   v23.8h, v9.8h, v1.h[7]      \n"

                        "prfm   pldl1keep, [%2, #512]       \n"
                        "ld1    {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567

                        "fmla   v16.8h, v10.8h, v2.h[0]     \n"
                        "fmla   v17.8h, v10.8h, v2.h[1]     \n"
                        "fmla   v18.8h, v10.8h, v2.h[2]     \n"
                        "fmla   v19.8h, v10.8h, v2.h[3]     \n"
                        "fmla   v20.8h, v10.8h, v2.h[4]     \n"
                        "fmla   v21.8h, v10.8h, v2.h[5]     \n"
                        "fmla   v22.8h, v10.8h, v2.h[6]     \n"
                        "fmla   v23.8h, v10.8h, v2.h[7]     \n"

                        "prfm   pldl1keep, [%3, #512]       \n"
                        "ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567

                        "fmla   v16.8h, v11.8h, v3.h[0]     \n"
                        "fmla   v17.8h, v11.8h, v3.h[1]     \n"
                        "fmla   v18.8h, v11.8h, v3.h[2]     \n"
                        "fmla   v19.8h, v11.8h, v3.h[3]     \n"
                        "fmla   v20.8h, v11.8h, v3.h[4]     \n"
                        "fmla   v21.8h, v11.8h, v3.h[5]     \n"
                        "fmla   v22.8h, v11.8h, v3.h[6]     \n"
                        "fmla   v23.8h, v11.8h, v3.h[7]     \n"

                        "fmla   v16.8h, v12.8h, v4.h[0]     \n"
                        "fmla   v17.8h, v12.8h, v4.h[1]     \n"
                        "fmla   v18.8h, v12.8h, v4.h[2]     \n"
                        "fmla   v19.8h, v12.8h, v4.h[3]     \n"
                        "fmla   v20.8h, v12.8h, v4.h[4]     \n"
                        "fmla   v21.8h, v12.8h, v4.h[5]     \n"
                        "fmla   v22.8h, v12.8h, v4.h[6]     \n"
                        "fmla   v23.8h, v12.8h, v4.h[7]     \n"

                        "fmla   v16.8h, v13.8h, v5.h[0]     \n"
                        "fmla   v17.8h, v13.8h, v5.h[1]     \n"
                        "fmla   v18.8h, v13.8h, v5.h[2]     \n"
                        "fmla   v19.8h, v13.8h, v5.h[3]     \n"
                        "fmla   v20.8h, v13.8h, v5.h[4]     \n"
                        "fmla   v21.8h, v13.8h, v5.h[5]     \n"
                        "fmla   v22.8h, v13.8h, v5.h[6]     \n"
                        "fmla   v23.8h, v13.8h, v5.h[7]     \n"

                        "fmla   v16.8h, v14.8h, v6.h[0]     \n"
                        "fmla   v17.8h, v14.8h, v6.h[1]     \n"
                        "fmla   v18.8h, v14.8h, v6.h[2]     \n"
                        "fmla   v19.8h, v14.8h, v6.h[3]     \n"
                        "fmla   v20.8h, v14.8h, v6.h[4]     \n"
                        "fmla   v21.8h, v14.8h, v6.h[5]     \n"
                        "fmla   v22.8h, v14.8h, v6.h[6]     \n"
                        "fmla   v23.8h, v14.8h, v6.h[7]     \n"

                        "subs   %w0, %w0, #1                \n"

                        "fmla   v16.8h, v15.8h, v7.h[0]     \n"
                        "fmla   v17.8h, v15.8h, v7.h[1]     \n"
                        "fmla   v18.8h, v15.8h, v7.h[2]     \n"
                        "fmla   v19.8h, v15.8h, v7.h[3]     \n"
                        "fmla   v20.8h, v15.8h, v7.h[4]     \n"
                        "fmla   v21.8h, v15.8h, v7.h[5]     \n"
                        "fmla   v22.8h, v15.8h, v7.h[6]     \n"
                        "fmla   v23.8h, v15.8h, v7.h[7]     \n"

                        "bne    0b                          \n"

                        "st1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                        "st1    {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"

                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(k0)          // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(k0)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 4-tile batch: accumulators v16-v19.
                    asm volatile(
                        "eor    v16.16b, v16.16b, v16.16b   \n"
                        "eor    v17.16b, v17.16b, v17.16b   \n"
                        "eor    v18.16b, v18.16b, v18.16b   \n"
                        "eor    v19.16b, v19.16b, v19.16b   \n"

                        "0:                                 \n"

                        "prfm   pldl1keep, [%2, #512]       \n"
                        "ld1    {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123

                        "prfm   pldl1keep, [%3, #512]       \n"
                        "ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123

                        "fmla   v16.8h, v8.8h, v0.h[0]      \n"
                        "fmla   v17.8h, v8.8h, v1.h[0]      \n"
                        "fmla   v18.8h, v8.8h, v2.h[0]      \n"
                        "fmla   v19.8h, v8.8h, v3.h[0]      \n"

                        "fmla   v16.8h, v9.8h, v0.h[1]      \n"
                        "fmla   v17.8h, v9.8h, v1.h[1]      \n"
                        "fmla   v18.8h, v9.8h, v2.h[1]      \n"
                        "fmla   v19.8h, v9.8h, v3.h[1]      \n"

                        "prfm   pldl1keep, [%3, #512]       \n"
                        "ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567

                        "fmla   v16.8h, v10.8h, v0.h[2]     \n"
                        "fmla   v17.8h, v10.8h, v1.h[2]     \n"
                        "fmla   v18.8h, v10.8h, v2.h[2]     \n"
                        "fmla   v19.8h, v10.8h, v3.h[2]     \n"

                        "fmla   v16.8h, v11.8h, v0.h[3]     \n"
                        "fmla   v17.8h, v11.8h, v1.h[3]     \n"
                        "fmla   v18.8h, v11.8h, v2.h[3]     \n"
                        "fmla   v19.8h, v11.8h, v3.h[3]     \n"

                        "fmla   v16.8h, v12.8h, v0.h[4]     \n"
                        "fmla   v17.8h, v12.8h, v1.h[4]     \n"
                        "fmla   v18.8h, v12.8h, v2.h[4]     \n"
                        "fmla   v19.8h, v12.8h, v3.h[4]     \n"

                        "fmla   v16.8h, v13.8h, v0.h[5]     \n"
                        "fmla   v17.8h, v13.8h, v1.h[5]     \n"
                        "fmla   v18.8h, v13.8h, v2.h[5]     \n"
                        "fmla   v19.8h, v13.8h, v3.h[5]     \n"

                        "fmla   v16.8h, v14.8h, v0.h[6]     \n"
                        "fmla   v17.8h, v14.8h, v1.h[6]     \n"
                        "fmla   v18.8h, v14.8h, v2.h[6]     \n"
                        "fmla   v19.8h, v14.8h, v3.h[6]     \n"

                        "subs   %w0, %w0, #1                \n"

                        "fmla   v16.8h, v15.8h, v0.h[7]     \n"
                        "fmla   v17.8h, v15.8h, v1.h[7]     \n"
                        "fmla   v18.8h, v15.8h, v2.h[7]     \n"
                        "fmla   v19.8h, v15.8h, v3.h[7]     \n"

                        "bne    0b                          \n"

                        "st1    {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"

                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(k0)          // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(k0)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 2-tile batch: accumulators v16-v17.
                    asm volatile(
                        "eor    v16.16b, v16.16b, v16.16b   \n"
                        "eor    v17.16b, v17.16b, v17.16b   \n"

                        "0:                                 \n"

                        "prfm   pldl1keep, [%2, #256]       \n"
                        "ld1    {v0.8h, v1.8h}, [%2], #32   \n" // r01

                        "prfm   pldl1keep, [%3, #512]       \n"
                        "ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123

                        "fmla   v16.8h, v8.8h, v0.h[0]      \n"
                        "fmla   v17.8h, v8.8h, v1.h[0]      \n"
                        "fmla   v16.8h, v9.8h, v0.h[1]      \n"
                        "fmla   v17.8h, v9.8h, v1.h[1]      \n"

                        "prfm   pldl1keep, [%3, #512]       \n"
                        "ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567

                        "fmla   v16.8h, v10.8h, v0.h[2]     \n"
                        "fmla   v17.8h, v10.8h, v1.h[2]     \n"
                        "fmla   v16.8h, v11.8h, v0.h[3]     \n"
                        "fmla   v17.8h, v11.8h, v1.h[3]     \n"

                        "fmla   v16.8h, v12.8h, v0.h[4]     \n"
                        "fmla   v17.8h, v12.8h, v1.h[4]     \n"
                        "fmla   v16.8h, v13.8h, v0.h[5]     \n"
                        "fmla   v17.8h, v13.8h, v1.h[5]     \n"

                        "fmla   v16.8h, v14.8h, v0.h[6]     \n"
                        "fmla   v17.8h, v14.8h, v1.h[6]     \n"

                        "subs   %w0, %w0, #1                \n"

                        "fmla   v16.8h, v15.8h, v0.h[7]     \n"
                        "fmla   v17.8h, v15.8h, v1.h[7]     \n"

                        "bne    0b                          \n"

                        "st1    {v16.8h, v17.8h}, [%1], #32 \n"

                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(k0)          // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(k0)
                        : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 1-tile tail: accumulator v16.
                    asm volatile(
                        "eor    v16.16b, v16.16b, v16.16b   \n"

                        "0:                                 \n"

                        "prfm   pldl1keep, [%2, #128]       \n"
                        "ld1    {v0.8h}, [%2], #16          \n" // r0

                        "prfm   pldl1keep, [%3, #512]       \n"
                        "ld1    {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123

                        "fmla   v16.8h, v8.8h, v0.h[0]      \n"
                        "fmla   v16.8h, v9.8h, v0.h[1]      \n"

                        "prfm   pldl1keep, [%3, #512]       \n"
                        "ld1    {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567

                        "fmla   v16.8h, v10.8h, v0.h[2]     \n"
                        "fmla   v16.8h, v11.8h, v0.h[3]     \n"
                        "fmla   v16.8h, v12.8h, v0.h[4]     \n"
                        "fmla   v16.8h, v13.8h, v0.h[5]     \n"

                        "subs   %w0, %w0, #1                \n"

                        "fmla   v16.8h, v14.8h, v0.h[6]     \n"
                        "fmla   v16.8h, v15.8h, v0.h[7]     \n"

                        "bne    0b                          \n"

                        "st1    {v16.8h}, [%1], #16         \n"

                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(k0)          // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(k0)
                        : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
                }
            }
        }
    }
    // NOTE(review): bottom_blob_tm was already released inside the dot scope
    // after the permute; this second release is a harmless no-op.
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        // No 6-alignment padding was needed; write directly into top_blob.
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        // Output transform matrix A^T for F(6x6,3x3); reference table below,
        // code is the factored/FMA'd form.
        //         const float otm[6][8] = {
        //             {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
        //             {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
        //             {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
        //             {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
        //             {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
        //             {0.0f, 1.0f, -1.0f, 32.0f,-32.0f,  1.0f, -1.0f, 1.0f}
        //         };

        // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
        // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
        // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
        // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
        // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
        // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;
        const int tiles = w_tm / 8 * h_tm / 8;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            //             const float bias0 = bias ? bias[p] : 0.f;
            // Bias for all 8 packed output channels of this channel group.
            float16x8_t _bias0 = bias ? vld1q_f16((const __fp16*)bias + p * 8) : vdupq_n_f16(0.f);

            __fp16 tmp[6][8][8];

            // tile
            for (int i = 0; i < outh / 6; i++)
            {
                for (int j = 0; j < outw / 6; j++)
                {
                    //                     top_blob_tm.create(tiles, 64, outch, elemsize, elempack);

                    const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 8;
                    const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8;
                    const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16;
                    const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24;
                    const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32;
                    const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40;
                    const __fp16* output0_tm_6 = output0_tm_0 + tiles * 48;
                    const __fp16* output0_tm_7 = output0_tm_0 + tiles * 56;

                    __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 8;

                    // TODO neon optimize
                    // Pass 1: A^T applied along rows (8 -> 6 intermediates).
                    for (int m = 0; m < 8; m++)
                    {
                        float16x8_t _out0tm0 = vld1q_f16(output0_tm_0);
                        float16x8_t _out0tm1 = vld1q_f16(output0_tm_1);
                        float16x8_t _out0tm2 = vld1q_f16(output0_tm_2);
                        float16x8_t _out0tm3 = vld1q_f16(output0_tm_3);
                        float16x8_t _out0tm4 = vld1q_f16(output0_tm_4);
                        float16x8_t _out0tm5 = vld1q_f16(output0_tm_5);
                        float16x8_t _out0tm6 = vld1q_f16(output0_tm_6);
                        float16x8_t _out0tm7 = vld1q_f16(output0_tm_7);

                        float16x8_t _tmp024a = vaddq_f16(_out0tm1, _out0tm2);
                        float16x8_t _tmp135a = vsubq_f16(_out0tm1, _out0tm2);

                        //                         float tmp024a = output0_tm[1] + output0_tm[2];
                        //                         float tmp135a = output0_tm[1] - output0_tm[2];

                        float16x8_t _tmp024b = vaddq_f16(_out0tm3, _out0tm4);
                        float16x8_t _tmp135b = vsubq_f16(_out0tm3, _out0tm4);

                        //                         float tmp024b = output0_tm[3] + output0_tm[4];
                        //                         float tmp135b = output0_tm[3] - output0_tm[4];

                        float16x8_t _tmp024c = vaddq_f16(_out0tm5, _out0tm6);
                        float16x8_t _tmp135c = vsubq_f16(_out0tm5, _out0tm6);

                        //                         float tmp024c = output0_tm[5] + output0_tm[6];
                        //                         float tmp135c = output0_tm[5] - output0_tm[6];

                        float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f));
                        float16x8_t _tmp2m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                        float16x8_t _tmp4m = vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
                        vst1q_f16(tmp[0][m], _tmp0m);
                        vst1q_f16(tmp[2][m], _tmp2m);
                        vst1q_f16(tmp[4][m], _tmp4m);

                        //                         tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
                        //                         tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
                        //                         tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c * 2;

                        float16x8_t _tmp1m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                        float16x8_t _tmp3m = vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                        float16x8_t _tmp5m = vaddq_f16(vaddq_f16(_out0tm7, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f));
                        vst1q_f16(tmp[1][m], _tmp1m);
                        vst1q_f16(tmp[3][m], _tmp3m);
                        vst1q_f16(tmp[5][m], _tmp5m);

                        //                         tmp[1][m] = tmp135a + tmp135b * 2 + tmp135c * 16;
                        //                         tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
                        //                         tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;

                        output0_tm_0 += tiles * 64;
                        output0_tm_1 += tiles * 64;
                        output0_tm_2 += tiles * 64;
                        output0_tm_3 += tiles * 64;
                        output0_tm_4 += tiles * 64;
                        output0_tm_5 += tiles * 64;
                        output0_tm_6 += tiles * 64;
                        output0_tm_7 += tiles * 64;
                    }

                    // Pass 2: A^T applied along columns, bias added, 6x6
                    // spatial output stored.
                    for (int m = 0; m < 6; m++)
                    {
                        float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                        float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                        float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                        float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                        float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                        float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);
                        float16x8_t _tmp06 = vld1q_f16(tmp[m][6]);
                        float16x8_t _tmp07 = vld1q_f16(tmp[m][7]);

                        float16x8_t _tmp024a = vaddq_f16(_tmp01, _tmp02);
                        float16x8_t _tmp135a = vsubq_f16(_tmp01, _tmp02);

                        //                         float tmp024a = tmp0[1] + tmp0[2];
                        //                         float tmp135a = tmp0[1] - tmp0[2];

                        float16x8_t _tmp024b = vaddq_f16(_tmp03, _tmp04);
                        float16x8_t _tmp135b = vsubq_f16(_tmp03, _tmp04);

                        //                         float tmp024b = tmp0[3] + tmp0[4];
                        //                         float tmp135b = tmp0[3] - tmp0[4];

                        float16x8_t _tmp024c = vaddq_f16(_tmp05, _tmp06);
                        float16x8_t _tmp135c = vsubq_f16(_tmp05, _tmp06);

                        //                         float tmp024c = tmp0[5] + tmp0[6];
                        //                         float tmp135c = tmp0[5] - tmp0[6];

                        float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp024a), vfmaq_n_f16(_tmp024b, _tmp024c, 32.f)));
                        float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                        float16x8_t _out04 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
                        vst1q_f16(output0, _out00);
                        vst1q_f16(output0 + 16, _out02);
                        vst1q_f16(output0 + 32, _out04);

                        //                         output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
                        //                         output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
                        //                         output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c * 2;

                        float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                        float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vfmaq_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                        float16x8_t _out05 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp07, _tmp135a), vfmaq_n_f16(_tmp135c, _tmp135b, 32.f)));
                        vst1q_f16(output0 + 8, _out01);
                        vst1q_f16(output0 + 24, _out03);
                        vst1q_f16(output0 + 40, _out05);

                        //                         output0[1] = bias0 + tmp135a + tmp135b * 2 + tmp135c * 16;
                        //                         output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
                        //                         output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;

                        output0 += outw * 8;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad : crop the 6-aligned bordered output back to the
    // caller's requested size.
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd42_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch)
{
// winograd42 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 8b-8a-inch/8a-36-outch/8b
kernel_tm_pack8.create(inch / 8, 36, outch / 8, (size_t)2u * 64, 64);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel_tm.channel(q);
const Mat k1 = kernel_tm.channel(q + 1);
const Mat k2 = kernel_tm.channel(q + 2);
const Mat k3 = kernel_tm.channel(q + 3);
const Mat k4 = kernel_tm.channel(q + 4);
const Mat k5 = kernel_tm.channel(q + 5);
const Mat k6 = kernel_tm.channel(q + 6);
const Mat k7 = kernel_tm.channel(q + 7);
Mat g0 = kernel_tm_pack8.channel(q / 8);
for (int k = 0; k < 36; k++)
{
__fp16* g00 = g0.row<__fp16>(k);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
const float* k00 = k0.row(p + i);
const float* k10 = k1.row(p + i);
const float* k20 = k2.row(p + i);
const float* k30 = k3.row(p + i);
const float* k40 = k4.row(p + i);
const float* k50 = k5.row(p + i);
const float* k60 = k6.row(p + i);
const float* k70 = k7.row(p + i);
g00[0] = (__fp16)k00[k];
g00[1] = (__fp16)k10[k];
g00[2] = (__fp16)k20[k];
g00[3] = (__fp16)k30[k];
g00[4] = (__fp16)k40[k];
g00[5] = (__fp16)k50[k];
g00[6] = (__fp16)k60[k];
g00[7] = (__fp16)k70[k];
g00 += 8;
}
}
}
}
}
// Winograd F(4x4, 3x3) convolution forward pass for pack8 fp16 storage+arithmetic
// (fp16sa) on aarch64. bottom_blob is the packed-8 fp16 input, top_blob the
// pre-sized packed-8 fp16 output, kernel_tm the kernel already transformed and
// interleaved by conv3x3s1_winograd42_transform_kernel_pack8_fp16sa_neon, and
// _bias holds 8 fp16 lanes per packed output channel (may be empty).
// Pipeline: pad input -> input transform (B^T d B per 6x6 tile) -> per-tile
// GEMM over input channels (inline asm) -> output transform (A^T m A) -> crop.
static void conv3x3s1_winograd42_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2: each output tile is 4x4 and needs a 6x6 input window with
    // 4-pixel stride, so the padded input is (4n+2) in each dimension
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;

    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const __fp16* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        // layout: [inch][36 transform positions][tiles], 8 fp16 lanes per element
        bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);

        // const float itm[4][4] = {
        //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
        //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
        //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
        //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
        //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
        // };

        // 0 = 4 * r00 - 5 * r02 + r04
        // 1 = -4 * (r01 + r02) + r04 + r03
        // 2 = 4 * (r01 - r02) + r04 - r03
        // 3 = -2 * (r01 - r03) + r04 - r02
        // 4 = 2 * (r01 - r03) + r04 - r02
        // 5 = 4 * r01 - 5 * r03 + r05

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const Mat img0 = bottom_blob_bordered.channel(q);
            Mat img0_tm = bottom_blob_tm.channel(q);

            // staging buffer: B^T applied along rows first, then along columns
            __fp16 tmp[6][6][8];

            // tile
            for (int i = 0; i < h_tm / 6; i++)
            {
                for (int j = 0; j < w_tm / 6; j++)
                {
                    const __fp16* r0 = img0.row<const __fp16>(i * 4) + (j * 4) * 8;

                    // horizontal pass of B^T over the 6 rows of the 6x6 window
                    for (int m = 0; m < 6; m++)
                    {
                        float16x8_t _r00 = vld1q_f16(r0);
                        float16x8_t _r01 = vld1q_f16(r0 + 8);
                        float16x8_t _r02 = vld1q_f16(r0 + 16);
                        float16x8_t _r03 = vld1q_f16(r0 + 24);
                        float16x8_t _r04 = vld1q_f16(r0 + 32);
                        float16x8_t _r05 = vld1q_f16(r0 + 40);

                        float16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f);
                        float16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f);
                        float16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f);
                        float16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
                        float16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
                        float16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f);

                        vst1q_f16(tmp[0][m], _tmp0m);
                        vst1q_f16(tmp[1][m], _tmp1m);
                        vst1q_f16(tmp[2][m], _tmp2m);
                        vst1q_f16(tmp[3][m], _tmp3m);
                        vst1q_f16(tmp[4][m], _tmp4m);
                        vst1q_f16(tmp[5][m], _tmp5m);

                        r0 += w * 8;
                    }

                    // destination pointers: one per transform row, spaced by
                    // tiles*8 fp16 (= one transform-position plane)
                    __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 6 + j) * 8;
                    __fp16* r0_tm_1 = r0_tm_0 + tiles * 8;
                    __fp16* r0_tm_2 = r0_tm_0 + tiles * 16;
                    __fp16* r0_tm_3 = r0_tm_0 + tiles * 24;
                    __fp16* r0_tm_4 = r0_tm_0 + tiles * 32;
                    __fp16* r0_tm_5 = r0_tm_0 + tiles * 40;

                    // vertical pass of B^T over the staged columns
                    for (int m = 0; m < 6; m++)
                    {
                        float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                        float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                        float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                        float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                        float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                        float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);

                        float16x8_t _r0tm0 = vfmsq_n_f16(vfmaq_n_f16(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                        float16x8_t _r0tm1 = vfmsq_n_f16(vaddq_f16(_tmp04, _tmp03), vaddq_f16(_tmp01, _tmp02), 4.f);
                        float16x8_t _r0tm2 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp03), vsubq_f16(_tmp01, _tmp02), 4.f);
                        float16x8_t _r0tm3 = vfmsq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f);
                        float16x8_t _r0tm4 = vfmaq_n_f16(vsubq_f16(_tmp04, _tmp02), vsubq_f16(_tmp01, _tmp03), 2.f);
                        float16x8_t _r0tm5 = vfmsq_n_f16(vfmaq_n_f16(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                        vst1q_f16(r0_tm_0, _r0tm0);
                        vst1q_f16(r0_tm_1, _r0tm1);
                        vst1q_f16(r0_tm_2, _r0tm2);
                        vst1q_f16(r0_tm_3, _r0tm3);
                        vst1q_f16(r0_tm_4, _r0tm4);
                        vst1q_f16(r0_tm_5, _r0tm5);

                        // advance 6 transform-position planes (6 * tiles * 8)
                        r0_tm_0 += tiles * 48;
                        r0_tm_1 += tiles * 48;
                        r0_tm_2 += tiles * 48;
                        r0_tm_3 += tiles * 48;
                        r0_tm_4 += tiles * 48;
                        r0_tm_5 += tiles * 48;
                    }
                }
            }
        }
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        // re-pack tiles into GEMM-friendly panels of 12/8/4/2/1 tiles so the
        // dot kernels can stream contiguous data
        Mat bottom_blob_tm2;
        if (tiles >= 12)
            bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 2u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 11 < tiles; i += 12)
            {
                __fp16* tm2p = tm2.row<__fp16>(i / 12);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 12x8
                    // ld4 de-interleaves; the uzp1/uzp2 pairs re-assemble the
                    // 12 tiles x 8 lanes into lane-major order for the dot kernel
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
                        "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n"
                        "ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n"
                        "sub %0, %0, #128 \n"
                        "uzp1 v20.8h, v0.8h, v4.8h \n" // 0
                        "uzp1 v21.8h, v16.8h, v1.8h \n" // 1
                        "uzp1 v22.8h, v5.8h, v17.8h \n" // 2
                        "uzp1 v23.8h, v2.8h, v6.8h \n" // 3
                        "uzp1 v24.8h, v18.8h, v3.8h \n" // 4
                        "uzp1 v25.8h, v7.8h, v19.8h \n" // 5
                        "uzp2 v26.8h, v0.8h, v4.8h \n" // 6
                        "uzp2 v27.8h, v16.8h, v1.8h \n" // 7
                        "uzp2 v28.8h, v5.8h, v17.8h \n" // 8
                        "uzp2 v29.8h, v2.8h, v6.8h \n" // 9
                        "uzp2 v30.8h, v18.8h, v3.8h \n" // 10
                        "uzp2 v31.8h, v7.8h, v19.8h \n" // 11
                        "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                        "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
                        "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
                        : "=r"(r0),  // %0
                        "=r"(tm2p) // %1
                        : "0"(r0),
                        "1"(tm2p)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                    // next input channel plane
                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i + 7 < tiles; i += 8)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 8x8
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
                        "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
                        "sub %0, %0, #64 \n"
                        "uzp1 v16.8h, v0.8h, v4.8h \n"
                        "uzp2 v20.8h, v0.8h, v4.8h \n"
                        "uzp1 v17.8h, v1.8h, v5.8h \n"
                        "uzp2 v21.8h, v1.8h, v5.8h \n"
                        "uzp1 v18.8h, v2.8h, v6.8h \n"
                        "uzp2 v22.8h, v2.8h, v6.8h \n"
                        "uzp1 v19.8h, v3.8h, v7.8h \n"
                        "uzp2 v23.8h, v3.8h, v7.8h \n"
                        "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                        "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                        : "=r"(r0),    // %0
                        "=r"(tmpptr) // %1
                        : "0"(r0),
                        "1"(tmpptr)
                        : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    // 4 tiles: straight copy, no transpose needed
                    asm volatile(
                        "prfm pldl1keep, [%0, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
                        "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
                        : "=r"(r0),    // %0
                        "=r"(tmpptr) // %1
                        : "0"(r0),
                        "1"(tmpptr)
                        : "memory", "v0", "v1", "v2", "v3");
                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #256] \n"
                        "ld1 {v0.8h, v1.8h}, [%0] \n"
                        "st1 {v0.8h, v1.8h}, [%1], #32 \n"
                        : "=r"(r0),    // %0
                        "=r"(tmpptr) // %1
                        : "0"(r0),
                        "1"(tmpptr)
                        : "memory", "v0", "v1");
                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
            for (; i < tiles; i++)
            {
                __fp16* tmpptr = tm2.row<__fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);

                const __fp16* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 8;

                for (int q = 0; q < inch; q++)
                {
                    asm volatile(
                        "prfm pldl1keep, [%0, #128] \n"
                        "ld1 {v0.8h}, [%0] \n"
                        "st1 {v0.8h}, [%1], #16 \n"
                        : "=r"(r0),    // %0
                        "=r"(tmpptr) // %1
                        : "0"(r0),
                        "1"(tmpptr)
                        : "memory", "v0");
                    r0 += bottom_blob_tm.cstep * 8;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 2u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            __fp16* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 11 < tiles; i += 12)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 12);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 12-tile GEMM micro-kernel: accumulators v20-v31 hold
                    // 12 tiles x 8 output lanes; loops over inch (8 lanes/step)
                    asm volatile(
                        "eor v20.16b, v20.16b, v20.16b \n"
                        "eor v21.16b, v21.16b, v21.16b \n"
                        "eor v22.16b, v22.16b, v22.16b \n"
                        "eor v23.16b, v23.16b, v23.16b \n"
                        "eor v24.16b, v24.16b, v24.16b \n"
                        "eor v25.16b, v25.16b, v25.16b \n"
                        "eor v26.16b, v26.16b, v26.16b \n"
                        "eor v27.16b, v27.16b, v27.16b \n"
                        "eor v28.16b, v28.16b, v28.16b \n"
                        "eor v29.16b, v29.16b, v29.16b \n"
                        "eor v30.16b, v30.16b, v30.16b \n"
                        "eor v31.16b, v31.16b, v31.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123
                        "fmla v20.8h, v12.8h, v0.h[0] \n"
                        "fmla v21.8h, v12.8h, v0.h[1] \n"
                        "fmla v22.8h, v12.8h, v0.h[2] \n"
                        "fmla v23.8h, v12.8h, v0.h[3] \n"
                        "fmla v24.8h, v12.8h, v0.h[4] \n"
                        "fmla v25.8h, v12.8h, v0.h[5] \n"
                        "fmla v26.8h, v12.8h, v0.h[6] \n"
                        "fmla v27.8h, v12.8h, v0.h[7] \n"
                        "fmla v28.8h, v12.8h, v1.h[0] \n"
                        "fmla v29.8h, v12.8h, v1.h[1] \n"
                        "fmla v30.8h, v12.8h, v1.h[2] \n"
                        "fmla v31.8h, v12.8h, v1.h[3] \n"
                        "fmla v20.8h, v13.8h, v1.h[4] \n"
                        "fmla v21.8h, v13.8h, v1.h[5] \n"
                        "fmla v22.8h, v13.8h, v1.h[6] \n"
                        "fmla v23.8h, v13.8h, v1.h[7] \n"
                        "fmla v24.8h, v13.8h, v2.h[0] \n"
                        "fmla v25.8h, v13.8h, v2.h[1] \n"
                        "fmla v26.8h, v13.8h, v2.h[2] \n"
                        "fmla v27.8h, v13.8h, v2.h[3] \n"
                        "fmla v28.8h, v13.8h, v2.h[4] \n"
                        "fmla v29.8h, v13.8h, v2.h[5] \n"
                        "fmla v30.8h, v13.8h, v2.h[6] \n"
                        "fmla v31.8h, v13.8h, v2.h[7] \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
                        "fmla v20.8h, v14.8h, v3.h[0] \n"
                        "fmla v21.8h, v14.8h, v3.h[1] \n"
                        "fmla v22.8h, v14.8h, v3.h[2] \n"
                        "fmla v23.8h, v14.8h, v3.h[3] \n"
                        "fmla v24.8h, v14.8h, v3.h[4] \n"
                        "fmla v25.8h, v14.8h, v3.h[5] \n"
                        "fmla v26.8h, v14.8h, v3.h[6] \n"
                        "fmla v27.8h, v14.8h, v3.h[7] \n"
                        "fmla v28.8h, v14.8h, v4.h[0] \n"
                        "fmla v29.8h, v14.8h, v4.h[1] \n"
                        "fmla v30.8h, v14.8h, v4.h[2] \n"
                        "fmla v31.8h, v14.8h, v4.h[3] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567
                        "fmla v20.8h, v15.8h, v4.h[4] \n"
                        "fmla v21.8h, v15.8h, v4.h[5] \n"
                        "fmla v22.8h, v15.8h, v4.h[6] \n"
                        "fmla v23.8h, v15.8h, v4.h[7] \n"
                        "fmla v24.8h, v15.8h, v5.h[0] \n"
                        "fmla v25.8h, v15.8h, v5.h[1] \n"
                        "fmla v26.8h, v15.8h, v5.h[2] \n"
                        "fmla v27.8h, v15.8h, v5.h[3] \n"
                        "fmla v28.8h, v15.8h, v5.h[4] \n"
                        "fmla v29.8h, v15.8h, v5.h[5] \n"
                        "fmla v30.8h, v15.8h, v5.h[6] \n"
                        "fmla v31.8h, v15.8h, v5.h[7] \n"
                        "fmla v20.8h, v16.8h, v6.h[0] \n"
                        "fmla v21.8h, v16.8h, v6.h[1] \n"
                        "fmla v22.8h, v16.8h, v6.h[2] \n"
                        "fmla v23.8h, v16.8h, v6.h[3] \n"
                        "fmla v24.8h, v16.8h, v6.h[4] \n"
                        "fmla v25.8h, v16.8h, v6.h[5] \n"
                        "fmla v26.8h, v16.8h, v6.h[6] \n"
                        "fmla v27.8h, v16.8h, v6.h[7] \n"
                        "fmla v28.8h, v16.8h, v7.h[0] \n"
                        "fmla v29.8h, v16.8h, v7.h[1] \n"
                        "fmla v30.8h, v16.8h, v7.h[2] \n"
                        "fmla v31.8h, v16.8h, v7.h[3] \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011
                        "fmla v20.8h, v17.8h, v7.h[4] \n"
                        "fmla v21.8h, v17.8h, v7.h[5] \n"
                        "fmla v22.8h, v17.8h, v7.h[6] \n"
                        "fmla v23.8h, v17.8h, v7.h[7] \n"
                        "fmla v24.8h, v17.8h, v8.h[0] \n"
                        "fmla v25.8h, v17.8h, v8.h[1] \n"
                        "fmla v26.8h, v17.8h, v8.h[2] \n"
                        "fmla v27.8h, v17.8h, v8.h[3] \n"
                        "fmla v28.8h, v17.8h, v8.h[4] \n"
                        "fmla v29.8h, v17.8h, v8.h[5] \n"
                        "fmla v30.8h, v17.8h, v8.h[6] \n"
                        "fmla v31.8h, v17.8h, v8.h[7] \n"
                        "fmla v20.8h, v18.8h, v9.h[0] \n"
                        "fmla v21.8h, v18.8h, v9.h[1] \n"
                        "fmla v22.8h, v18.8h, v9.h[2] \n"
                        "fmla v23.8h, v18.8h, v9.h[3] \n"
                        "fmla v24.8h, v18.8h, v9.h[4] \n"
                        "fmla v25.8h, v18.8h, v9.h[5] \n"
                        "fmla v26.8h, v18.8h, v9.h[6] \n"
                        "fmla v27.8h, v18.8h, v9.h[7] \n"
                        "fmla v28.8h, v18.8h, v10.h[0] \n"
                        "fmla v29.8h, v18.8h, v10.h[1] \n"
                        "fmla v30.8h, v18.8h, v10.h[2] \n"
                        "fmla v31.8h, v18.8h, v10.h[3] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v20.8h, v19.8h, v10.h[4] \n"
                        "fmla v21.8h, v19.8h, v10.h[5] \n"
                        "fmla v22.8h, v19.8h, v10.h[6] \n"
                        "fmla v23.8h, v19.8h, v10.h[7] \n"
                        "fmla v24.8h, v19.8h, v11.h[0] \n"
                        "fmla v25.8h, v19.8h, v11.h[1] \n"
                        "fmla v26.8h, v19.8h, v11.h[2] \n"
                        "fmla v27.8h, v19.8h, v11.h[3] \n"
                        "fmla v28.8h, v19.8h, v11.h[4] \n"
                        "fmla v29.8h, v19.8h, v11.h[5] \n"
                        "fmla v30.8h, v19.8h, v11.h[6] \n"
                        "fmla v31.8h, v19.8h, v11.h[7] \n"
                        "bne 0b \n"
                        "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                        "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
                        "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(k0)          // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(k0)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
                }
                for (; i + 7 < tiles; i += 8)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 8-tile micro-kernel: accumulators v16-v23
                    asm volatile(
                        "eor v16.16b, v16.16b, v16.16b \n"
                        "eor v17.16b, v17.16b, v17.16b \n"
                        "eor v18.16b, v18.16b, v18.16b \n"
                        "eor v19.16b, v19.16b, v19.16b \n"
                        "eor v20.16b, v20.16b, v20.16b \n"
                        "eor v21.16b, v21.16b, v21.16b \n"
                        "eor v22.16b, v22.16b, v22.16b \n"
                        "eor v23.16b, v23.16b, v23.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
                        "fmla v16.8h, v8.8h, v0.h[0] \n"
                        "fmla v17.8h, v8.8h, v0.h[1] \n"
                        "fmla v18.8h, v8.8h, v0.h[2] \n"
                        "fmla v19.8h, v8.8h, v0.h[3] \n"
                        "fmla v20.8h, v8.8h, v0.h[4] \n"
                        "fmla v21.8h, v8.8h, v0.h[5] \n"
                        "fmla v22.8h, v8.8h, v0.h[6] \n"
                        "fmla v23.8h, v8.8h, v0.h[7] \n"
                        "fmla v16.8h, v9.8h, v1.h[0] \n"
                        "fmla v17.8h, v9.8h, v1.h[1] \n"
                        "fmla v18.8h, v9.8h, v1.h[2] \n"
                        "fmla v19.8h, v9.8h, v1.h[3] \n"
                        "fmla v20.8h, v9.8h, v1.h[4] \n"
                        "fmla v21.8h, v9.8h, v1.h[5] \n"
                        "fmla v22.8h, v9.8h, v1.h[6] \n"
                        "fmla v23.8h, v9.8h, v1.h[7] \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
                        "fmla v16.8h, v10.8h, v2.h[0] \n"
                        "fmla v17.8h, v10.8h, v2.h[1] \n"
                        "fmla v18.8h, v10.8h, v2.h[2] \n"
                        "fmla v19.8h, v10.8h, v2.h[3] \n"
                        "fmla v20.8h, v10.8h, v2.h[4] \n"
                        "fmla v21.8h, v10.8h, v2.h[5] \n"
                        "fmla v22.8h, v10.8h, v2.h[6] \n"
                        "fmla v23.8h, v10.8h, v2.h[7] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
                        "fmla v16.8h, v11.8h, v3.h[0] \n"
                        "fmla v17.8h, v11.8h, v3.h[1] \n"
                        "fmla v18.8h, v11.8h, v3.h[2] \n"
                        "fmla v19.8h, v11.8h, v3.h[3] \n"
                        "fmla v20.8h, v11.8h, v3.h[4] \n"
                        "fmla v21.8h, v11.8h, v3.h[5] \n"
                        "fmla v22.8h, v11.8h, v3.h[6] \n"
                        "fmla v23.8h, v11.8h, v3.h[7] \n"
                        "fmla v16.8h, v12.8h, v4.h[0] \n"
                        "fmla v17.8h, v12.8h, v4.h[1] \n"
                        "fmla v18.8h, v12.8h, v4.h[2] \n"
                        "fmla v19.8h, v12.8h, v4.h[3] \n"
                        "fmla v20.8h, v12.8h, v4.h[4] \n"
                        "fmla v21.8h, v12.8h, v4.h[5] \n"
                        "fmla v22.8h, v12.8h, v4.h[6] \n"
                        "fmla v23.8h, v12.8h, v4.h[7] \n"
                        "fmla v16.8h, v13.8h, v5.h[0] \n"
                        "fmla v17.8h, v13.8h, v5.h[1] \n"
                        "fmla v18.8h, v13.8h, v5.h[2] \n"
                        "fmla v19.8h, v13.8h, v5.h[3] \n"
                        "fmla v20.8h, v13.8h, v5.h[4] \n"
                        "fmla v21.8h, v13.8h, v5.h[5] \n"
                        "fmla v22.8h, v13.8h, v5.h[6] \n"
                        "fmla v23.8h, v13.8h, v5.h[7] \n"
                        "fmla v16.8h, v14.8h, v6.h[0] \n"
                        "fmla v17.8h, v14.8h, v6.h[1] \n"
                        "fmla v18.8h, v14.8h, v6.h[2] \n"
                        "fmla v19.8h, v14.8h, v6.h[3] \n"
                        "fmla v20.8h, v14.8h, v6.h[4] \n"
                        "fmla v21.8h, v14.8h, v6.h[5] \n"
                        "fmla v22.8h, v14.8h, v6.h[6] \n"
                        "fmla v23.8h, v14.8h, v6.h[7] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v16.8h, v15.8h, v7.h[0] \n"
                        "fmla v17.8h, v15.8h, v7.h[1] \n"
                        "fmla v18.8h, v15.8h, v7.h[2] \n"
                        "fmla v19.8h, v15.8h, v7.h[3] \n"
                        "fmla v20.8h, v15.8h, v7.h[4] \n"
                        "fmla v21.8h, v15.8h, v7.h[5] \n"
                        "fmla v22.8h, v15.8h, v7.h[6] \n"
                        "fmla v23.8h, v15.8h, v7.h[7] \n"
                        "bne 0b \n"
                        "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                        "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(k0)          // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(k0)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 4-tile micro-kernel: accumulators v16-v19
                    asm volatile(
                        "eor v16.16b, v16.16b, v16.16b \n"
                        "eor v17.16b, v17.16b, v17.16b \n"
                        "eor v18.16b, v18.16b, v18.16b \n"
                        "eor v19.16b, v19.16b, v19.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
                        "fmla v16.8h, v8.8h, v0.h[0] \n"
                        "fmla v17.8h, v8.8h, v1.h[0] \n"
                        "fmla v18.8h, v8.8h, v2.h[0] \n"
                        "fmla v19.8h, v8.8h, v3.h[0] \n"
                        "fmla v16.8h, v9.8h, v0.h[1] \n"
                        "fmla v17.8h, v9.8h, v1.h[1] \n"
                        "fmla v18.8h, v9.8h, v2.h[1] \n"
                        "fmla v19.8h, v9.8h, v3.h[1] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
                        "fmla v16.8h, v10.8h, v0.h[2] \n"
                        "fmla v17.8h, v10.8h, v1.h[2] \n"
                        "fmla v18.8h, v10.8h, v2.h[2] \n"
                        "fmla v19.8h, v10.8h, v3.h[2] \n"
                        "fmla v16.8h, v11.8h, v0.h[3] \n"
                        "fmla v17.8h, v11.8h, v1.h[3] \n"
                        "fmla v18.8h, v11.8h, v2.h[3] \n"
                        "fmla v19.8h, v11.8h, v3.h[3] \n"
                        "fmla v16.8h, v12.8h, v0.h[4] \n"
                        "fmla v17.8h, v12.8h, v1.h[4] \n"
                        "fmla v18.8h, v12.8h, v2.h[4] \n"
                        "fmla v19.8h, v12.8h, v3.h[4] \n"
                        "fmla v16.8h, v13.8h, v0.h[5] \n"
                        "fmla v17.8h, v13.8h, v1.h[5] \n"
                        "fmla v18.8h, v13.8h, v2.h[5] \n"
                        "fmla v19.8h, v13.8h, v3.h[5] \n"
                        "fmla v16.8h, v14.8h, v0.h[6] \n"
                        "fmla v17.8h, v14.8h, v1.h[6] \n"
                        "fmla v18.8h, v14.8h, v2.h[6] \n"
                        "fmla v19.8h, v14.8h, v3.h[6] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v16.8h, v15.8h, v0.h[7] \n"
                        "fmla v17.8h, v15.8h, v1.h[7] \n"
                        "fmla v18.8h, v15.8h, v2.h[7] \n"
                        "fmla v19.8h, v15.8h, v3.h[7] \n"
                        "bne 0b \n"
                        "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(k0)          // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(k0)
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 2-tile micro-kernel: accumulators v16-v17
                    asm volatile(
                        "eor v16.16b, v16.16b, v16.16b \n"
                        "eor v17.16b, v17.16b, v17.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
                        "fmla v16.8h, v8.8h, v0.h[0] \n"
                        "fmla v17.8h, v8.8h, v1.h[0] \n"
                        "fmla v16.8h, v9.8h, v0.h[1] \n"
                        "fmla v17.8h, v9.8h, v1.h[1] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
                        "fmla v16.8h, v10.8h, v0.h[2] \n"
                        "fmla v17.8h, v10.8h, v1.h[2] \n"
                        "fmla v16.8h, v11.8h, v0.h[3] \n"
                        "fmla v17.8h, v11.8h, v1.h[3] \n"
                        "fmla v16.8h, v12.8h, v0.h[4] \n"
                        "fmla v17.8h, v12.8h, v1.h[4] \n"
                        "fmla v16.8h, v13.8h, v0.h[5] \n"
                        "fmla v17.8h, v13.8h, v1.h[5] \n"
                        "fmla v16.8h, v14.8h, v0.h[6] \n"
                        "fmla v17.8h, v14.8h, v1.h[6] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v16.8h, v15.8h, v0.h[7] \n"
                        "fmla v17.8h, v15.8h, v1.h[7] \n"
                        "bne 0b \n"
                        "st1 {v16.8h, v17.8h}, [%1], #32 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(k0)          // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(k0)
                        : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
                }
                for (; i < tiles; i++)
                {
                    const __fp16* r0 = bb2.row<const __fp16>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
                    const __fp16* k0 = kernel0_tm.row<const __fp16>(r);

                    int nn = inch; // inch always > 0

                    // 1-tile micro-kernel: single accumulator v16
                    asm volatile(
                        "eor v16.16b, v16.16b, v16.16b \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v0.8h}, [%2], #16 \n" // r0
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
                        "fmla v16.8h, v8.8h, v0.h[0] \n"
                        "fmla v16.8h, v9.8h, v0.h[1] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
                        "fmla v16.8h, v10.8h, v0.h[2] \n"
                        "fmla v16.8h, v11.8h, v0.h[3] \n"
                        "fmla v16.8h, v12.8h, v0.h[4] \n"
                        "fmla v16.8h, v13.8h, v0.h[5] \n"
                        "subs %w0, %w0, #1 \n"
                        "fmla v16.8h, v14.8h, v0.h[6] \n"
                        "fmla v16.8h, v15.8h, v0.h[7] \n"
                        "bne 0b \n"
                        "st1 {v16.8h}, [%1], #16 \n"
                        : "=r"(nn),         // %0
                        "=r"(output0_tm), // %1
                        "=r"(r0),         // %2
                        "=r"(k0)          // %3
                        : "0"(nn),
                        "1"(output0_tm),
                        "2"(r0),
                        "3"(k0)
                        : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        // const float otm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + (r01 + r02) + (r03 + r04)
        // 1 = (r01 - r02) + (r03 - r04) * 2
        // 2 = (r01 + r02) + (r03 + r04) * 4
        // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;
        const int tiles = w_tm / 6 * h_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            // const float bias0 = bias ? bias[p] : 0.f;
            // bias is applied once here, after the inverse transform
            float16x8_t _bias0 = bias ? vld1q_f16((const __fp16*)bias + p * 8) : vdupq_n_f16(0.f);

            // staging buffer: A^T applied along rows first, then along columns
            __fp16 tmp[4][6][8];

            // tile
            for (int i = 0; i < outh / 4; i++)
            {
                for (int j = 0; j < outw / 4; j++)
                {
                    // top_blob_tm.create(tiles, 36, outch, elemsize, elempack);

                    const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 6 + j) * 8;
                    const __fp16* output0_tm_1 = output0_tm_0 + tiles * 8;
                    const __fp16* output0_tm_2 = output0_tm_0 + tiles * 16;
                    const __fp16* output0_tm_3 = output0_tm_0 + tiles * 24;
                    const __fp16* output0_tm_4 = output0_tm_0 + tiles * 32;
                    const __fp16* output0_tm_5 = output0_tm_0 + tiles * 40;

                    __fp16* output0 = out0.row<__fp16>(i * 4) + (j * 4) * 8;

                    // TODO neon optimize
                    for (int m = 0; m < 6; m++)
                    {
                        float16x8_t _out0tm0 = vld1q_f16(output0_tm_0);
                        float16x8_t _out0tm1 = vld1q_f16(output0_tm_1);
                        float16x8_t _out0tm2 = vld1q_f16(output0_tm_2);
                        float16x8_t _out0tm3 = vld1q_f16(output0_tm_3);
                        float16x8_t _out0tm4 = vld1q_f16(output0_tm_4);
                        float16x8_t _out0tm5 = vld1q_f16(output0_tm_5);

                        float16x8_t _tmp02a = vaddq_f16(_out0tm1, _out0tm2);
                        float16x8_t _tmp13a = vsubq_f16(_out0tm1, _out0tm2);

                        float16x8_t _tmp02b = vaddq_f16(_out0tm3, _out0tm4);
                        float16x8_t _tmp13b = vsubq_f16(_out0tm3, _out0tm4);

                        float16x8_t _tmp0m = vaddq_f16(vaddq_f16(_out0tm0, _tmp02a), _tmp02b);
                        float16x8_t _tmp1m = vfmaq_n_f16(_tmp13a, _tmp13b, 2.f);
                        float16x8_t _tmp2m = vfmaq_n_f16(_tmp02a, _tmp02b, 4.f);
                        float16x8_t _tmp3m = vfmaq_n_f16(vaddq_f16(_out0tm5, _tmp13a), _tmp13b, 8.f);

                        vst1q_f16(tmp[0][m], _tmp0m);
                        vst1q_f16(tmp[1][m], _tmp1m);
                        vst1q_f16(tmp[2][m], _tmp2m);
                        vst1q_f16(tmp[3][m], _tmp3m);

                        output0_tm_0 += tiles * 48;
                        output0_tm_1 += tiles * 48;
                        output0_tm_2 += tiles * 48;
                        output0_tm_3 += tiles * 48;
                        output0_tm_4 += tiles * 48;
                        output0_tm_5 += tiles * 48;
                    }

                    for (int m = 0; m < 4; m++)
                    {
                        float16x8_t _tmp00 = vld1q_f16(tmp[m][0]);
                        float16x8_t _tmp01 = vld1q_f16(tmp[m][1]);
                        float16x8_t _tmp02 = vld1q_f16(tmp[m][2]);
                        float16x8_t _tmp03 = vld1q_f16(tmp[m][3]);
                        float16x8_t _tmp04 = vld1q_f16(tmp[m][4]);
                        float16x8_t _tmp05 = vld1q_f16(tmp[m][5]);

                        float16x8_t _tmp02a = vaddq_f16(_tmp01, _tmp02);
                        float16x8_t _tmp13a = vsubq_f16(_tmp01, _tmp02);

                        float16x8_t _tmp02b = vaddq_f16(_tmp03, _tmp04);
                        float16x8_t _tmp13b = vsubq_f16(_tmp03, _tmp04);

                        float16x8_t _out00 = vaddq_f16(_bias0, vaddq_f16(vaddq_f16(_tmp00, _tmp02a), _tmp02b));
                        float16x8_t _out01 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp13a, _tmp13b, 2.f));
                        float16x8_t _out02 = vaddq_f16(_bias0, vfmaq_n_f16(_tmp02a, _tmp02b, 4.f));
                        float16x8_t _out03 = vaddq_f16(_bias0, vfmaq_n_f16(vaddq_f16(_tmp05, _tmp13a), _tmp13b, 8.f));

                        vst1q_f16(output0, _out00);
                        vst1q_f16(output0 + 8, _out01);
                        vst1q_f16(output0 + 16, _out02);
                        vst1q_f16(output0 + 24, _out03);

                        output0 += outw * 8;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// conv3x3s1_pack8_fp16sa_neon: direct 3x3 convolution, stride 1, on fp16
// data in pack8 layout (8 channels interleaved per spatial element).
// For every output channel p the output is seeded with the bias, then the
// contribution of each input channel q is accumulated in place.  The inner
// loops are hand-written AArch64 NEON fp16 assembly computing 4 / 2 / 1
// output pixels per iteration.
// NOTE(review): assumes bottom_blob is already padded so each output pixel
// can read a full 3x3 input window (rows i, i+1, i+2) — confirm with caller.
static void conv3x3s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// bias is stored pack8: 8 consecutive __fp16 values per output channel
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
// broadcast this channel's 8 bias lanes; zero vector when no bias given
float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
out0.fill(_bias0);
// accumulate one input channel at a time into out0
for (int q = 0; q < inch; q++)
{
__fp16* outptr0 = out0.row<__fp16>(0);
const Mat img0 = bottom_blob.channel(q);
// three consecutive input rows feeding the 3x3 window
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
// 3x3 * 8-in * 8-out fp16 weights for this (p, q) pair
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
// main loop: 4 output pixels per iteration.
// v28-v31 hold the four running sums; v16-v23 cycle through the
// 9 kernel taps (8 vectors each); input pixels are loaded per row.
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.8h, v5.8h}, [%1] \n" // r04 r05
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v16.8h, v2.h[0] \n"
"fmla v31.8h, v16.8h, v3.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v18.8h, v3.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v1.h[3] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v20.8h, v2.h[4] \n"
"fmla v31.8h, v20.8h, v3.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v22.8h, v3.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v1.h[7] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v3.h[0] \n"
"fmla v31.8h, v16.8h, v4.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v18.8h, v4.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v3.h[4] \n"
"fmla v31.8h, v20.8h, v4.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v22.8h, v4.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v5.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v5.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v5.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v5.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.8h, v13.8h}, [%2] \n" // r14 r15
"fmla v28.8h, v16.8h, v8.h[0] \n"
"fmla v29.8h, v16.8h, v9.h[0] \n"
"fmla v30.8h, v16.8h, v10.h[0] \n"
"fmla v31.8h, v16.8h, v11.h[0] \n"
"fmla v28.8h, v17.8h, v8.h[1] \n"
"fmla v29.8h, v17.8h, v9.h[1] \n"
"fmla v30.8h, v17.8h, v10.h[1] \n"
"fmla v31.8h, v17.8h, v11.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v8.h[2] \n"
"fmla v29.8h, v18.8h, v9.h[2] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v11.h[2] \n"
"fmla v28.8h, v19.8h, v8.h[3] \n"
"fmla v29.8h, v19.8h, v9.h[3] \n"
"fmla v30.8h, v19.8h, v10.h[3] \n"
"fmla v31.8h, v19.8h, v11.h[3] \n"
"fmla v28.8h, v20.8h, v8.h[4] \n"
"fmla v29.8h, v20.8h, v9.h[4] \n"
"fmla v30.8h, v20.8h, v10.h[4] \n"
"fmla v31.8h, v20.8h, v11.h[4] \n"
"fmla v28.8h, v21.8h, v8.h[5] \n"
"fmla v29.8h, v21.8h, v9.h[5] \n"
"fmla v30.8h, v21.8h, v10.h[5] \n"
"fmla v31.8h, v21.8h, v11.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v8.h[6] \n"
"fmla v29.8h, v22.8h, v9.h[6] \n"
"fmla v30.8h, v22.8h, v10.h[6] \n"
"fmla v31.8h, v22.8h, v11.h[6] \n"
"fmla v28.8h, v23.8h, v8.h[7] \n"
"fmla v29.8h, v23.8h, v9.h[7] \n"
"fmla v30.8h, v23.8h, v10.h[7] \n"
"fmla v31.8h, v23.8h, v11.h[7] \n"
"fmla v28.8h, v16.8h, v9.h[0] \n"
"fmla v29.8h, v16.8h, v10.h[0] \n"
"fmla v30.8h, v16.8h, v11.h[0] \n"
"fmla v31.8h, v16.8h, v12.h[0] \n"
"fmla v28.8h, v17.8h, v9.h[1] \n"
"fmla v29.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v17.8h, v11.h[1] \n"
"fmla v31.8h, v17.8h, v12.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v9.h[2] \n"
"fmla v29.8h, v18.8h, v10.h[2] \n"
"fmla v30.8h, v18.8h, v11.h[2] \n"
"fmla v31.8h, v18.8h, v12.h[2] \n"
"fmla v28.8h, v19.8h, v9.h[3] \n"
"fmla v29.8h, v19.8h, v10.h[3] \n"
"fmla v30.8h, v19.8h, v11.h[3] \n"
"fmla v31.8h, v19.8h, v12.h[3] \n"
"fmla v28.8h, v20.8h, v9.h[4] \n"
"fmla v29.8h, v20.8h, v10.h[4] \n"
"fmla v30.8h, v20.8h, v11.h[4] \n"
"fmla v31.8h, v20.8h, v12.h[4] \n"
"fmla v28.8h, v21.8h, v9.h[5] \n"
"fmla v29.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v21.8h, v11.h[5] \n"
"fmla v31.8h, v21.8h, v12.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v9.h[6] \n"
"fmla v29.8h, v22.8h, v10.h[6] \n"
"fmla v30.8h, v22.8h, v11.h[6] \n"
"fmla v31.8h, v22.8h, v12.h[6] \n"
"fmla v28.8h, v23.8h, v9.h[7] \n"
"fmla v29.8h, v23.8h, v10.h[7] \n"
"fmla v30.8h, v23.8h, v11.h[7] \n"
"fmla v31.8h, v23.8h, v12.h[7] \n"
"fmla v28.8h, v16.8h, v10.h[0] \n"
"fmla v29.8h, v16.8h, v11.h[0] \n"
"fmla v30.8h, v16.8h, v12.h[0] \n"
"fmla v31.8h, v16.8h, v13.h[0] \n"
"fmla v28.8h, v17.8h, v10.h[1] \n"
"fmla v29.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v17.8h, v12.h[1] \n"
"fmla v31.8h, v17.8h, v13.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v10.h[2] \n"
"fmla v29.8h, v18.8h, v11.h[2] \n"
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v18.8h, v13.h[2] \n"
"fmla v28.8h, v19.8h, v10.h[3] \n"
"fmla v29.8h, v19.8h, v11.h[3] \n"
"fmla v30.8h, v19.8h, v12.h[3] \n"
"fmla v31.8h, v19.8h, v13.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v10.h[4] \n"
"fmla v29.8h, v20.8h, v11.h[4] \n"
"fmla v30.8h, v20.8h, v12.h[4] \n"
"fmla v31.8h, v20.8h, v13.h[4] \n"
"fmla v28.8h, v21.8h, v10.h[5] \n"
"fmla v29.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v21.8h, v12.h[5] \n"
"fmla v31.8h, v21.8h, v13.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v10.h[6] \n"
"fmla v29.8h, v22.8h, v11.h[6] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v22.8h, v13.h[6] \n"
"fmla v28.8h, v23.8h, v10.h[7] \n"
"fmla v29.8h, v23.8h, v11.h[7] \n"
"fmla v30.8h, v23.8h, v12.h[7] \n"
"fmla v31.8h, v23.8h, v13.h[7] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v4.8h, v5.8h}, [%3] \n" // r24 r25
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v16.8h, v2.h[0] \n"
"fmla v31.8h, v16.8h, v3.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v18.8h, v3.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v1.h[3] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v20.8h, v2.h[4] \n"
"fmla v31.8h, v20.8h, v3.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v22.8h, v3.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v1.h[7] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v3.h[0] \n"
"fmla v31.8h, v16.8h, v4.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v3.h[1] \n"
"fmla v31.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v18.8h, v4.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v3.h[3] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v3.h[4] \n"
"fmla v31.8h, v20.8h, v4.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v3.h[5] \n"
"fmla v31.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v22.8h, v4.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v3.h[7] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v5.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
// final tap load: no post-increment, so kptr has advanced 17*64 = 1088 bytes total
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v5.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v5.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v5.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
// tail loop: 2 output pixels per iteration (sums in v28/v29, with the
// previous output contents loaded into v30/v31 and folded in at the end)
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1] \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v30.8h, v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2] \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v16.8h, v5.h[0] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v4.h[2] \n"
"fmla v29.8h, v18.8h, v5.h[2] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v20.8h, v5.h[4] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v4.h[6] \n"
"fmla v29.8h, v22.8h, v5.h[6] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[0] \n"
"fmla v29.8h, v16.8h, v6.h[0] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v5.h[2] \n"
"fmla v29.8h, v18.8h, v6.h[2] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v5.h[4] \n"
"fmla v29.8h, v20.8h, v6.h[4] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v5.h[6] \n"
"fmla v29.8h, v22.8h, v6.h[6] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v6.h[0] \n"
"fmla v29.8h, v16.8h, v7.h[0] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v6.h[2] \n"
"fmla v29.8h, v18.8h, v7.h[2] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3] \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v6.h[4] \n"
"fmla v29.8h, v20.8h, v7.h[4] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v6.h[6] \n"
"fmla v29.8h, v22.8h, v7.h[6] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v1.h[0] \n"
"fmla v30.8h, v17.8h, v0.h[1] \n"
"fmla v31.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v1.h[2] \n"
"fmla v30.8h, v19.8h, v0.h[3] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v1.h[4] \n"
"fmla v30.8h, v21.8h, v0.h[5] \n"
"fmla v31.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v1.h[6] \n"
"fmla v30.8h, v23.8h, v0.h[7] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v17.8h, v1.h[1] \n"
"fmla v31.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v19.8h, v1.h[3] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v21.8h, v1.h[5] \n"
"fmla v31.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v23.8h, v1.h[7] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v17.8h, v2.h[1] \n"
"fmla v31.8h, v17.8h, v3.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v19.8h, v2.h[3] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v21.8h, v2.h[5] \n"
"fmla v31.8h, v21.8h, v3.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v23.8h, v2.h[7] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"add %1, %1, #32 \n"
"add %2, %2, #32 \n"
"add %3, %3, #32 \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"fadd v29.8h, v29.8h, v31.8h \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
// tail loop: 1 output pixel per iteration (four partial sums in
// v28-v31, reduced with fadd before the single store)
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v31.8h}, [%0] \n" // sum0
"fmul v28.8h, v16.8h, v0.h[0] \n"
"fmul v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmul v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"fmla v28.8h, v16.8h, v3.h[0] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v3.h[2] \n"
"fmla v31.8h, v19.8h, v3.h[3] \n"
"fmla v28.8h, v20.8h, v3.h[4] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v3.h[6] \n"
"fmla v31.8h, v23.8h, v3.h[7] \n"
"fmla v28.8h, v16.8h, v4.h[0] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v19.8h, v4.h[3] \n"
"fmla v28.8h, v20.8h, v4.h[4] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v23.8h, v4.h[7] \n"
"fmla v28.8h, v16.8h, v5.h[0] \n"
"fmla v29.8h, v17.8h, v5.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v19.8h, v5.h[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22
"fmla v28.8h, v20.8h, v5.h[4] \n"
"fmla v29.8h, v21.8h, v5.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v23.8h, v5.h[7] \n"
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v0.h[2] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v17.8h, v1.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2] \n"
"fmla v31.8h, v19.8h, v1.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v21.8h, v1.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6] \n"
"fmla v31.8h, v23.8h, v1.h[7] \n"
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v30.8h, v18.8h, v2.h[2] \n"
"fmla v31.8h, v19.8h, v2.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"add %1, %1, #16 \n"
"fmla v30.8h, v22.8h, v2.h[6] \n"
"fmla v31.8h, v23.8h, v2.h[7] \n"
"add %2, %2, #16 \n"
"fadd v28.8h, v28.8h, v29.8h \n"
"fadd v30.8h, v30.8h, v31.8h \n"
"add %3, %3, #16 \n"
"fadd v28.8h, v28.8h, v30.8h \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
// advance row pointers past the remaining 2 pixels (2 * 8 fp16 lanes)
// NOTE(review): presumably skips the 3x3 window's right padding so each
// pointer lands on the next row — implies input w == outw + 2; confirm
// against the caller's padding setup.
r0 += 16;
r1 += 16;
r2 += 16;
}
}
}
}
static void conv3x3s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = (w - 2 * outw + w) * 8;
const __fp16* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f);
out0.fill(_bias0);
for (int q = 0; q < inch; q++)
{
__fp16* outptr0 = out0;
const Mat img0 = bottom_blob.channel(q);
const __fp16* r0 = img0.row<const __fp16>(0);
const __fp16* r1 = img0.row<const __fp16>(1);
const __fp16* r2 = img0.row<const __fp16>(2);
const __fp16* kptr = kernel.channel(p).row<const __fp16>(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
asm volatile(
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1], #64 \n" // r04 r05 r06 r07
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v6.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v6.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v6.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v5.h[0] \n"
"fmla v31.8h, v16.8h, v7.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v18.8h, v7.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v5.h[4] \n"
"fmla v31.8h, v20.8h, v7.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v22.8h, v7.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v0.8h}, [%1] \n" // r08
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v16.8h, v6.h[0] \n"
"fmla v31.8h, v16.8h, v0.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v0.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v18.8h, v6.h[2] \n"
"fmla v31.8h, v18.8h, v0.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v4.h[3] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v20.8h, v6.h[4] \n"
"fmla v31.8h, v20.8h, v0.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v0.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v4.h[7] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2], #64 \n" // r14 r15 r16 r17
"fmla v28.8h, v16.8h, v8.h[0] \n"
"fmla v29.8h, v16.8h, v10.h[0] \n"
"fmla v30.8h, v16.8h, v12.h[0] \n"
"fmla v31.8h, v16.8h, v14.h[0] \n"
"fmla v28.8h, v17.8h, v8.h[1] \n"
"fmla v29.8h, v17.8h, v10.h[1] \n"
"fmla v30.8h, v17.8h, v12.h[1] \n"
"fmla v31.8h, v17.8h, v14.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v8.h[2] \n"
"fmla v29.8h, v18.8h, v10.h[2] \n"
"fmla v30.8h, v18.8h, v12.h[2] \n"
"fmla v31.8h, v18.8h, v14.h[2] \n"
"fmla v28.8h, v19.8h, v8.h[3] \n"
"fmla v29.8h, v19.8h, v10.h[3] \n"
"fmla v30.8h, v19.8h, v12.h[3] \n"
"fmla v31.8h, v19.8h, v14.h[3] \n"
"fmla v28.8h, v20.8h, v8.h[4] \n"
"fmla v29.8h, v20.8h, v10.h[4] \n"
"fmla v30.8h, v20.8h, v12.h[4] \n"
"fmla v31.8h, v20.8h, v14.h[4] \n"
"fmla v28.8h, v21.8h, v8.h[5] \n"
"fmla v29.8h, v21.8h, v10.h[5] \n"
"fmla v30.8h, v21.8h, v12.h[5] \n"
"fmla v31.8h, v21.8h, v14.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v8.h[6] \n"
"fmla v29.8h, v22.8h, v10.h[6] \n"
"fmla v30.8h, v22.8h, v12.h[6] \n"
"fmla v31.8h, v22.8h, v14.h[6] \n"
"fmla v28.8h, v23.8h, v8.h[7] \n"
"fmla v29.8h, v23.8h, v10.h[7] \n"
"fmla v30.8h, v23.8h, v12.h[7] \n"
"fmla v31.8h, v23.8h, v14.h[7] \n"
"fmla v28.8h, v16.8h, v9.h[0] \n"
"fmla v29.8h, v16.8h, v11.h[0] \n"
"fmla v30.8h, v16.8h, v13.h[0] \n"
"fmla v31.8h, v16.8h, v15.h[0] \n"
"fmla v28.8h, v17.8h, v9.h[1] \n"
"fmla v29.8h, v17.8h, v11.h[1] \n"
"fmla v30.8h, v17.8h, v13.h[1] \n"
"fmla v31.8h, v17.8h, v15.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v9.h[2] \n"
"fmla v29.8h, v18.8h, v11.h[2] \n"
"fmla v30.8h, v18.8h, v13.h[2] \n"
"fmla v31.8h, v18.8h, v15.h[2] \n"
"fmla v28.8h, v19.8h, v9.h[3] \n"
"fmla v29.8h, v19.8h, v11.h[3] \n"
"fmla v30.8h, v19.8h, v13.h[3] \n"
"fmla v31.8h, v19.8h, v15.h[3] \n"
"fmla v28.8h, v20.8h, v9.h[4] \n"
"fmla v29.8h, v20.8h, v11.h[4] \n"
"fmla v30.8h, v20.8h, v13.h[4] \n"
"fmla v31.8h, v20.8h, v15.h[4] \n"
"fmla v28.8h, v21.8h, v9.h[5] \n"
"fmla v29.8h, v21.8h, v11.h[5] \n"
"fmla v30.8h, v21.8h, v13.h[5] \n"
"fmla v31.8h, v21.8h, v15.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v9.h[6] \n"
"fmla v29.8h, v22.8h, v11.h[6] \n"
"fmla v30.8h, v22.8h, v13.h[6] \n"
"fmla v31.8h, v22.8h, v15.h[6] \n"
"fmla v28.8h, v23.8h, v9.h[7] \n"
"fmla v29.8h, v23.8h, v11.h[7] \n"
"fmla v30.8h, v23.8h, v13.h[7] \n"
"fmla v31.8h, v23.8h, v15.h[7] \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v8.8h}, [%2] \n" // r18
"fmla v28.8h, v16.8h, v10.h[0] \n"
"fmla v29.8h, v16.8h, v12.h[0] \n"
"fmla v30.8h, v16.8h, v14.h[0] \n"
"fmla v31.8h, v16.8h, v8.h[0] \n"
"fmla v28.8h, v17.8h, v10.h[1] \n"
"fmla v29.8h, v17.8h, v12.h[1] \n"
"fmla v30.8h, v17.8h, v14.h[1] \n"
"fmla v31.8h, v17.8h, v8.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v10.h[2] \n"
"fmla v29.8h, v18.8h, v12.h[2] \n"
"fmla v30.8h, v18.8h, v14.h[2] \n"
"fmla v31.8h, v18.8h, v8.h[2] \n"
"fmla v28.8h, v19.8h, v10.h[3] \n"
"fmla v29.8h, v19.8h, v12.h[3] \n"
"fmla v30.8h, v19.8h, v14.h[3] \n"
"fmla v31.8h, v19.8h, v8.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v10.h[4] \n"
"fmla v29.8h, v20.8h, v12.h[4] \n"
"fmla v30.8h, v20.8h, v14.h[4] \n"
"fmla v31.8h, v20.8h, v8.h[4] \n"
"fmla v28.8h, v21.8h, v10.h[5] \n"
"fmla v29.8h, v21.8h, v12.h[5] \n"
"fmla v30.8h, v21.8h, v14.h[5] \n"
"fmla v31.8h, v21.8h, v8.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v10.h[6] \n"
"fmla v29.8h, v22.8h, v12.h[6] \n"
"fmla v30.8h, v22.8h, v14.h[6] \n"
"fmla v31.8h, v22.8h, v8.h[6] \n"
"fmla v28.8h, v23.8h, v10.h[7] \n"
"fmla v29.8h, v23.8h, v12.h[7] \n"
"fmla v30.8h, v23.8h, v14.h[7] \n"
"fmla v31.8h, v23.8h, v8.h[7] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r24 r25 r26 r27
"fmla v28.8h, v16.8h, v0.h[0] \n"
"fmla v29.8h, v16.8h, v2.h[0] \n"
"fmla v30.8h, v16.8h, v4.h[0] \n"
"fmla v31.8h, v16.8h, v6.h[0] \n"
"fmla v28.8h, v17.8h, v0.h[1] \n"
"fmla v29.8h, v17.8h, v2.h[1] \n"
"fmla v30.8h, v17.8h, v4.h[1] \n"
"fmla v31.8h, v17.8h, v6.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2] \n"
"fmla v29.8h, v18.8h, v2.h[2] \n"
"fmla v30.8h, v18.8h, v4.h[2] \n"
"fmla v31.8h, v18.8h, v6.h[2] \n"
"fmla v28.8h, v19.8h, v0.h[3] \n"
"fmla v29.8h, v19.8h, v2.h[3] \n"
"fmla v30.8h, v19.8h, v4.h[3] \n"
"fmla v31.8h, v19.8h, v6.h[3] \n"
"fmla v28.8h, v20.8h, v0.h[4] \n"
"fmla v29.8h, v20.8h, v2.h[4] \n"
"fmla v30.8h, v20.8h, v4.h[4] \n"
"fmla v31.8h, v20.8h, v6.h[4] \n"
"fmla v28.8h, v21.8h, v0.h[5] \n"
"fmla v29.8h, v21.8h, v2.h[5] \n"
"fmla v30.8h, v21.8h, v4.h[5] \n"
"fmla v31.8h, v21.8h, v6.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6] \n"
"fmla v29.8h, v22.8h, v2.h[6] \n"
"fmla v30.8h, v22.8h, v4.h[6] \n"
"fmla v31.8h, v22.8h, v6.h[6] \n"
"fmla v28.8h, v23.8h, v0.h[7] \n"
"fmla v29.8h, v23.8h, v2.h[7] \n"
"fmla v30.8h, v23.8h, v4.h[7] \n"
"fmla v31.8h, v23.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v1.h[0] \n"
"fmla v29.8h, v16.8h, v3.h[0] \n"
"fmla v30.8h, v16.8h, v5.h[0] \n"
"fmla v31.8h, v16.8h, v7.h[0] \n"
"fmla v28.8h, v17.8h, v1.h[1] \n"
"fmla v29.8h, v17.8h, v3.h[1] \n"
"fmla v30.8h, v17.8h, v5.h[1] \n"
"fmla v31.8h, v17.8h, v7.h[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2] \n"
"fmla v29.8h, v18.8h, v3.h[2] \n"
"fmla v30.8h, v18.8h, v5.h[2] \n"
"fmla v31.8h, v18.8h, v7.h[2] \n"
"fmla v28.8h, v19.8h, v1.h[3] \n"
"fmla v29.8h, v19.8h, v3.h[3] \n"
"fmla v30.8h, v19.8h, v5.h[3] \n"
"fmla v31.8h, v19.8h, v7.h[3] \n"
"fmla v28.8h, v20.8h, v1.h[4] \n"
"fmla v29.8h, v20.8h, v3.h[4] \n"
"fmla v30.8h, v20.8h, v5.h[4] \n"
"fmla v31.8h, v20.8h, v7.h[4] \n"
"fmla v28.8h, v21.8h, v1.h[5] \n"
"fmla v29.8h, v21.8h, v3.h[5] \n"
"fmla v30.8h, v21.8h, v5.h[5] \n"
"fmla v31.8h, v21.8h, v7.h[5] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6] \n"
"fmla v29.8h, v22.8h, v3.h[6] \n"
"fmla v30.8h, v22.8h, v5.h[6] \n"
"fmla v31.8h, v22.8h, v7.h[6] \n"
"fmla v28.8h, v23.8h, v1.h[7] \n"
"fmla v29.8h, v23.8h, v3.h[7] \n"
"fmla v30.8h, v23.8h, v5.h[7] \n"
"fmla v31.8h, v23.8h, v7.h[7] \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.8h}, [%3] \n" // r28
"fmla v28.8h, v16.8h, v2.h[0] \n"
"fmla v29.8h, v16.8h, v4.h[0] \n"
"fmla v30.8h, v16.8h, v6.h[0] \n"
"fmla v31.8h, v16.8h, v0.h[0] \n"
"fmla v28.8h, v17.8h, v2.h[1] \n"
"fmla v29.8h, v17.8h, v4.h[1] \n"
"fmla v30.8h, v17.8h, v6.h[1] \n"
"fmla v31.8h, v17.8h, v0.h[1] \n"
// "prfm pldl1keep, [%4, #512] \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2] \n"
"fmla v29.8h, v18.8h, v4.h[2] \n"
"fmla v30.8h, v18.8h, v6.h[2] \n"
"fmla v31.8h, v18.8h, v0.h[2] \n"
"fmla v28.8h, v19.8h, v2.h[3] \n"
"fmla v29.8h, v19.8h, v4.h[3] \n"
"fmla v30.8h, v19.8h, v6.h[3] \n"
"fmla v31.8h, v19.8h, v0.h[3] \n"
"fmla v28.8h, v20.8h, v2.h[4] \n"
"fmla v29.8h, v20.8h, v4.h[4] \n"
"fmla v30.8h, v20.8h, v6.h[4] \n"
"fmla v31.8h, v20.8h, v0.h[4] \n"
"fmla v28.8h, v21.8h, v2.h[5] \n"
"fmla v29.8h, v21.8h, v4.h[5] \n"
"fmla v30.8h, v21.8h, v6.h[5] \n"
"fmla v31.8h, v21.8h, v0.h[5] \n"
"fmla v28.8h, v22.8h, v2.h[6] \n"
"fmla v29.8h, v22.8h, v4.h[6] \n"
"fmla v30.8h, v22.8h, v6.h[6] \n"
"fmla v31.8h, v22.8h, v0.h[6] \n"
"fmla v28.8h, v23.8h, v2.h[7] \n"
"fmla v29.8h, v23.8h, v4.h[7] \n"
"fmla v30.8h, v23.8h, v6.h[7] \n"
"fmla v31.8h, v23.8h, v0.h[7] \n"
"sub %4, %4, #1088 \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(kptr) // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
// NOTE(review): inner loop over PAIRS of output columns (j += 2) of an
// AArch64 fp16 pack8 convolution kernel (the loads consume three taps per
// input row and each output pair advances four input positions, which
// looks like a 3x3 stride-2 kernel -- confirm against the enclosing
// function, which is outside this view).
// Extended-asm operands: %0 = outptr0 (output, 8 halfs per position),
// %1/%2/%3 = input rows r0/r1/r2, %4 = kptr (weights).
// kptr is advanced 17 * 64 = 1088 bytes by the post-incrementing loads
// (the final 64-byte load does not post-increment) and rewound by 1088 at
// the end, so the same weight block is reused for every j.
// Accumulator scheme: v28/v30 both gather output column j and v29/v31
// column j+1 (v30/v31 start from the partial sums already stored at
// outptr0, v28/v29 start fresh via fmul); the two halves are merged by
// the final fadds.  Splitting each output across two registers keeps two
// independent fmla dependency chains in flight.
for (; j + 1 < outw; j += 2)
{
asm volatile(
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #512]     \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03
"prfm pldl1keep, [%0, #256]     \n"
"ld1 {v30.8h, v31.8h}, [%0]     \n" // sum0
// row 0: taps at r00/r01/r02 feed column j, r02/r03/r04 feed column j+1
"fmul v28.8h, v16.8h, v0.h[0]   \n"
"fmul v29.8h, v16.8h, v2.h[0]   \n"
"fmla v30.8h, v17.8h, v0.h[1]   \n"
"fmla v31.8h, v17.8h, v2.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2]   \n"
"fmla v29.8h, v18.8h, v2.h[2]   \n"
"fmla v30.8h, v19.8h, v0.h[3]   \n"
"fmla v31.8h, v19.8h, v2.h[3]   \n"
"fmla v28.8h, v20.8h, v0.h[4]   \n"
"fmla v29.8h, v20.8h, v2.h[4]   \n"
"fmla v30.8h, v21.8h, v0.h[5]   \n"
"fmla v31.8h, v21.8h, v2.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6]   \n"
"fmla v29.8h, v22.8h, v2.h[6]   \n"
"fmla v30.8h, v23.8h, v0.h[7]   \n"
"fmla v31.8h, v23.8h, v2.h[7]   \n"
"fmla v28.8h, v16.8h, v1.h[0]   \n"
"fmla v29.8h, v16.8h, v3.h[0]   \n"
"fmla v30.8h, v17.8h, v1.h[1]   \n"
"fmla v31.8h, v17.8h, v3.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2]   \n"
"fmla v29.8h, v18.8h, v3.h[2]   \n"
"fmla v30.8h, v19.8h, v1.h[3]   \n"
"fmla v31.8h, v19.8h, v3.h[3]   \n"
"prfm pldl1keep, [%1, #128]     \n"
"ld1 {v0.8h}, [%1]              \n" // r04
"fmla v28.8h, v20.8h, v1.h[4]   \n"
"fmla v29.8h, v20.8h, v3.h[4]   \n"
"fmla v30.8h, v21.8h, v1.h[5]   \n"
"fmla v31.8h, v21.8h, v3.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6]   \n"
"fmla v29.8h, v22.8h, v3.h[6]   \n"
"fmla v30.8h, v23.8h, v1.h[7]   \n"
"fmla v31.8h, v23.8h, v3.h[7]   \n"
"fmla v28.8h, v16.8h, v2.h[0]   \n"
"fmla v29.8h, v16.8h, v0.h[0]   \n" // note: v0 now holds r04
"fmla v30.8h, v17.8h, v2.h[1]   \n"
"fmla v31.8h, v17.8h, v0.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v2.h[2]   \n"
"fmla v29.8h, v18.8h, v0.h[2]   \n"
"fmla v30.8h, v19.8h, v2.h[3]   \n"
"fmla v31.8h, v19.8h, v0.h[3]   \n"
"prfm pldl1keep, [%2, #512]     \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r10 r11 r12 r13
"fmla v28.8h, v20.8h, v2.h[4]   \n"
"fmla v29.8h, v20.8h, v0.h[4]   \n"
"fmla v30.8h, v21.8h, v2.h[5]   \n"
"fmla v31.8h, v21.8h, v0.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v2.h[6]   \n"
"fmla v29.8h, v22.8h, v0.h[6]   \n"
"fmla v30.8h, v23.8h, v2.h[7]   \n"
"fmla v31.8h, v23.8h, v0.h[7]   \n"
// row 1 contribution (r10..r14)
"fmla v28.8h, v16.8h, v4.h[0]   \n"
"fmla v29.8h, v16.8h, v6.h[0]   \n"
"fmla v30.8h, v17.8h, v4.h[1]   \n"
"fmla v31.8h, v17.8h, v6.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v4.h[2]   \n"
"fmla v29.8h, v18.8h, v6.h[2]   \n"
"fmla v30.8h, v19.8h, v4.h[3]   \n"
"fmla v31.8h, v19.8h, v6.h[3]   \n"
"fmla v28.8h, v20.8h, v4.h[4]   \n"
"fmla v29.8h, v20.8h, v6.h[4]   \n"
"fmla v30.8h, v21.8h, v4.h[5]   \n"
"fmla v31.8h, v21.8h, v6.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v4.h[6]   \n"
"fmla v29.8h, v22.8h, v6.h[6]   \n"
"fmla v30.8h, v23.8h, v4.h[7]   \n"
"fmla v31.8h, v23.8h, v6.h[7]   \n"
"fmla v28.8h, v16.8h, v5.h[0]   \n"
"fmla v29.8h, v16.8h, v7.h[0]   \n"
"fmla v30.8h, v17.8h, v5.h[1]   \n"
"fmla v31.8h, v17.8h, v7.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v5.h[2]   \n"
"fmla v29.8h, v18.8h, v7.h[2]   \n"
"fmla v30.8h, v19.8h, v5.h[3]   \n"
"fmla v31.8h, v19.8h, v7.h[3]   \n"
"prfm pldl1keep, [%2, #128]     \n"
"ld1 {v4.8h}, [%2]              \n" // r14
"fmla v28.8h, v20.8h, v5.h[4]   \n"
"fmla v29.8h, v20.8h, v7.h[4]   \n"
"fmla v30.8h, v21.8h, v5.h[5]   \n"
"fmla v31.8h, v21.8h, v7.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v5.h[6]   \n"
"fmla v29.8h, v22.8h, v7.h[6]   \n"
"fmla v30.8h, v23.8h, v5.h[7]   \n"
"fmla v31.8h, v23.8h, v7.h[7]   \n"
"fmla v28.8h, v16.8h, v6.h[0]   \n"
"fmla v29.8h, v16.8h, v4.h[0]   \n" // note: v4 now holds r14
"fmla v30.8h, v17.8h, v6.h[1]   \n"
"fmla v31.8h, v17.8h, v4.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v6.h[2]   \n"
"fmla v29.8h, v18.8h, v4.h[2]   \n"
"fmla v30.8h, v19.8h, v6.h[3]   \n"
"fmla v31.8h, v19.8h, v4.h[3]   \n"
"prfm pldl1keep, [%3, #512]     \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23
"fmla v28.8h, v20.8h, v6.h[4]   \n"
"fmla v29.8h, v20.8h, v4.h[4]   \n"
"fmla v30.8h, v21.8h, v6.h[5]   \n"
"fmla v31.8h, v21.8h, v4.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v6.h[6]   \n"
"fmla v29.8h, v22.8h, v4.h[6]   \n"
"fmla v30.8h, v23.8h, v6.h[7]   \n"
"fmla v31.8h, v23.8h, v4.h[7]   \n"
// row 2 contribution (r20..r24)
"fmla v28.8h, v16.8h, v0.h[0]   \n"
"fmla v29.8h, v16.8h, v2.h[0]   \n"
"fmla v30.8h, v17.8h, v0.h[1]   \n"
"fmla v31.8h, v17.8h, v2.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v0.h[2]   \n"
"fmla v29.8h, v18.8h, v2.h[2]   \n"
"fmla v30.8h, v19.8h, v0.h[3]   \n"
"fmla v31.8h, v19.8h, v2.h[3]   \n"
"fmla v28.8h, v20.8h, v0.h[4]   \n"
"fmla v29.8h, v20.8h, v2.h[4]   \n"
"fmla v30.8h, v21.8h, v0.h[5]   \n"
"fmla v31.8h, v21.8h, v2.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v0.h[6]   \n"
"fmla v29.8h, v22.8h, v2.h[6]   \n"
"fmla v30.8h, v23.8h, v0.h[7]   \n"
"fmla v31.8h, v23.8h, v2.h[7]   \n"
"fmla v28.8h, v16.8h, v1.h[0]   \n"
"fmla v29.8h, v16.8h, v3.h[0]   \n"
"fmla v30.8h, v17.8h, v1.h[1]   \n"
"fmla v31.8h, v17.8h, v3.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v28.8h, v18.8h, v1.h[2]   \n"
"fmla v29.8h, v18.8h, v3.h[2]   \n"
"fmla v30.8h, v19.8h, v1.h[3]   \n"
"fmla v31.8h, v19.8h, v3.h[3]   \n"
"prfm pldl1keep, [%3, #128]     \n"
"ld1 {v0.8h}, [%3]              \n" // r24
"fmla v28.8h, v20.8h, v1.h[4]   \n"
"fmla v29.8h, v20.8h, v3.h[4]   \n"
"fmla v30.8h, v21.8h, v1.h[5]   \n"
"fmla v31.8h, v21.8h, v3.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v28.8h, v22.8h, v1.h[6]   \n"
"fmla v29.8h, v22.8h, v3.h[6]   \n"
"fmla v30.8h, v23.8h, v1.h[7]   \n"
"fmla v31.8h, v23.8h, v3.h[7]   \n"
"fmla v28.8h, v16.8h, v2.h[0]   \n"
"fmla v29.8h, v16.8h, v0.h[0]   \n" // note: v0 now holds r24
"fmla v30.8h, v17.8h, v2.h[1]   \n"
"fmla v31.8h, v17.8h, v0.h[1]   \n"
// "prfm pldl1keep, [%4, #512]       \n"
// final weight load does NOT post-increment; the sub below rewinds kptr
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v28.8h, v18.8h, v2.h[2]   \n"
"fmla v29.8h, v18.8h, v0.h[2]   \n"
"fmla v30.8h, v19.8h, v2.h[3]   \n"
"fmla v31.8h, v19.8h, v0.h[3]   \n"
"fmla v28.8h, v20.8h, v2.h[4]   \n"
"fmla v29.8h, v20.8h, v0.h[4]   \n"
"fmla v30.8h, v21.8h, v2.h[5]   \n"
"fmla v31.8h, v21.8h, v0.h[5]   \n"
"fmla v28.8h, v22.8h, v2.h[6]   \n"
"fmla v29.8h, v22.8h, v0.h[6]   \n"
"fmla v30.8h, v23.8h, v2.h[7]   \n"
"fmla v31.8h, v23.8h, v0.h[7]   \n"
// merge the split accumulators: v28 = column j, v29 = column j+1
"fadd v28.8h, v28.8h, v30.8h    \n"
"fadd v29.8h, v29.8h, v31.8h    \n"
"sub %4, %4, #1088              \n" // kptr -= 8.5 * 64;
"st1 {v28.8h, v29.8h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2),      // %3
"=r"(kptr)     // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
// NOTE(review): remainder loop handling ONE output column per iteration
// (same kernel as the pair loop above).  Operands: %0 = outptr0,
// %1/%2/%3 = input rows r0/r1/r2, %4 = kptr (weights).
// Each row contributes three taps (e.g. r00 r01 r02); the row pointers
// are advanced by 32 bytes (= two pack8 fp16 positions) per output
// column, consistent with a stride-2 kernel -- confirm against the
// enclosing function, which is outside this view.
// Four accumulators v28..v31 are interleaved purely for instruction-level
// parallelism: v31 starts from the partial sum already at outptr0,
// v28/v29/v30 start fresh via fmul, and the fadd chain at the end folds
// all four into one result vector.
// kptr: 17 post-incrementing 64-byte loads (+1088) plus one final
// non-incrementing load, then "sub #1088" restores it for the next j.
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"prfm pldl1keep, [%1, #384]     \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02
"prfm pldl1keep, [%0, #128]     \n"
"ld1 {v31.8h}, [%0]             \n" // sum0
// row 0 (r00 r01 r02)
"fmul v28.8h, v16.8h, v0.h[0]   \n"
"fmul v29.8h, v17.8h, v0.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmul v30.8h, v18.8h, v0.h[2]   \n"
"fmla v31.8h, v19.8h, v0.h[3]   \n"
"fmla v28.8h, v20.8h, v0.h[4]   \n"
"fmla v29.8h, v21.8h, v0.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6]   \n"
"fmla v31.8h, v23.8h, v0.h[7]   \n"
"fmla v28.8h, v16.8h, v1.h[0]   \n"
"fmla v29.8h, v17.8h, v1.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2]   \n"
"fmla v31.8h, v19.8h, v1.h[3]   \n"
"fmla v28.8h, v20.8h, v1.h[4]   \n"
"fmla v29.8h, v21.8h, v1.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6]   \n"
"fmla v31.8h, v23.8h, v1.h[7]   \n"
"fmla v28.8h, v16.8h, v2.h[0]   \n"
"fmla v29.8h, v17.8h, v2.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v2.h[2]   \n"
"fmla v31.8h, v19.8h, v2.h[3]   \n"
"prfm pldl1keep, [%2, #384]     \n"
"ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12
"fmla v28.8h, v20.8h, v2.h[4]   \n"
"fmla v29.8h, v21.8h, v2.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v2.h[6]   \n"
"fmla v31.8h, v23.8h, v2.h[7]   \n"
// row 1 (r10 r11 r12)
"fmla v28.8h, v16.8h, v3.h[0]   \n"
"fmla v29.8h, v17.8h, v3.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v3.h[2]   \n"
"fmla v31.8h, v19.8h, v3.h[3]   \n"
"fmla v28.8h, v20.8h, v3.h[4]   \n"
"fmla v29.8h, v21.8h, v3.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v3.h[6]   \n"
"fmla v31.8h, v23.8h, v3.h[7]   \n"
"fmla v28.8h, v16.8h, v4.h[0]   \n"
"fmla v29.8h, v17.8h, v4.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v4.h[2]   \n"
"fmla v31.8h, v19.8h, v4.h[3]   \n"
"fmla v28.8h, v20.8h, v4.h[4]   \n"
"fmla v29.8h, v21.8h, v4.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v4.h[6]   \n"
"fmla v31.8h, v23.8h, v4.h[7]   \n"
"fmla v28.8h, v16.8h, v5.h[0]   \n"
"fmla v29.8h, v17.8h, v5.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v5.h[2]   \n"
"fmla v31.8h, v19.8h, v5.h[3]   \n"
"prfm pldl1keep, [%3, #384]     \n"
"ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22
"fmla v28.8h, v20.8h, v5.h[4]   \n"
"fmla v29.8h, v21.8h, v5.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v5.h[6]   \n"
"fmla v31.8h, v23.8h, v5.h[7]   \n"
// row 2 (r20 r21 r22)
"fmla v28.8h, v16.8h, v0.h[0]   \n"
"fmla v29.8h, v17.8h, v0.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v0.h[2]   \n"
"fmla v31.8h, v19.8h, v0.h[3]   \n"
"fmla v28.8h, v20.8h, v0.h[4]   \n"
"fmla v29.8h, v21.8h, v0.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v0.h[6]   \n"
"fmla v31.8h, v23.8h, v0.h[7]   \n"
"fmla v28.8h, v16.8h, v1.h[0]   \n"
"fmla v29.8h, v17.8h, v1.h[1]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4], #64 \n"
"fmla v30.8h, v18.8h, v1.h[2]   \n"
"fmla v31.8h, v19.8h, v1.h[3]   \n"
"fmla v28.8h, v20.8h, v1.h[4]   \n"
"fmla v29.8h, v21.8h, v1.h[5]   \n"
"prfm pldl1keep, [%4, #512]     \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%4], #64 \n"
"fmla v30.8h, v22.8h, v1.h[6]   \n"
"fmla v31.8h, v23.8h, v1.h[7]   \n"
"fmla v28.8h, v16.8h, v2.h[0]   \n"
"fmla v29.8h, v17.8h, v2.h[1]   \n"
// "prfm pldl1keep, [%4, #512]       \n"
// final weight load does NOT post-increment; the sub below rewinds kptr
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n"
"fmla v30.8h, v18.8h, v2.h[2]   \n"
"fmla v31.8h, v19.8h, v2.h[3]   \n"
"fmla v28.8h, v20.8h, v2.h[4]   \n"
"fmla v29.8h, v21.8h, v2.h[5]   \n"
// pointer bumps interleaved with the tail fmlas/fadds to fill stalls
"add %1, %1, #32                \n"
"fmla v30.8h, v22.8h, v2.h[6]   \n"
"fmla v31.8h, v23.8h, v2.h[7]   \n"
"add %2, %2, #32                \n"
"fadd v28.8h, v28.8h, v29.8h    \n"
"fadd v30.8h, v30.8h, v31.8h    \n"
"add %3, %3, #32                \n"
"fadd v28.8h, v28.8h, v30.8h    \n"
"sub %4, %4, #1088              \n" // kptr -= 8.5 * 64;
"st1 {v28.8h}, [%0], #16        \n"
: "=r"(outptr0), // %0
"=r"(r0),      // %1
"=r"(r1),      // %2
"=r"(r2),      // %3
"=r"(kptr)     // %4
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31");
}
// skip the row remainder so each row pointer starts the next output row
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
}
|
nested_mixed.c | // RUN: %libomp-compile && env OMP_DISPLAY_AFFINITY=true %libomp-run | %python %S/check.py -c 'CHECK' %s
// REQUIRES: !abt
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// Nested-parallelism affinity-display test.
// The RUN line above sets OMP_DISPLAY_AFFINITY=true, so the runtime emits
// one affinity line per thread per parallel region using the format
// installed below; check.py matches those lines against the CHECK
// patterns at the bottom of this file.  The exact sequence of regions and
// their num_threads clauses IS the expected output -- do not reorder or
// merge regions.
int main(int argc, char** argv) {
// Format fields: %L = nesting level, %a = ancestor thread num,
// %n = thread num within team, %N = team size.
omp_set_affinity_format("TESTER: tl:%L at:%a tn:%n nt:%N");
// Enable nested parallelism so the inner regions below actually fork.
// (omp_set_nested is deprecated since OpenMP 5.0; kept deliberately here
// to exercise the legacy entry point -- do not "modernize".)
omp_set_nested(1);
#pragma omp parallel num_threads(1)
{
// nesting level 2: team of two
#pragma omp parallel num_threads(2)
{ }
#pragma omp parallel num_threads(2)
{
// nesting level 3: singleton team inside each level-2 thread
#pragma omp parallel num_threads(1)
{
// nesting level 4: team of two under each level-3 thread
#pragma omp parallel num_threads(2)
{ }
}
}
// back at level 2: singleton team
#pragma omp parallel num_threads(1)
{ }
}
// top-level regions after the nested tree
#pragma omp parallel num_threads(2)
{ }
#pragma omp parallel num_threads(1)
{ }
return 0;
}
// CHECK: num_threads=1 TESTER: tl:1 at:0 tn:0 nt:1
// CHECK: num_threads=2 TESTER: tl:2 at:[0-9] tn:[01] nt:2
// CHECK: num_threads=1 TESTER: tl:3 at:[0-9] tn:0 nt:1
// CHECK: num_threads=1 TESTER: tl:3 at:[0-9] tn:0 nt:1
// CHECK: num_threads=2 TESTER: tl:4 at:[0-9] tn:[01] nt:2
// CHECK: num_threads=2 TESTER: tl:4 at:[0-9] tn:[01] nt:2
// CHECK: num_threads=1 TESTER: tl:2 at:[0-9] tn:0 nt:1
// CHECK: num_threads=2 TESTER: tl:1 at:[0-9] tn:[01] nt:2
// CHECK: num_threads=1 TESTER: tl:1 at:[0-9] tn:0 nt:1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.